Unverified commit 831fd86e, authored by Jiabin Yang, committed by GitHub

EagerTensor to EagerVariable (#39447)

* merge legacy to fluid

* Remove legacy code

* Remove legacy code

* Remove DataType test

* Using Tensor directly instead of using EagerTensor

* support gradient_accumulation

* make test_imperative_lod_tensor_to_selected_rows longer

* make test_imperative_lod_tensor_to_selected_rows longer

* refine code

* Rename all EagerTensor to Tensor

* Rename some EagerTensor to Tensor

* rename EagerTensor to EagerVariable

* add more test

* merge develop and refine code
Parent f21d7957
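Before the diff itself, here is a minimal, hypothetical sketch of what the rename means at generated call sites (the identifiers X, Y and Out0Num are placeholders taken from the generator templates below, not real symbols, and this is not the literal generated code): forward functions now collect their inputs and outputs as egr::EagerVariable instead of egr::EagerTensor.

    // Sketch of a generated forward-function body after this change.
    // "X", "Y" and Out0Num are placeholders from the generator templates;
    // the real code is emitted by the eager code generator at build time.
    std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins = {
        {"X", egr::EagerUtils::TrySyncToVars(X)},
        {"Y", egr::EagerUtils::TrySyncToVars(Y)}};
    std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> outs = {
        {"Out0", egr::EagerUtils::CreateVars(Out0Num)}};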
@@ -1227,11 +1227,11 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 // Forward Function Body
 // According to fwd_inputs_name_pos_map
-std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
+std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
 ins =
 { {"X" , TrySyncToVars(X)}, { "Y" , TrySyncToVars(Y)} };
-std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
+std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
 outs =
 {
 {"Out0" , CreateVars(Out0Num)}, {"Out1"
@@ -1316,7 +1316,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 const char* FWD_INS_MAP_TEMPLATE =
 " std::map<std::string, "
-"std::vector<std::shared_ptr<egr::EagerTensor>>> ins = { "
+"std::vector<std::shared_ptr<egr::EagerVariable>>> ins = { "
 "%s };\n";
 std::string ins_map_str =
 paddle::string::Sprintf(FWD_INS_MAP_TEMPLATE, ins_contents_str);
@@ -1353,8 +1353,9 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 if (op_passing_outs_map[op_type].count(output_name)) {
 const std::string output_var_name = output_name + "Var";
-// Pass Output from function argument(EagerTensor*/vector<EagerTensor*>&),
-// in form of shared_ptr<EagerTensor>/vector<shared_ptr<EagerTensor>>
+// Pass Output from function
+// argument(EagerVariable*/vector<EagerVariable*>&),
+// in form of shared_ptr<EagerVariable>/vector<shared_ptr<EagerVariable>>
 if (output.duplicable()) {
 const char* FWD_NUM_ARG_TEMPLATE =
 ", std::vector<paddle::experimental::Tensor*>& %s";
@@ -1395,7 +1396,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 } else {
 const char* FWD_OUTS_CONTENT_TEMPLATE =
 "{ \"%s\", "
-"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance()."
+"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance()."
 "GenerateUniqueName())}},";
 outs_contents_str +=
 paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE, output_name);
@@ -1407,7 +1408,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 const char* FWD_OUTS_MAP_TEMPLATE =
 " std::map<std::string, "
-"std::vector<std::shared_ptr<egr::EagerTensor>>> outs = { "
+"std::vector<std::shared_ptr<egr::EagerVariable>>> outs = { "
 "%s };\n";
 std::string outs_map_str =
 paddle::string::Sprintf(FWD_OUTS_MAP_TEMPLATE, outs_contents_str);
@@ -1482,7 +1483,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
 generated_function_body += out_tensor_str;
 }
 generated_function_body += "\n";
-VLOG(6) << "Converted Output VarBase to EagerTensor(s)";
+VLOG(6) << "Converted Output VarBase to EagerVariable(s)";
 // [Generation] Handle core_ops_returns_info
 core_ops_returns_info[op_type] = return_contents;
@@ -1627,7 +1628,7 @@ static std::string GenerateSingleOpBase(
 const char* BWD_INS_MAP_TEMPLATE =
 " std::map<std::string, "
-"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
+"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
 "%s };\n";
 std::string ins_map_str =
 paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str);
@@ -1704,7 +1705,7 @@ static std::string GenerateSingleOpBase(
 } else {
 const char* GRAD_OUTS_CONTENT_TEMPLATE =
 "{ \"%s\", "
-"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance("
+"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance("
 ")."
 "GenerateUniqueName())}},";
 outs_contents_str += paddle::string::Sprintf(
@@ -1723,7 +1724,7 @@ static std::string GenerateSingleOpBase(
 const char* BWD_OUTS_MAP_TEMPLATE =
 " std::map<std::string, "
-"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
+"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
 "%s };\n";
 std::string outs_map_str = paddle::string::Sprintf(
 BWD_OUTS_MAP_TEMPLATE, outs_name, outs_contents_str);
...
@@ -40,36 +40,28 @@
 * **/
 namespace egr {
-class EagerTensor final {
+class EagerVariable final {
 public:
 /* Default constructor and name constructor should only be used for contruct
 * output and in fluid*/
-EagerTensor() = default;
+EagerVariable() = default;
-explicit EagerTensor(const std::string& name) : name_(name) {}
+explicit EagerVariable(const std::string& name) : name_(name) {}
-explicit EagerTensor(const paddle::experimental::Tensor& tensor)
+explicit EagerVariable(const paddle::experimental::Tensor& tensor)
 : name_(tensor.name()) {
 if (tensor.defined()) {
 if (tensor.is_dense_tensor()) {
-auto* framework_tensor =
-var_.GetMutable<paddle::framework::LoDTensor>();
-// Contruct framework::Tensor from egr::EagerTensor
-auto tensor_dense =
-std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
-PADDLE_ENFORCE_EQ((tensor_dense.get() && tensor_dense), true,
-paddle::platform::errors::Fatal(
-"Failed to Trans Tensor to EagerVariable since "
-"we got Tensor with type DenseTensor, and we got "
-"EagerVariable with another type."));
-*framework_tensor = *tensor_dense;
+ConstructVariableFromTensor(tensor);
+} else if (tensor.is_selected_rows()) {
+ConstructVariableFromSelectedRows(tensor);
 } else {
 PADDLE_THROW(paddle::platform::errors::Fatal(
 "Unrecognized egr::EagerVariable type, only "
-"DenseTensor and SelectedRows is supported for now."));
+"DenseTensor and SelectedRows are supported for now."));
 }
 } else {
-VLOG(6) << "Build Empty EagerTensor with name " << name_;
+VLOG(6) << "Build Empty EagerVariable with name " << name_;
 }
 }
@@ -77,21 +69,20 @@ class EagerTensor final {
 std::shared_ptr<pten::TensorBase> GetTensorBase() {
 // Construct allocation only once.
 if (var_.IsInitialized()) {
-if (var_.IsType<paddle::framework::LoDTensor>()) {
-return SetImplWithLegacyTensor<pten::DenseTensor>();
-} else if (var_.IsType<paddle::framework::Tensor>()) {
-return SetImplWithLegacyTensor<pten::DenseTensor>();
+if (var_.IsType<paddle::framework::LoDTensor>() ||
+var_.IsType<paddle::framework::Tensor>()) {
+return SetImplWithLegacyTensor();
 } else if (var_.IsType<pten::SelectedRows>()) {
-return SetImplWithSelectedRows();
+return SetImplWithLegacySelectedRows();
 } else {
 PADDLE_THROW(paddle::platform::errors::Fatal(
 "Unable to fetch underlying tensor "
-"from EagerTensor, only LoDTensor and "
+"from EagerVariable, only LoDTensor and "
 "Tensor are supported for now"));
 }
 } else {
 PADDLE_THROW(paddle::platform::errors::Fatal(
-"Can not Sync EagerTensor %s whose paddle::framework::Variable is "
+"Can not Sync EagerVariable %s whose paddle::framework::Variable is "
 "not initialized!",
 name()));
 }
@@ -107,23 +98,52 @@ class EagerTensor final {
 void set_name(const std::string& name) { name_ = name; }
 private:
-template <typename LEGACY_TYPE>
 std::shared_ptr<pten::TensorBase> SetImplWithLegacyTensor() {
-const auto& framework_tensor = var_.Get<LEGACY_TYPE>();
+const auto& framework_tensor = var_.Get<pten::DenseTensor>();
 VLOG(8) << "Sync Var to tensor for: " << name();
-return std::make_shared<LEGACY_TYPE>(std::move(framework_tensor));
+return std::make_shared<pten::DenseTensor>(framework_tensor);
 }
-std::shared_ptr<pten::TensorBase> SetImplWithSelectedRows() {
-auto* selected_rows = var_.GetMutable<pten::SelectedRows>();
-auto res = std::make_shared<pten::SelectedRows>(selected_rows->rows_,
-selected_rows->height_);
-res->value_.reset(selected_rows->value_.release());
-res->id_to_index_ = std::move(selected_rows->id_to_index_);
-res->rwlock_.reset(selected_rows->rwlock_.release());
+std::shared_ptr<pten::TensorBase> SetImplWithLegacySelectedRows() {
+auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
+VLOG(8) << "Sync SelectedRows to tensor for: " << name();
+auto res =
+std::make_shared<pten::SelectedRows>(std::move(*framework_tensor));
+var_.Clear();
 return res;
 }
+void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
+auto* framework_tensor = var_.GetMutable<pten::DenseTensor>();
+// Contruct framework::Tensor from egr::EagerVariable
+auto tensor_dense =
+std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
+PADDLE_ENFORCE_EQ(
+(tensor_dense.get() && tensor_dense), true,
+paddle::platform::errors::Fatal(
+"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
+"Or it holds empty impl, this should not happend since we should "
+"treat all kinds of tensor as what they are.",
+tensor.name()));
+*framework_tensor = *tensor_dense;
+}
+void ConstructVariableFromSelectedRows(
+const paddle::experimental::Tensor& tensor) {
+auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
+// Contruct framework::Tensor from egr::EagerVariable
+auto tensor_dense =
+std::dynamic_pointer_cast<pten::SelectedRows>(tensor.impl());
+PADDLE_ENFORCE_EQ(
+(tensor_dense.get() && tensor_dense), true,
+paddle::platform::errors::Fatal(
+"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
+"Or it holds empty impl, this should not happend since we should "
+"treat all kinds of tensor as what they are.",
+tensor.name()));
+*framework_tensor = std::move(*tensor_dense);
+}
 private:
 std::string name_{""};
 paddle::framework::Variable var_;
...
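For orientation, a minimal sketch of the round trip this class provides (assuming the internal eager header of this era is available at paddle/fluid/eager/eager_tensor.h; RoundTrip is a hypothetical helper, not part of the PR): wrap a paddle::experimental::Tensor in an EagerVariable, then rebuild a Tensor that shares the same pten::TensorBase implementation, as the tests below do.

    #include "paddle/fluid/eager/eager_tensor.h"

    // Hypothetical helper: Tensor -> EagerVariable -> Tensor.
    paddle::experimental::Tensor RoundTrip(const paddle::experimental::Tensor& t) {
      egr::EagerVariable var(t);        // copies t's DenseTensor/SelectedRows into var_
      auto base = var.GetTensorBase();  // fetches it back as a pten::TensorBase
      return paddle::experimental::Tensor(base, var.name());
    }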
@@ -115,7 +115,7 @@ TEST(Tensor, MemberFunction) {
 CHECK_EQ(tmp_autograd_meta_test->val_, 2);
 }
-TEST(EagerTensor, Constructor) {
+TEST(EagerVariable, Constructor) {
 paddle::experimental::Tensor t3;
 pten::DenseTensorMeta meta = pten::DenseTensorMeta(
 pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
@@ -134,7 +134,7 @@ TEST(EagerTensor, Constructor) {
 CHECK_EQ(t3.defined(), false);
 t3.set_impl(dt);
-egr::EagerTensor et3 = egr::EagerTensor(t3);
+egr::EagerVariable et3 = egr::EagerVariable(t3);
 VLOG(6) << "SyncToVar";
 CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
 5.0f);
...
@@ -167,7 +167,7 @@ TEST(EagerUtils, PassStopGradient) {
 TEST(EagerUtils, TrySyncToVar) {
 paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
 auto tensor = CreateTestCPUTensor(5.0f, ddim);
-std::vector<std::shared_ptr<egr::EagerTensor>> var_bases = {
+std::vector<std::shared_ptr<egr::EagerVariable>> var_bases = {
 egr::EagerUtils::TrySyncToVar(tensor)};
 paddle::framework::Variable* var = var_bases[0]->MutableVar();
@@ -187,7 +187,7 @@ TEST(EagerUtils, TrySyncToVars) {
 std::vector<paddle::experimental::Tensor> tensors = {
 CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)};
-std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
+std::vector<std::shared_ptr<egr::EagerVariable>> var_bases =
 egr::EagerUtils::TrySyncToVars(tensors);
 {
@@ -218,7 +218,7 @@ TEST(EagerUtils, TrySyncToVars) {
 TEST(EagerUtils, CreateVars) {
 VLOG(6) << "Check CreateVars";
-std::vector<std::shared_ptr<egr::EagerTensor>> outs =
+std::vector<std::shared_ptr<egr::EagerVariable>> outs =
 egr::EagerUtils::CreateVars(2);
 CHECK_EQ(outs.size(), size_t(2));
 CHECK(outs[0]->Var().IsInitialized() == false);
...
@@ -131,17 +131,17 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
 target->SetSingleOutRankWithSlot(slot_id, 0);
 }
-std::shared_ptr<egr::EagerTensor> EagerUtils::TrySyncToVar(
+std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
 const paddle::experimental::Tensor& tensor) {
-return std::make_shared<egr::EagerTensor>(tensor);
+return std::make_shared<egr::EagerVariable>(tensor);
 }
-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
 const paddle::experimental::Tensor& tensor) {
 return {TrySyncToVar(tensor)};
 }
-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
 paddle::experimental::Tensor* tensor) {
 PADDLE_ENFORCE_NOT_NULL(
 tensor,
@@ -151,9 +151,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
 return {TrySyncToVar(*tensor)};
 }
-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
 const std::vector<paddle::experimental::Tensor*>& tensors) {
-std::vector<std::shared_ptr<EagerTensor>> res;
+std::vector<std::shared_ptr<EagerVariable>> res;
 size_t num = tensors.size();
 res.reserve(num);
 for (size_t i = 0; i < num; i++) {
@@ -169,9 +169,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
 return res;
 }
-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
 const std::vector<paddle::experimental::Tensor>& tensors) {
-std::vector<std::shared_ptr<EagerTensor>> res;
+std::vector<std::shared_ptr<EagerVariable>> res;
 size_t num = tensors.size();
 res.reserve(num);
 for (size_t i = 0; i < num; i++) {
@@ -180,19 +180,19 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
 return res;
 }
-std::vector<std::shared_ptr<EagerTensor>> EagerUtils::CreateVars(
+std::vector<std::shared_ptr<EagerVariable>> EagerUtils::CreateVars(
 const size_t num) {
-std::vector<std::shared_ptr<EagerTensor>> res;
+std::vector<std::shared_ptr<EagerVariable>> res;
 res.reserve(num);
 for (size_t i = 0; i < num; i++) {
 res.emplace_back(
-new EagerTensor(egr::Controller::Instance().GenerateUniqueName()));
+new EagerVariable(egr::Controller::Instance().GenerateUniqueName()));
 }
 return res;
 }
 std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
-const std::vector<std::shared_ptr<EagerTensor>>& outs) {
+const std::vector<std::shared_ptr<EagerVariable>>& outs) {
 std::vector<paddle::experimental::Tensor> res;
 res.reserve(outs.size());
 for (const auto& out : outs) {
@@ -209,7 +209,7 @@ std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
 }
 paddle::experimental::Tensor EagerUtils::GetOutput(
-const std::shared_ptr<EagerTensor>& out) {
+const std::shared_ptr<EagerVariable>& out) {
 PADDLE_ENFORCE_NOT_NULL(
 out.get(), paddle::platform::errors::Fatal(
 "Eager Tensor %s is null and cannot be copied. We "
@@ -219,7 +219,7 @@ paddle::experimental::Tensor EagerUtils::GetOutput(
 return paddle::experimental::Tensor(out->GetTensorBase(), out->name());
 }
-void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
+void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
 paddle::experimental::Tensor* tensor) {
 PADDLE_ENFORCE_NOT_NULL(
 tensor, paddle::platform::errors::Fatal(
@@ -231,7 +231,7 @@ void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
 }
 void EagerUtils::OverwriteOutputs(
-const std::vector<std::shared_ptr<EagerTensor>>& outs,
+const std::vector<std::shared_ptr<EagerVariable>>& outs,
 const std::vector<paddle::experimental::Tensor*>& tensors) {
 PADDLE_ENFORCE_EQ(
 outs.size(), tensors.size(),
...
@@ -88,7 +88,7 @@ class EagerUtils {
 /**
 * We have to use autograd_meta and multi_autograd_meta to initialize
 * autograd_meta for tensor, since we can't init it in
-* egr::EagerTensor's
+* egr::EagerVariable's
 * constructor (it's abstract class there)
 *
 * **/
@@ -151,34 +151,35 @@ class EagerUtils {
 // Intermidate needed remove this once we don't need legacy
 // Inner Method
-static std::shared_ptr<egr::EagerTensor> TrySyncToVar(
+static std::shared_ptr<egr::EagerVariable> TrySyncToVar(
 const paddle::experimental::Tensor& tensor);
 // Basic Input
-static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
 const paddle::experimental::Tensor& tensor);
 // Basic Output
-static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
 paddle::experimental::Tensor* tensor);
 // Multi Output
-static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
 const std::vector<paddle::experimental::Tensor*>& tensors);
 // Multi Input
-static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
 const std::vector<paddle::experimental::Tensor>& tensors);
 // Construct empty output
-static std::vector<std::shared_ptr<EagerTensor>> CreateVars(const size_t num);
+static std::vector<std::shared_ptr<EagerVariable>> CreateVars(
+const size_t num);
 // Construct Tensor From var
 static std::vector<paddle::experimental::Tensor> GetOutputs(
-const std::vector<std::shared_ptr<EagerTensor>>& outs);
+const std::vector<std::shared_ptr<EagerVariable>>& outs);
 static paddle::experimental::Tensor GetOutput(
-const std::shared_ptr<EagerTensor>& out);
+const std::shared_ptr<EagerVariable>& out);
 // Sync Back to origin output Tensor
-static void OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
+static void OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
 paddle::experimental::Tensor* tensor);
 static void OverwriteOutputs(const paddle::experimental::Tensor& out,
 paddle::experimental::Tensor* tensor);
 static void OverwriteOutputs(
-const std::vector<std::shared_ptr<EagerTensor>>& outs,
+const std::vector<std::shared_ptr<EagerVariable>>& outs,
 const std::vector<paddle::experimental::Tensor*>& tensors);
 static void OverwriteOutputs(
 const std::vector<paddle::experimental::Tensor>& outs,
...
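A small usage sketch of the renamed helpers, mirroring the TrySyncToVars/GetOutputs tests in this PR (ConvertAndBack is a hypothetical function, not part of the change):

    // Hypothetical sketch: convert Tensors into EagerVariables for an op call,
    // then rebuild Tensors from the resulting variables.
    std::vector<paddle::experimental::Tensor> ConvertAndBack(
        const std::vector<paddle::experimental::Tensor>& tensors) {
      std::vector<std::shared_ptr<egr::EagerVariable>> vars =
          egr::EagerUtils::TrySyncToVars(tensors);
      return egr::EagerUtils::GetOutputs(vars);
    }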
@@ -340,8 +340,8 @@ NameVarMap<VarType> AutoCastInputs(const std::string& op_type,
 }
 template NameVarMap<VarBase> AutoCastInputs<VarBase>(
 const std::string& op_type, const NameVarMap<VarBase>& ins);
-template NameVarMap<egr::EagerTensor> AutoCastInputs<egr::EagerTensor>(
-const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
+template NameVarMap<egr::EagerVariable> AutoCastInputs<egr::EagerVariable>(
+const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
 template <typename VarType>
 NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
 const NameVarMap<VarType>& ins) {
@@ -384,7 +384,7 @@ NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
 }
 template NameVarMap<VarBase> CastPureFp16Inputs<VarBase>(
 const std::string& op_type, const NameVarMap<VarBase>& ins);
-template NameVarMap<egr::EagerTensor> CastPureFp16Inputs<egr::EagerTensor>(
-const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
+template NameVarMap<egr::EagerVariable> CastPureFp16Inputs<egr::EagerVariable>(
+const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
 } // namespace imperative
 } // namespace paddle
@@ -177,9 +177,9 @@ std::string LayerDebugString(const std::string& op_type,
 }
 std::string LayerDebugString(const std::string& op_type,
-const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs) {
-return LayerDebugStringImpl<egr::EagerTensor>(op_type, ins, outs);
+const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs) {
+return LayerDebugStringImpl<egr::EagerVariable>(op_type, ins, outs);
 }
 template <typename VarType>
@@ -194,11 +194,16 @@ static void SetForwardDataTypeOfGradVars(const NameVarMap<VarType>& outs) {
 }
 }
 template <>
-void SetForwardDataTypeOfGradVars<egr::EagerTensor>(
-const NameVarMap<egr::EagerTensor>& outs) {
+void SetForwardDataTypeOfGradVars<egr::EagerVariable>(
+const NameVarMap<egr::EagerVariable>& outs) {
 // In eager mode we don't need this.
 }
+void TestSetForwardDataTypeOfGradVarsEager(
+const NameVarMap<egr::EagerVariable>& outs) {
+SetForwardDataTypeOfGradVars<egr::EagerVariable>(outs);
+}
 VarBase::VarBase(const std::shared_ptr<VariableWrapper>& var)
 : var_(var), grad_node_(var->GetGradNode()) {
 if (auto grad_var = var_->GetGradVar()) {
@@ -528,12 +533,12 @@ void OpBase::Run(const framework::OperatorBase& op,
 }
 void OpBase::Run(const framework::OperatorBase& op,
-const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs,
 const platform::Place& place) {
-OpBaseRunImpl<egr::EagerTensor>(op, ins, outs, attrs, default_attrs, place);
+OpBaseRunImpl<egr::EagerVariable>(op, ins, outs, attrs, default_attrs, place);
 }
 void ClearNoNeedBufferInputs(OpBase* op) {
...
@@ -185,8 +185,8 @@ class OpBase {
 const framework::AttributeMap& default_attrs,
 const platform::Place& place);
 static void Run(const framework::OperatorBase& op,
-const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs,
 const platform::Place& place);
...
@@ -89,11 +89,16 @@ void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
 }
 template <>
-void HandleComplexGradToRealGrad<egr::EagerTensor>(
-const NameVarMap<egr::EagerTensor>& outs) {
+void HandleComplexGradToRealGrad<egr::EagerVariable>(
+const NameVarMap<egr::EagerVariable>& outs) {
 // TODO(jiabin): Support Complex here.
 }
+void TestHandleComplexGradToRealGradEager(
+const NameVarMap<egr::EagerVariable>& outs) {
+HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
+}
 PreparedOp::PreparedOp(const framework::OperatorBase& op,
 const framework::RuntimeContext& ctx,
 const framework::OpKernelType& kernel_type,
@@ -322,14 +327,14 @@ PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
 default_attrs);
 }
-PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::OperatorWithKernel& op,
 const platform::Place& place,
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs) {
-return PrepareImpl<egr::EagerTensor>(ins, outs, op, place, attrs,
+return PrepareImpl<egr::EagerVariable>(ins, outs, op, place, attrs,
 default_attrs);
 }
 template <typename VarType>
 static void PreparedOpRunImpl(
@@ -461,18 +466,18 @@ void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
 }
 }
-void PreparedOp::Run(const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs) {
 if (run_pten_kernel_) {
-PreparedOpRunPtImpl<egr::EagerTensor>(
+PreparedOpRunPtImpl<egr::EagerVariable>(
 op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins,
 outs, attrs, default_attrs);
 } else {
-PreparedOpRunImpl<egr::EagerTensor>(op_, ctx_, kernel_type_, func_,
+PreparedOpRunImpl<egr::EagerVariable>(op_, ctx_, kernel_type_, func_,
 dev_ctx_, ins, outs, attrs,
 default_attrs);
 }
 }
...
@@ -63,8 +63,8 @@ void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
 }
 template <>
-void SetForwardDataTypeOfGradVar<egr::EagerTensor>(
-const std::shared_ptr<egr::EagerTensor>& var) {
+void SetForwardDataTypeOfGradVar<egr::EagerVariable>(
+const std::shared_ptr<egr::EagerVariable>& var) {
 VLOG(10) << "Var in Eager dose not support SetForwardDataTypeOfGradVar: "
 << var->name();
 // TODO(jiabin): SetForwardDataType of Grad var is not supported yet in
@@ -171,8 +171,8 @@ class PreparedOp {
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs);
-static PreparedOp Prepare(const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+static PreparedOp Prepare(const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::OperatorWithKernel& op,
 const platform::Place& place,
 const framework::AttributeMap& attrs,
@@ -187,8 +187,8 @@ class PreparedOp {
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs);
-void Run(const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs,
+void Run(const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs,
 const framework::AttributeMap& attrs,
 const framework::AttributeMap& default_attrs);
...
@@ -31,8 +31,8 @@
 namespace paddle {
 namespace imperative {
 extern std::string LayerDebugString(const std::string& op_type,
-const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs);
+const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs);
 extern std::shared_ptr<GradOpNode> CreateGradOpNode(
 const framework::OperatorBase& op, const NameTensorMap& ins,
@@ -41,20 +41,21 @@ extern std::shared_ptr<GradOpNode> CreateGradOpNode(
 const std::map<std::string, std::string>& inplace_map);
 TEST(test_eager, eager_debug) {
-std::shared_ptr<egr::EagerTensor> x_in(new egr::EagerTensor("x_in"));
-std::shared_ptr<egr::EagerTensor> y_in(new egr::EagerTensor("y_in"));
-std::shared_ptr<egr::EagerTensor> vout(new egr::EagerTensor("vout"));
-imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {x_in}}, {"Y", {y_in}}};
-imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {vout}}};
+std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
+std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
+std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
+imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {x_in}},
+{"Y", {y_in}}};
+imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {vout}}};
 LayerDebugString("mul", ins, outs);
 }
 TEST(test_create_node, eager_node) {
 auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false);
 framework::Scope scope;
 auto ctx = framework::RuntimeContext({}, {});
-imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {nullptr}},
+imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {nullptr}},
 {"Y", {nullptr}}};
-imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {nullptr}}};
+imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {nullptr}}};
 CreateGradOpNode((*op.get()), ins, outs, framework::AttributeMap{},
 framework::AttributeMap{}, platform::CPUPlace(), {});
 }
@@ -72,26 +73,26 @@ TEST(test_var_helper, eager_var_helper) {
 ASSERT_ANY_THROW(
 InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));
-auto egr_tensor = std::make_shared<egr::EagerTensor>();
-auto egr_tensor2 = std::make_shared<egr::EagerTensor>();
+auto egr_tensor = std::make_shared<egr::EagerVariable>();
+auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
 egr_tensor->MutableVar()
 ->GetMutable<pten::SelectedRows>()
 ->mutable_value()
 ->mutable_data<float>(platform::CPUPlace());
 egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
 VLOG(6) << "egr_tensor create with ";
-ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerTensor>(egr_tensor)));
-ASSERT_TRUE(GetDataType<egr::EagerTensor>(egr_tensor) ==
+ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
+ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
 framework::proto::VarType::FP32);
-GetCachedValue<egr::EagerTensor>(
+GetCachedValue<egr::EagerVariable>(
 egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
 platform::CPUPlace()));
-SetCachedValue<egr::EagerTensor>(
+SetCachedValue<egr::EagerVariable>(
 egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
 platform::CPUPlace()),
 egr_tensor2);
-ASSERT_ANY_THROW(GetPlace<egr::EagerTensor>(egr_tensor2));
-ASSERT_ANY_THROW(SetType<egr::EagerTensor>(
+ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
+ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
 egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY));
 }
 } // namespace imperative
...
@@ -39,6 +39,8 @@ using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
 using var_pair = std::pair<std::string, vb_vector>;
+extern void TestSetForwardDataTypeOfGradVarsEager(
+const NameVarMap<egr::EagerVariable>& outs);
 template <typename VarType>
 class TestRuntimeInferVarTypeContext
 : public RuntimeInferVarTypeContext<VarType> {
@@ -406,6 +408,11 @@ TEST(test_layer, test_inner_op_not_inited) {
 ASSERT_THROW(op.CheckAttrs(), platform::EnforceNotMet);
 }
+TEST(test_layer, test_eager) {
+imperative::NameTensorMap ins = {};
+TestSetForwardDataTypeOfGradVarsEager(ins);
+}
 } // namespace imperative
 } // namespace paddle
...
@@ -32,6 +32,9 @@ namespace framework = paddle::framework;
 namespace paddle {
 namespace imperative {
+extern void TestHandleComplexGradToRealGradEager(
+const NameVarMap<egr::EagerVariable>& outs);
 static framework::VariableNameMap CreateVarNameMap(
 const framework::OpInfo& op_info, const std::string& op_type,
 const NameVarBaseMap& varbase_map, bool is_input) {
@@ -209,6 +212,11 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
 TestPrepareDataSamePlace({});
 }
+TEST(test_prepare_op, test_complex_eager) {
+NameVarMap<egr::EagerVariable> outs = {};
+TestHandleComplexGradToRealGradEager(outs);
+}
 #ifdef PADDLE_WITH_MKLDNN
 TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
 TestPrepareDataSamePlace({{"use_mkldnn", true}});
...
@@ -37,9 +37,10 @@ namespace paddle {
 namespace imperative {
 using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
 using var_pair = std::pair<std::string, vb_vector>;
+using ev_vector = std::vector<std::shared_ptr<egr::EagerVariable>>;
+using ev_pair = std::pair<std::string, ev_vector>;
 TEST(test_tracer, test_trace_op) {
 // Doing an mul
 imperative::Tracer tracer;
@@ -546,6 +547,44 @@ TEST(test_tracer, test_execution_context) {
 ASSERT_EQ(dy_ctx.OutputName("Out"), framework::kEmptyVarName);
 }
+TEST(test_tracer, eager_tracer) {
+// Doing an mul
+imperative::Tracer tracer;
+std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
+std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
+std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
+platform::CPUPlace place;
+std::vector<float> src_data(10, 2.0);
+std::vector<int64_t> dims1 = {2, 5};
+std::vector<int64_t> dims2 = {5, 2};
+auto* x_in_tensor = x_in->MutableVar()->GetMutable<framework::LoDTensor>();
+auto* y_in_tensor = y_in->MutableVar()->GetMutable<framework::LoDTensor>();
+x_in_tensor->Resize(framework::make_ddim(dims1));
+auto* mutable_x = x_in_tensor->mutable_data<float>(place);
+paddle::memory::Copy(place, mutable_x, place, src_data.data(),
+sizeof(float) * src_data.size());
+y_in_tensor->Resize(framework::make_ddim(dims2));
+auto* mutable_y = y_in_tensor->mutable_data<float>(place);
+paddle::memory::Copy(place, mutable_y, place, src_data.data(),
+sizeof(float) * src_data.size());
+ev_pair x_pair = ev_pair("X", ev_vector(1, x_in));
+ev_pair y_pair = ev_pair("Y", ev_vector(1, y_in));
+ev_pair out_pair = ev_pair("Out", ev_vector(1, vout));
+imperative::NameTensorMap ins = {x_pair, y_pair};
+imperative::NameTensorMap outs = {out_pair};
+framework::AttributeMap mul_attr_map;
+mul_attr_map["use_mkldnn"] = false;
+tracer.TraceOp<egr::EagerVariable>("mul", ins, outs, mul_attr_map, place,
+true);
+const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
+for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
+}
+}
 } // namespace imperative
 } // namespace paddle
...
@@ -168,7 +168,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
 const platform::Place& place, bool trace_backward,
 const std::map<std::string, std::string>& inplace_map,
 paddle::framework::AttributeMap* passed_default_attrs_,
-bool override_default_attr_map) {
+bool use_default_attr_map) {
 platform::RecordEvent op_type_record_event(type);
 platform::ScopedFlushDenormal flush;
 VLOG(1) << "Trace Op: " << type;
@@ -244,7 +244,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
 "CustomPlace."));
 #endif
 }
-if (!override_default_attr_map) {
+if (!use_default_attr_map) {
 PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
 paddle::platform::errors::PermissionDenied(
 "Detected default_attrs = nullptr."));
@@ -280,16 +280,14 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
 }
 if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
-if (!override_default_attr_map) {
-PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
-paddle::platform::errors::PermissionDenied(
-"Detected default_attrs = nullptr."));
-CreateGradOpNode(*op, new_ins, outs, attrs, *passed_default_attrs_, place,
-inplace_map);
-} else {
-CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
-inplace_map);
-}
+PADDLE_ENFORCE_EQ(
+passed_default_attrs_, nullptr,
+paddle::platform::errors::PermissionDenied(
+"We expect passed_default_attrs_ is nullptr while "
+"use_default_attr_map is true, however we got not null "
+"passed_default_attrs_. Please check your usage of trace_op. "));
+CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
+inplace_map);
 } else {
 VLOG(3) << "No Grad to track for Op: " << type;
 }
@@ -301,16 +299,14 @@ template void Tracer::TraceOp<VarBase>(
 const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
 const platform::Place& place, bool trace_backward,
 const std::map<std::string, std::string>& inplace_map,
-paddle::framework::AttributeMap* default_attrs,
-bool override_default_attr_map);
+paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
-template void Tracer::TraceOp<egr::EagerTensor>(
-const std::string& type, const NameVarMap<egr::EagerTensor>& ins,
-const NameVarMap<egr::EagerTensor>& outs, framework::AttributeMap attrs,
+template void Tracer::TraceOp<egr::EagerVariable>(
+const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
+const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
 const platform::Place& place, bool trace_backward,
 const std::map<std::string, std::string>& inplace_map_,
-paddle::framework::AttributeMap* default_attrs,
-bool override_default_attr_map);
+paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
 void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
 const NameVarBaseMap& outs, framework::AttributeMap attrs,
@@ -324,13 +320,12 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
 paddle::framework::AttributeMap attrs,
 const paddle::platform::Place& place,
 paddle::framework::AttributeMap* default_attrs,
-bool override_default_attr_map,
+bool use_default_attr_map,
 const std::map<std::string, std::string>& inplace_map) {
-VLOG(6) << "Running On Eager TraceOp with override_default_attr_map: "
-<< override_default_attr_map;
-TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), place, false,
-inplace_map, default_attrs,
-override_default_attr_map);
+VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
+<< use_default_attr_map;
+TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
+inplace_map, default_attrs, use_default_attr_map);
 }
 void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
@@ -338,8 +333,9 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
 paddle::framework::AttributeMap attrs,
 const std::map<std::string, std::string>& inplace_map) {
 VLOG(6) << "Running On Eager TraceOp(less): ";
-TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), expected_place_,
-false, inplace_map, nullptr, true);
+TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
+expected_place_, false, inplace_map, nullptr,
+true);
 }
 void Tracer::SetExpectedPlace(platform::Place place) {
...
@@ -69,7 +69,7 @@ class Tracer {
 const platform::Place& place, bool trace_backward,
 const std::map<std::string, std::string>& inplace_map = {},
 paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
-bool override_default_attr_map = true);
+bool use_default_attr_map = true);
 void TraceOp(const std::string& type, const NameVarBaseMap& ins,
 const NameVarBaseMap& outs, framework::AttributeMap attrs,
@@ -83,7 +83,7 @@ class Tracer {
 const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
 const paddle::platform::Place& place,
 paddle::framework::AttributeMap* default_attrs,
-bool override_default_attr_map,
+bool use_default_attr_map,
 const std::map<std::string, std::string>& inplace_map = {});
 bool ComputeRequiredGrad(const NameVarBaseMap& ins,
...
...@@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace<VarBase>( ...@@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace<VarBase>(
const std::shared_ptr<VarBase> &var); const std::shared_ptr<VarBase> &var);
template const paddle::platform::Place &GetPlace<VariableWrapper>( template const paddle::platform::Place &GetPlace<VariableWrapper>(
const std::shared_ptr<VariableWrapper> &var); const std::shared_ptr<VariableWrapper> &var);
template const paddle::platform::Place &GetPlace<egr::EagerTensor>( template const paddle::platform::Place &GetPlace<egr::EagerVariable>(
const std::shared_ptr<egr::EagerTensor> &var); const std::shared_ptr<egr::EagerVariable> &var);
/* GetNameFromVar */ /* GetNameFromVar */
template <typename VarType> template <typename VarType>
...@@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr<VarType> var) { ...@@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr<VarType> var) {
return var->Name(); return var->Name();
} }
template <> template <>
const std::string &GetNameFromVar<egr::EagerTensor>( const std::string &GetNameFromVar<egr::EagerVariable>(
std::shared_ptr<egr::EagerTensor> tensor) { std::shared_ptr<egr::EagerVariable> tensor) {
return tensor->name(); return tensor->name();
} }
template const std::string &GetNameFromVar<VariableWrapper>( template const std::string &GetNameFromVar<VariableWrapper>(
...@@ -120,8 +120,8 @@ void SetType(std::shared_ptr<VarType> var, ...@@ -120,8 +120,8 @@ void SetType(std::shared_ptr<VarType> var,
var->SetType(type); var->SetType(type);
} }
template <> template <>
void SetType<egr::EagerTensor>(std::shared_ptr<egr::EagerTensor> var, void SetType<egr::EagerVariable>(std::shared_ptr<egr::EagerVariable> var,
framework::proto::VarType::Type type) { framework::proto::VarType::Type type) {
switch (type) { switch (type) {
case paddle::framework::proto::VarType::LOD_TENSOR: { case paddle::framework::proto::VarType::LOD_TENSOR: {
var->MutableVar()->GetMutable<paddle::framework::LoDTensor>(); var->MutableVar()->GetMutable<paddle::framework::LoDTensor>();
...@@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr<VarType> var) { ...@@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr<VarType> var) {
return var->Type(); return var->Type();
} }
template <> template <>
framework::proto::VarType::Type GetType<egr::EagerTensor>( framework::proto::VarType::Type GetType<egr::EagerVariable>(
std::shared_ptr<egr::EagerTensor> var) { std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsInitialized()) { if (var->Var().IsInitialized()) {
return paddle::framework::ToVarType(var->Var().Type()); return paddle::framework::ToVarType(var->Var().Type());
} else { } else {
...@@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr<VarType> var) { ...@@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr<VarType> var) {
return var->DataType(); return var->DataType();
} }
template <> template <>
framework::proto::VarType::Type GetDataType<egr::EagerTensor>( framework::proto::VarType::Type GetDataType<egr::EagerVariable>(
std::shared_ptr<egr::EagerTensor> var) { std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsType<pten::SelectedRows>()) { if (var->Var().IsType<pten::SelectedRows>()) {
return framework::TransToProtoVarType( return framework::TransToProtoVarType(
var->Var().Get<pten::SelectedRows>().value().type()); var->Var().Get<pten::SelectedRows>().value().type());
...@@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr<VarType> var, ...@@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr<VarType> var,
return GetVariableWrapper(var)->hasCacheKey(key); return GetVariableWrapper(var)->hasCacheKey(key);
} }
template <> template <>
bool CheckCachedKey<egr::EagerTensor>( bool CheckCachedKey<egr::EagerVariable>(
std::shared_ptr<egr::EagerTensor> tensor, std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key) { const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later // TODO(jiabin): Support this later
// VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is // VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is
...@@ -219,7 +219,7 @@ std::shared_ptr<VariableWrapper> GetCachedValue( ...@@ -219,7 +219,7 @@ std::shared_ptr<VariableWrapper> GetCachedValue(
} }
template <> template <>
std::shared_ptr<VariableWrapper> GetCachedValue( std::shared_ptr<VariableWrapper> GetCachedValue(
std::shared_ptr<egr::EagerTensor> var, std::shared_ptr<egr::EagerVariable> var,
const paddle::framework::OpKernelType &key) { const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later // TODO(jiabin): Support this later
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
...@@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr<VarType> var, ...@@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr<VarType> var,
GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res)); GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res));
} }
template <> template <>
void SetCachedValue<egr::EagerTensor>( void SetCachedValue<egr::EagerVariable>(
std::shared_ptr<egr::EagerTensor> tensor, std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key, const paddle::framework::OpKernelType &key,
std::shared_ptr<egr::EagerTensor> res) { std::shared_ptr<egr::EagerVariable> res) {
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
// reach this, support cache and remove this error check later, or this // reach this, support cache and remove this error check later, or this
// should not be supported.")); // should not be supported."));
......
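The hunks above all follow one pattern: a generic function template serves VarBase and VariableWrapper, and a full specialization handles egr::EagerVariable, whose accessors differ (name() vs Name(), Var().Type() vs Type()). A minimal standalone sketch of that dispatch pattern, using placeholder types rather than the real Paddle classes:

#include <iostream>
#include <memory>
#include <string>

// Placeholder stand-ins (hypothetical) for VarBase and egr::EagerVariable.
struct LegacyVar {
  std::string Name() const { return "legacy_var"; }
};
struct EagerVar {
  std::string name() const { return "eager_var"; }  // eager side exposes name()
};

// Generic template: assumes the legacy Name() accessor.
template <typename VarType>
std::string GetNameFromVar(std::shared_ptr<VarType> var) {
  return var->Name();
}

// Full specialization: eager variables expose name() instead.
template <>
std::string GetNameFromVar<EagerVar>(std::shared_ptr<EagerVar> var) {
  return var->name();
}

int main() {
  std::cout << GetNameFromVar(std::make_shared<LegacyVar>()) << "\n";
  std::cout << GetNameFromVar(std::make_shared<EagerVar>()) << "\n";
}

The specialization is selected purely from the static type of the shared_ptr argument, so the imperative helpers need no runtime branching.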
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include "paddle/fluid/framework/variable.h" #include "paddle/fluid/framework/variable.h"
namespace egr { namespace egr {
class EagerTensor; class EagerVariable;
} // namespace egr } // namespace egr
namespace pten { namespace pten {
class DenseTensor; class DenseTensor;
......
...@@ -45,7 +45,7 @@ PyTypeObject* p_tensor_type; ...@@ -45,7 +45,7 @@ PyTypeObject* p_tensor_type;
extern PyTypeObject* g_vartype_pytype; extern PyTypeObject* g_vartype_pytype;
extern PyTypeObject* g_framework_tensor_pytype; extern PyTypeObject* g_framework_tensor_pytype;
PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) { PyObject* TensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
PyObject* obj = type->tp_alloc(type, 0); PyObject* obj = type->tp_alloc(type, 0);
if (obj) { if (obj) {
auto v = reinterpret_cast<TensorObject*>(obj); auto v = reinterpret_cast<TensorObject*>(obj);
...@@ -56,14 +56,14 @@ PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) { ...@@ -56,14 +56,14 @@ PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
} }
// TODO(jiabin): Overload this once we need more constructor in Python // TODO(jiabin): Overload this once we need more constructor in Python
void EmptyEagerTensorInitializer( void EmptyTensorInitializer(TensorObject* self, const std::string& name,
TensorObject* self, const std::string& name, const paddle::platform::Place& place,
const paddle::platform::Place& place, bool persistable = false, bool persistable = false, bool stop_gradient = true,
bool stop_gradient = true, framework::proto::VarType::Type dtype = framework::proto::VarType::Type dtype =
paddle::framework::proto::VarType::FP32, paddle::framework::proto::VarType::FP32,
const std::vector<int>& dims = {}, const std::vector<int>& dims = {},
framework::proto::VarType::Type var_type = framework::proto::VarType::Type var_type =
paddle::framework::proto::VarType::LOD_TENSOR) { paddle::framework::proto::VarType::LOD_TENSOR) {
auto ddims = paddle::framework::make_ddim(dims); auto ddims = paddle::framework::make_ddim(dims);
PADDLE_ENFORCE_GE( PADDLE_ENFORCE_GE(
paddle::framework::product(ddims), 0, paddle::framework::product(ddims), 0,
...@@ -98,46 +98,41 @@ void EmptyEagerTensorInitializer( ...@@ -98,46 +98,41 @@ void EmptyEagerTensorInitializer(
} }
} }
void InitEagerTensorWithNumpyValue(TensorObject* self, const py::object& array, void InitTensorWithNumpyValue(TensorObject* self, const py::object& array,
bool zero_copy = false) { bool zero_copy = false) {
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
self->tensor.defined(), true, self->tensor.defined(), true,
paddle::platform::errors::Fatal( paddle::platform::errors::Fatal(
"Calling InitEagerTensorWithNumpyValue of Eager Tensor without " "Calling InitTensorWithNumpyValue of Eager Tensor without "
"EmptyEagerTensorInitializer is " "EmptyTensorInitializer is "
"forbidden. Please check your code and make sure you new a " "forbidden. Please check your code and make sure you new a "
"eager tensor before init it with NumPy.")); "eager tensor before init it with NumPy."));
pten::DenseTensor* impl_ptr = pten::DenseTensor* impl_ptr =
static_cast<pten::DenseTensor*>(self->tensor.impl().get()); static_cast<pten::DenseTensor*>(self->tensor.impl().get());
paddle::platform::Place place = impl_ptr->place(); paddle::platform::Place place = impl_ptr->place();
paddle::framework::LoDTensor temp_tensor = paddle::framework::LoDTensor();
if (platform::is_cpu_place(place)) { if (platform::is_cpu_place(place)) {
SetTensorFromPyArray<platform::CPUPlace>(&temp_tensor, array, place, SetTensorFromPyArray<platform::CPUPlace>(impl_ptr, array, place, zero_copy);
zero_copy);
} else if (platform::is_xpu_place(place)) { } else if (platform::is_xpu_place(place)) {
SetTensorFromPyArray<platform::XPUPlace>(&temp_tensor, array, place, SetTensorFromPyArray<platform::XPUPlace>(impl_ptr, array, place, zero_copy);
zero_copy);
} else if (platform::is_gpu_place(place)) { } else if (platform::is_gpu_place(place)) {
SetTensorFromPyArray<platform::CUDAPlace>(&temp_tensor, array, place, SetTensorFromPyArray<platform::CUDAPlace>(impl_ptr, array, place,
zero_copy); zero_copy);
} else if (platform::is_cuda_pinned_place(place)) { } else if (platform::is_cuda_pinned_place(place)) {
SetTensorFromPyArray<platform::CUDAPinnedPlace>(&temp_tensor, array, place, SetTensorFromPyArray<platform::CUDAPinnedPlace>(impl_ptr, array, place,
zero_copy); zero_copy);
} else if (platform::is_npu_place(place)) { } else if (platform::is_npu_place(place)) {
SetTensorFromPyArray<platform::NPUPlace>(&temp_tensor, array, place, SetTensorFromPyArray<platform::NPUPlace>(impl_ptr, array, place, zero_copy);
zero_copy);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"Place should be one of " "Place should be one of "
"CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace")); "CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace"));
} }
*impl_ptr = temp_tensor;
} }
void InitEagerTensorWithEagerTensor(TensorObject* self, void InitTensorWithTensor(TensorObject* self,
const paddle::experimental::Tensor& src, const paddle::experimental::Tensor& src,
const paddle::platform::Place& place, const paddle::platform::Place& place,
const std::string& name) { const std::string& name) {
self->tensor.set_name(name); self->tensor.set_name(name);
if (place == src.inner_place()) { if (place == src.inner_place()) {
auto impl = std::static_pointer_cast<pten::DenseTensor>(src.impl()); auto impl = std::static_pointer_cast<pten::DenseTensor>(src.impl());
...@@ -158,10 +153,10 @@ void InitEagerTensorWithEagerTensor(TensorObject* self, ...@@ -158,10 +153,10 @@ void InitEagerTensorWithEagerTensor(TensorObject* self,
} }
} }
void InitEagerTensorWithFrameworkTensor(TensorObject* self, void InitTensorWithFrameworkTensor(TensorObject* self,
const framework::Tensor& src, const framework::Tensor& src,
const paddle::platform::Place& place, const paddle::platform::Place& place,
const std::string& name) { const std::string& name) {
self->tensor.set_name(name); self->tensor.set_name(name);
if (place == src.place()) { if (place == src.place()) {
self->tensor.set_impl(std::make_shared<pten::DenseTensor>(src)); self->tensor.set_impl(std::make_shared<pten::DenseTensor>(src));
...@@ -271,14 +266,14 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map, ...@@ -271,14 +266,14 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
return act_name; return act_name;
} }
// initialize EagerTensor by PyArray(first argument is PyArray, // initialize Tensor by PyArray(first argument is PyArray,
// mix args and kwargs) automatically. // mix args and kwargs) automatically.
void AutoInitEagerTensorByPyArray( void AutoInitTensorByPyArray(TensorObject* py_tensor_ptr,
TensorObject* py_tensor_ptr, std::unordered_map<std::string, PyObject*> kws_map,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args, PyObject* args, bool flag_kwargs,
bool flag_kwargs, Py_ssize_t args_num) { Py_ssize_t args_num) {
// The first argument of the EagerTensor constructor is PyArray, // The first argument of the Tensor constructor is PyArray,
// there are 6 arguments to construct the new EagerTensor, // there are 6 arguments to construct the new Tensor,
// kw_order_map's key is every arguments of the constructor, // kw_order_map's key is every arguments of the constructor,
// kw_order_map's value is the position of the arguments respectively. // kw_order_map's value is the position of the arguments respectively.
// If u want to update this constructor with new arguments, // If u want to update this constructor with new arguments,
...@@ -306,20 +301,21 @@ void AutoInitEagerTensorByPyArray( ...@@ -306,20 +301,21 @@ void AutoInitEagerTensorByPyArray(
stop_gradient = ParseBooleanArgs("stop_gradient", kws_map, kw_order_map, args, stop_gradient = ParseBooleanArgs("stop_gradient", kws_map, kw_order_map, args,
flag_kwargs, args_num); flag_kwargs, args_num);
EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable, EmptyTensorInitializer(py_tensor_ptr, act_name, place, persistable,
stop_gradient); stop_gradient);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy); InitTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
} }
// initialize EagerTensor by EagerTensor or framework::Tensor (mix args and // initialize Tensor by Tensor or framework::Tensor (mix args and
// kwargs) automatically. // kwargs) automatically.
void AutoInitEagerTensorByTensor( void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
TensorObject* py_tensor_ptr, std::unordered_map<std::string, PyObject*> kws_map,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args, PyObject* args, bool flag_kwargs,
bool flag_kwargs, Py_ssize_t args_num, bool init_by_egr_tensor = true) { Py_ssize_t args_num,
// The first argument of the EagerTensor constructor is EagerTensor or bool init_by_egr_tensor = true) {
// The first argument of the Tensor constructor is Tensor or
// framework Tensor, // framework Tensor,
// there are 3 arguments to construct the new EagerTensor, // there are 3 arguments to construct the new Tensor,
// kw_order_map's key is every arguments of the constructor, // kw_order_map's key is every arguments of the constructor,
// kw_order_map's value is the position of the arguments respectively. // kw_order_map's value is the position of the arguments respectively.
// If u want to update this constructor with new arguments, // If u want to update this constructor with new arguments,
...@@ -345,14 +341,14 @@ void AutoInitEagerTensorByTensor( ...@@ -345,14 +341,14 @@ void AutoInitEagerTensorByTensor(
src_tensor = CastPyArg2Tensor(kws_map["value"], 0); src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"The first expected kwargs is {value: EagerTensor}, " "The first expected kwargs is {value: Tensor}, "
"but could not parse the first argument {value: EagerTensor} " "but could not parse the first argument {value: Tensor} "
"successfully. " "successfully. "
"Please check your input first and make sure you are on the right " "Please check your input first and make sure you are on the right "
"way.")); "way."));
} }
} }
InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place, act_name); InitTensorWithTensor(py_tensor_ptr, src_tensor, place, act_name);
} else { } else {
// init by framework tensor // init by framework tensor
framework::Tensor src_tensor; framework::Tensor src_tensor;
...@@ -372,8 +368,7 @@ void AutoInitEagerTensorByTensor( ...@@ -372,8 +368,7 @@ void AutoInitEagerTensorByTensor(
"way.")); "way."));
} }
} }
InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, InitTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, act_name);
act_name);
} }
} }
...@@ -402,12 +397,12 @@ void AutoInitEagerTensorByTensor( ...@@ -402,12 +397,12 @@ void AutoInitEagerTensorByTensor(
* ** value: ndarray) * ** value: ndarray)
* 5. * 5.
* def __init__ ( * def __init__ (
* ** tensor: EagerTensor) * ** tensor: Tensor)
* 6. (multi-place) * 6. (multi-place)
* (should have at least one parameter, one parameter equals to case 5, zero * (should have at least one parameter, one parameter equals to case 5, zero
* parameter equals to case 1.) * parameter equals to case 1.)
* def __init__ ( * def __init__ (
* ** tensor: EagerTensor, * ** tensor: Tensor,
* ** place: paddle::platform::Place, * ** place: paddle::platform::Place,
* ** name: std::string) * ** name: std::string)
* 7. (multi-place) (should have at least one parameter, one parameter similar * 7. (multi-place) (should have at least one parameter, one parameter similar
...@@ -417,7 +412,7 @@ void AutoInitEagerTensorByTensor( ...@@ -417,7 +412,7 @@ void AutoInitEagerTensorByTensor(
* ** place: paddle::platform::Place, * ** place: paddle::platform::Place,
* ** name: std::string) * ** name: std::string)
* **/ * **/
int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
// set a flag to record use kwargs or not // set a flag to record use kwargs or not
bool flag_kwargs = false; bool flag_kwargs = false;
if (kwargs) flag_kwargs = true; if (kwargs) flag_kwargs = true;
...@@ -427,7 +422,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -427,7 +422,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* kw_persistable = NULL; PyObject* kw_persistable = NULL;
PyObject* kw_stop_gradient = NULL; PyObject* kw_stop_gradient = NULL;
PyObject* kw_value = NULL; // receive PyArray or EagerTensor PyObject* kw_value = NULL; // receive PyArray or Tensor
PyObject* kw_place = NULL; PyObject* kw_place = NULL;
PyObject* kw_name = NULL; PyObject* kw_name = NULL;
PyObject* kw_dims = NULL; PyObject* kw_dims = NULL;
...@@ -490,7 +485,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -490,7 +485,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (!flag_kwargs) { if (!flag_kwargs) {
// case 1 // case 1
VLOG(6) << "Calling case1's initializer."; VLOG(6) << "Calling case1's initializer.";
EmptyEagerTensorInitializer( EmptyTensorInitializer(
py_tensor_ptr, py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"), egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
egr::Controller::Instance().GetExpectedPlace()); egr::Controller::Instance().GetExpectedPlace());
...@@ -499,28 +494,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -499,28 +494,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (kw_value != NULL) { if (kw_value != NULL) {
if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) { if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
VLOG(6) << "Calling case3's or case4's initializer"; VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
flag_kwargs, args_num); args_num);
return 0; return 0;
} else if (PyObject_IsInstance( } else if (PyObject_IsInstance(
kw_value, reinterpret_cast<PyObject*>(p_tensor_type))) { kw_value, reinterpret_cast<PyObject*>(p_tensor_type))) {
VLOG(6) << "Calling case5's or case6's initializer"; VLOG(6) << "Calling case5's or case6's initializer";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else if (PyObject_IsInstance(kw_value, } else if (PyObject_IsInstance(kw_value,
reinterpret_cast<PyObject*>( reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) { g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer."; VLOG(6) << "Calling case7's initializer.";
AutoInitEagerTensorByTensor( AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
py_tensor_ptr, kws_map, args, flag_kwargs, args_num, args_num,
/* false means not init by egr tensor*/ false); /* false means not init by egr tensor*/ false);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"Could not parse the first keyword argument successfully, " "Could not parse the first keyword argument successfully, "
"the first keyword argument is value, but it should be PyArray " "the first keyword argument is value, but it should be PyArray "
"or EagerTensor or framework::Tensor. " "or Tensor or framework::Tensor. "
"Please check your input first and make sure you are on the " "Please check your input first and make sure you are on the "
"right way.")); "right way."));
} }
...@@ -573,18 +568,18 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -573,18 +568,18 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
CastPyArg2ProtoType(kw_type, 0); CastPyArg2ProtoType(kw_type, 0);
bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0); bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0);
EmptyEagerTensorInitializer( EmptyTensorInitializer(py_tensor_ptr, act_name,
py_tensor_ptr, act_name, egr::Controller::Instance().GetExpectedPlace(),
egr::Controller::Instance().GetExpectedPlace(), persistable, persistable,
/* stop_gradient */ true, dtype, dims, var_type); /* stop_gradient */ true, dtype, dims, var_type);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"We not only support construct EagerTensor from numpy value " "We not only support construct Tensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) " "or tensor(Tensor or framework::Tensor) "
"with python kwargs by this initializer, " "with python kwargs by this initializer, "
"but also even support dtype to init a empty EagerTensor. " "but also even support dtype to init a empty Tensor. "
"Please check your input first and make sure you call the existed " "Please check your input first and make sure you call the existed "
"constructor.")); "constructor."));
} }
...@@ -595,28 +590,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -595,28 +590,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer."; VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else if (PyObject_IsInstance( } else if (PyObject_IsInstance(
arg0_ptr, reinterpret_cast<PyObject*>(p_tensor_type))) { arg0_ptr, reinterpret_cast<PyObject*>(p_tensor_type))) {
VLOG(6) << "Calling case5's or case6's initializer."; VLOG(6) << "Calling case5's or case6's initializer.";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>( } else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) { g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer."; VLOG(6) << "Calling case7's initializer.";
AutoInitEagerTensorByTensor( AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
py_tensor_ptr, kws_map, args, flag_kwargs, args_num, args_num,
/* false means not init by egr tensor*/ false); /* false means not init by egr tensor*/ false);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"We support construct EagerTensor from numpy value " "We support construct Tensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) " "or tensor(Tensor or framework::Tensor) "
"with python args and kwargs by this initializer, " "with python args and kwargs by this initializer, "
"but the first argument should be PyArray or EagerTensor or " "but the first argument should be PyArray or Tensor or "
"framework::Tensor. " "framework::Tensor. "
"Please check your input first and make sure you call the existed " "Please check your input first and make sure you call the existed "
"constructor.")); "constructor."));
...@@ -626,8 +621,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -626,8 +621,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer."; VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -658,15 +653,14 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -658,15 +653,14 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
paddle::framework::proto::VarType::Type var_type = paddle::framework::proto::VarType::Type var_type =
CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3); CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4); bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
EmptyEagerTensorInitializer( EmptyTensorInitializer(py_tensor_ptr, act_name,
py_tensor_ptr, act_name, egr::Controller::Instance().GetExpectedPlace(),
egr::Controller::Instance().GetExpectedPlace(), persistable, true, persistable, true, dtype, dims, var_type);
dtype, dims, var_type);
return 0; return 0;
} else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { } else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's initializer."; VLOG(6) << "Calling case3's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -680,8 +674,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -680,8 +674,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0); PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) { if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer"; VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -696,8 +690,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -696,8 +690,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (!flag_kwargs) { if (!flag_kwargs) {
// case 3 // case 3
VLOG(6) << "Calling case3's initializer."; VLOG(6) << "Calling case3's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs, AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num); args_num);
return 0; return 0;
} else { // six position args, remaining arguments are kwargs, but this } else { // six position args, remaining arguments are kwargs, but this
// is not a right way // is not a right way
...@@ -716,7 +710,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -716,7 +710,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
return 1; return 1;
} }
static void EagerTensorDealloc(TensorObject* self) { static void TensorDealloc(TensorObject* self) {
self->tensor.~Tensor(); self->tensor.~Tensor();
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self)); Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
} }
...@@ -735,19 +729,19 @@ void BindEager(pybind11::module* module) { ...@@ -735,19 +729,19 @@ void BindEager(pybind11::module* module) {
auto& internals = pybind11::detail::get_internals(); auto& internals = pybind11::detail::get_internals();
auto heap_type = reinterpret_cast<PyHeapTypeObject*>( auto heap_type = reinterpret_cast<PyHeapTypeObject*>(
internals.default_metaclass->tp_alloc(internals.default_metaclass, 0)); internals.default_metaclass->tp_alloc(internals.default_metaclass, 0));
heap_type->ht_name = ToPyObject("EagerTensor"); heap_type->ht_name = ToPyObject("Tensor");
heap_type->ht_qualname = ToPyObject("EagerTensor"); heap_type->ht_qualname = ToPyObject("Tensor");
auto type = &heap_type->ht_type; auto type = &heap_type->ht_type;
type->tp_name = "EagerTensor"; type->tp_name = "Tensor";
type->tp_basicsize = sizeof(TensorObject); type->tp_basicsize = sizeof(TensorObject);
type->tp_dealloc = (destructor)EagerTensorDealloc; type->tp_dealloc = (destructor)TensorDealloc;
type->tp_as_number = &number_methods; type->tp_as_number = &number_methods;
type->tp_as_sequence = &sequence_methods; type->tp_as_sequence = &sequence_methods;
type->tp_as_mapping = &mapping_methods; type->tp_as_mapping = &mapping_methods;
type->tp_methods = variable_methods; type->tp_methods = variable_methods;
type->tp_getset = variable_properties; type->tp_getset = variable_properties;
type->tp_init = EagerTensorInit; type->tp_init = TensorInit;
type->tp_new = EagerTensorNew; type->tp_new = TensorNew;
Py_INCREF(internals.instance_base); Py_INCREF(internals.instance_base);
type->tp_base = reinterpret_cast<PyTypeObject*>(internals.instance_base); type->tp_base = reinterpret_cast<PyTypeObject*>(internals.instance_base);
type->tp_flags |= type->tp_flags |=
...@@ -764,8 +758,8 @@ void BindEager(pybind11::module* module) { ...@@ -764,8 +758,8 @@ void BindEager(pybind11::module* module) {
} }
Py_INCREF(type); Py_INCREF(type);
if (PyModule_AddObject(m.ptr(), "EagerTensor", if (PyModule_AddObject(m.ptr(), "Tensor", reinterpret_cast<PyObject*>(type)) <
reinterpret_cast<PyObject*>(type)) < 0) { 0) {
Py_DECREF(type); Py_DECREF(type);
Py_DECREF(m.ptr()); Py_DECREF(m.ptr());
PADDLE_THROW(platform::errors::Fatal( PADDLE_THROW(platform::errors::Fatal(
......
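TensorInit above is effectively a hand-rolled overload resolver: it counts the positional arguments and inspects the runtime kind of the first one (numpy array, Tensor, or framework::Tensor), then routes to AutoInitTensorByPyArray or AutoInitTensorByTensor. A condensed standalone sketch of that dispatch shape, with hypothetical stand-in types in place of the CPython objects:

#include <cstddef>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

// Hypothetical stand-ins for the argument kinds TensorInit distinguishes.
struct NumpyArray { std::vector<float> data; };
struct EagerTensorArg { std::string name; };
struct FrameworkTensorArg { std::string name; };
using FirstArg =
    std::variant<std::monostate, NumpyArray, EagerTensorArg, FrameworkTensorArg>;

// Mirrors the shape of TensorInit's dispatch: look at how many positional
// arguments there are and what kind the first one is, then pick an initializer.
int TensorInitSketch(const FirstArg& arg0, std::size_t args_num) {
  if (args_num == 0) {
    std::cout << "case 1: empty tensor with a generated name\n";
  } else if (std::holds_alternative<NumpyArray>(arg0)) {
    std::cout << "case 3/4: init from a numpy value\n";
  } else if (std::holds_alternative<EagerTensorArg>(arg0)) {
    std::cout << "case 5/6: init from another Tensor\n";
  } else if (std::holds_alternative<FrameworkTensorArg>(arg0)) {
    std::cout << "case 7: init from a framework::Tensor\n";
  } else {
    std::cout << "unsupported first argument\n";
    return 1;  // mirrors the failing tp_init returning non-zero
  }
  return 0;
}

int main() {
  TensorInitSketch(std::monostate{}, 0);
  TensorInitSketch(NumpyArray{{1.f, 2.f}}, 1);
  TensorInitSketch(EagerTensorArg{"x"}, 1);
  TensorInitSketch(FrameworkTensorArg{"y"}, 1);
}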
...@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args, ...@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_api_read_next_eager_tensor_list(PyObject* self, static PyObject* eager_api_read_next_tensor_list(PyObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) {
EAGER_TRY EAGER_TRY
auto tensor_base_list = auto tensor_base_list =
CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
...@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = { ...@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = {
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy, {"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"read_next_eager_tensor_list", {"read_next_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_eager_tensor_list, (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}}; {NULL, NULL, 0, NULL}};
......
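variable_functions[] is the usual CPython pattern: a null-terminated table mapping a Python-visible name ("read_next_tensor_list") to a C callback, and this commit only renames both halves of that entry. A standalone sketch of the same table-driven dispatch without the Python headers; MethodDef here is a hypothetical miniature of PyMethodDef:

#include <cstdio>
#include <cstring>

// Hypothetical miniature of PyMethodDef: a Python-visible name plus a handler.
struct MethodDef {
  const char* name;
  int (*fn)(int);
};

static int read_next_tensor_list(int n) { std::printf("read %d tensors\n", n); return 0; }
static int tensor_copy(int n) { std::printf("copy %d tensors\n", n); return 0; }

// Null-terminated table, mirroring variable_functions[] above.
static MethodDef functions[] = {
    {"read_next_tensor_list", read_next_tensor_list},
    {"tensor_copy", tensor_copy},
    {nullptr, nullptr}};

int Dispatch(const char* name, int arg) {
  for (MethodDef* m = functions; m->name != nullptr; ++m) {
    if (std::strcmp(m->name, name) == 0) return m->fn(arg);
  }
  std::printf("unknown method: %s\n", name);
  return -1;
}

int main() { return Dispatch("read_next_tensor_list", 3); }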
...@@ -35,15 +35,15 @@ limitations under the License. */ ...@@ -35,15 +35,15 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace pybind { namespace pybind {
extern void InitEagerTensorWithNumpyValue(TensorObject* self, extern void InitTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array, const pybind11::object& array,
bool zero_copy); bool zero_copy);
extern PyTypeObject* p_tensor_type; extern PyTypeObject* p_tensor_type;
static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true, self->tensor.initialized(), true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
...@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, ...@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method__is_initialized(TensorObject* self, static PyObject* tensor_method__is_initialized(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.initialized()); return ToPyObject(self->tensor.initialized());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method__copy_to(TensorObject* self, static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1); auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
auto cp_tensor = auto cp_tensor =
...@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self, ...@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
std::string orig_name = self->tensor.name(); std::string orig_name = self->tensor.name();
...@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, ...@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
...@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, ...@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
if (egr::Controller::Instance().HasGrad()) { if (egr::Controller::Instance().HasGrad()) {
auto meta = egr::EagerUtils::autograd_meta(&(self->tensor)); auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
...@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, ...@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__clear_gradient(TensorObject* self, static PyObject* tensor__clear_gradient(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
VLOG(4) << "ClearGradient " << self->tensor.name(); VLOG(4) << "ClearGradient " << self->tensor.name();
paddle::experimental::Tensor* grad; paddle::experimental::Tensor* grad;
...@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self, ...@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
VLOG(4) << "ZeroGrads " << self->tensor.name(); VLOG(4) << "ZeroGrads " << self->tensor.name();
...@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, ...@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__share_buffer_to(TensorObject* self, static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
paddle::experimental::Tensor* dst_ptr = paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self, ...@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, static PyObject* tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor* dst_ptr = paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, ...@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor* src_ptr = paddle::experimental::Tensor* src_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, ...@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__is_shared_underline_tensor_with( static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
TensorObject* self, PyObject* args, PyObject* kwargs) { PyObject* args,
EAGER_SYNC_TRY PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
PADDLE_ENFORCE_EQ(src_tensor.initialized(), true, PADDLE_ENFORCE_EQ(src_tensor.initialized(), true,
...@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with( ...@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with(
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, static PyObject* tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true, self->tensor.initialized(), true,
platform::errors::InvalidArgument("Tensor %s has not been initialized!", platform::errors::InvalidArgument("Tensor %s has not been initialized!",
...@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, ...@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
if (self->tensor.is_dense_tensor()) { if (self->tensor.is_dense_tensor()) {
auto* tensor = auto* tensor =
static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get()); static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
...@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, ...@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
} }
// NOTE(wuweilong): Set value and not change self's original place // NOTE(wuweilong): Set value and not change self's original place
static PyObject* eager_tensor_method_set_value(TensorObject* self, static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) {
EAGER_TRY EAGER_TRY
VLOG(4) << "Value " << self->tensor.name(); VLOG(4) << "Value " << self->tensor.name();
pybind11::object numpy_value = pybind11::object numpy_value =
pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true); pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
InitEagerTensorWithNumpyValue(self, numpy_value, false); InitTensorWithNumpyValue(self, numpy_value, false);
Py_INCREF(Py_None); Py_INCREF(Py_None);
return Py_None; return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyMethodDef variable_methods[] = { PyMethodDef variable_methods[] = {
{"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy, {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_initialized", {"_is_initialized",
(PyCFunction)(void (*)(void))eager_tensor_method__is_initialized, (PyCFunction)(void (*)(void))tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to, {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_, {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"reconstruct_from_", {"reconstruct_from_",
(PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_, (PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads, {"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_clear_gradient", {"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient,
(PyCFunction)(void (*)(void))eager_tensor__clear_gradient,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_zero_grads", (PyCFunction)(void (*)(void))eager_tensor__zero_grads, {"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_buffer_to", {"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
(PyCFunction)(void (*)(void))eager_tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_buffer_with", {"_is_shared_buffer_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_buffer_with, (PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_underline_tensor_to", {"_share_underline_tensor_to",
(PyCFunction)(void (*)(void))eager_tensor__share_underline_tensor_to, (PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_underline_tensor_with", {"_is_shared_underline_tensor_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_underline_tensor_with, (PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"detach", (PyCFunction)(void (*)(void))eager_tensor_method_detach, {"detach", (PyCFunction)(void (*)(void))tensor_method_detach,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"get_tensor", {"get_tensor",
(PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor, (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value, {"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}}; {NULL, NULL, 0, NULL}};
......
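Besides the renames, every method above switches its opening macro from EAGER_SYNC_TRY to EAGER_TRY. Both macros are defined elsewhere in the pybind layer (not shown in this diff) and wrap the body so C++ exceptions are translated into Python errors before the binding returns. A rough standalone sketch of that wrap-the-body pattern, with hypothetical macro names:

#include <iostream>
#include <stdexcept>

// Hypothetical equivalents of EAGER_TRY / EAGER_CATCH_AND_THROW_RETURN_NULL:
// open a try block at the top of the binding and translate any C++ exception
// into an error return at the bottom.
#define SKETCH_TRY try {
#define SKETCH_CATCH_RETURN_NULL                                          \
  }                                                                       \
  catch (const std::exception& e) {                                       \
    std::cerr << "would set a Python error here: " << e.what() << "\n";   \
    return nullptr;                                                       \
  }

const char* tensor_method_numpy_sketch(bool initialized) {
  SKETCH_TRY
  if (!initialized) throw std::runtime_error("tensor has not been initialized");
  return "ndarray";
  SKETCH_CATCH_RETURN_NULL
}

int main() {
  std::cout << (tensor_method_numpy_sketch(true) ? "ok" : "null") << "\n";
  std::cout << (tensor_method_numpy_sketch(false) ? "ok" : "null") << "\n";
}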
...@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"( ...@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"(
auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_TEMPLATE = R"( const char* CAST_VAR_PTR_TEMPLATE = R"(
auto %s = GetEagerTensorPtrFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_LIST_TEMPLATE = R"( const char* CAST_VAR_PTR_LIST_TEMPLATE = R"(
auto %s = GetEagerTensorPtrListFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_SIZE_T_TEMPLATE = R"( const char* CAST_SIZE_T_TEMPLATE = R"(
auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)";
......
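The CAST_*_TEMPLATE strings are format strings that the op-function generator expands once per operator argument; only the helper names they emit change here (GetEagerTensorPtr* becomes GetTensorPtr*). A rough sketch of that expansion step, using std::snprintf in place of paddle::string::Sprintf and a made-up argument order:

#include <cstdio>
#include <string>

// Hypothetical miniature of the generator's format-string expansion.
// The real CAST_VAR_PTR_TEMPLATE emits one GetTensorPtrFromArgs(...) call per argument.
static const char* kCastVarPtrTemplate =
    "  auto %s = GetTensorPtrFromArgs(\"%s\", \"%s\", args, %d, %s);";

std::string EmitCast(const std::string& var, const std::string& op,
                     const std::string& arg, int idx, bool dispensable) {
  char buf[256];
  std::snprintf(buf, sizeof(buf), kCastVarPtrTemplate, var.c_str(), op.c_str(),
                arg.c_str(), idx, dispensable ? "true" : "false");
  return buf;
}

int main() {
  // prints:   auto Out = GetTensorPtrFromArgs("scale", "Out", args, 2, false);
  std::puts(EmitCast("Out", "scale", "Out", 2, false).c_str());
}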
...@@ -35,14 +35,14 @@ namespace pybind { ...@@ -35,14 +35,14 @@ namespace pybind {
extern PyTypeObject* p_tensor_type; extern PyTypeObject* p_tensor_type;
PyObject* eager_tensor_properties_get_name(TensorObject* self, void* closure) { PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.name()); return ToPyObject(self->tensor.name());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
if (self->tensor.is_dense_tensor()) { if (self->tensor.is_dense_tensor()) {
return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR); return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
} else { } else {
...@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { ...@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_name(TensorObject* self, PyObject* value, int tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
self->tensor.set_name(CastPyArg2AttrString(value, 0)); self->tensor.set_name(CastPyArg2AttrString(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_stop_gradient(TensorObject* self, PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->StopGradient()); return ToPyObject(meta->StopGradient());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
if (egr::egr_utils_api::IsLeafTensor(self->tensor)) { if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
std::shared_ptr<egr::GradNodeBase> grad_node = std::shared_ptr<egr::GradNodeBase> grad_node =
egr::EagerUtils::grad_node(self->tensor); egr::EagerUtils::grad_node(self->tensor);
...@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { ...@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, int tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto src = CastPyArg2Tensor(value, 0); auto src = CastPyArg2Tensor(value, 0);
PADDLE_ENFORCE( PADDLE_ENFORCE(
egr::egr_utils_api::IsLeafTensor(self->tensor), egr::egr_utils_api::IsLeafTensor(self->tensor),
...@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, ...@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
int eager_tensor_properties_set_stop_gradient(TensorObject* self, int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
PyObject* value, void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0)); meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_persistable(TensorObject* self, PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
void* closure) { EAGER_TRY
EAGER_SYNC_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->Persistable()); return ToPyObject(meta->Persistable());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_persistable(TensorObject* self, PyObject* value, int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetPersistable(CastPyArg2AttrBoolean(value, 0)); meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto ddim = self->tensor.shape(); auto ddim = self->tensor.shape();
std::vector<int64_t> value; std::vector<int64_t> value;
size_t rank = static_cast<size_t>(ddim.size()); size_t rank = static_cast<size_t>(ddim.size());
...@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { ...@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_place(TensorObject* self, void* closure) { PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.inner_place()); return ToPyObject(self->tensor.inner_place());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_place_str(TensorObject* self, PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
void* closure) { EAGER_TRY
EAGER_SYNC_TRY
std::stringstream ostr; std::stringstream ostr;
ostr << self->tensor.inner_place(); ostr << self->tensor.inner_place();
return ToPyObject(ostr.str()); return ToPyObject(ostr.str());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_dtype(TensorObject* self, void* closure) { PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject( return ToPyObject(
paddle::framework::TransToProtoVarType(self->tensor.type())); paddle::framework::TransToProtoVarType(self->tensor.type()));
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
struct PyGetSetDef variable_properties[] = { struct PyGetSetDef variable_properties[] = {
{"grad", (getter)eager_tensor_properties_get_grad, {"grad", (getter)tensor_properties_get_grad,
(setter)eager_tensor_properties_set_grad, nullptr, nullptr}, (setter)tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)eager_tensor_properties_get_name, {"name", (getter)tensor_properties_get_name,
(setter)eager_tensor_properties_set_name, nullptr, nullptr}, (setter)tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient, {"stop_gradient", (getter)tensor_properties_get_stop_gradient,
(setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr}, (setter)tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)eager_tensor_properties_get_persistable, {"persistable", (getter)tensor_properties_get_persistable,
(setter)eager_tensor_properties_set_persistable, nullptr, nullptr}, (setter)tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr, {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
nullptr}, // {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
// {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
// nullptr, // nullptr,
// nullptr}, // nullptr},
{"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr, {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
nullptr}, {"_place_str", (getter)tensor_properties_get_place_str, nullptr, nullptr,
{"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
nullptr, nullptr},
{"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
nullptr},
{"type", (getter)eager_tensor_properties_get_type, nullptr, nullptr,
nullptr}, nullptr},
{"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
{"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}}; {nullptr, nullptr, nullptr, nullptr, nullptr}};
} // namespace pybind } // namespace pybind
......
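variable_properties[] pairs each attribute name with a getter and, where assignment is allowed, a setter; entries such as shape, place, dtype, and type pass nullptr for the setter and are therefore read-only from Python. A small standalone sketch of that read-only-versus-writable distinction, with a hypothetical property table and Tensor stand-in:

#include <iostream>
#include <string>

struct Tensor { std::string name = "t0"; bool stop_gradient = true; };

// Hypothetical miniature of PyGetSetDef: getter always present, setter optional.
struct GetSetDef {
  const char* attr;
  std::string (*getter)(const Tensor&);
  bool (*setter)(Tensor&, const std::string&);  // nullptr => read-only attribute
};

static std::string get_name(const Tensor& t) { return t.name; }
static bool set_name(Tensor& t, const std::string& v) { t.name = v; return true; }
static std::string get_shape(const Tensor&) { return "[2, 3]"; }

static GetSetDef properties[] = {
    {"name", get_name, set_name},
    {"shape", get_shape, nullptr},  // read-only, like shape/place/dtype/type above
    {nullptr, nullptr, nullptr}};

int main() {
  Tensor t;
  for (GetSetDef* p = properties; p->attr != nullptr; ++p) {
    std::cout << p->attr << " = " << p->getter(t)
              << (p->setter ? " (writable)" : " (read-only)") << "\n";
  }
}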
...@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) { ...@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) {
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be " "argument (position %d) must be "
"EagerTensor, but got %s", "EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name)); arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
} }
} }
...@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) { ...@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be " "argument (position %d) must be "
"EagerTensor, but got %s", "EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name)); arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
} }
} }
...@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
if (PyList_Check(list)) { if (PyList_Check(list)) {
Py_ssize_t len = PyList_Size(list); Py_ssize_t len = PyList_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) { if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got " "%s(): argument '%s' (position %d) must be list of Tensors, but got "
...@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
} }
} else if (PyTuple_Check(list)) { } else if (PyTuple_Check(list)) {
Py_ssize_t len = PyTuple_Size(list); Py_ssize_t len = PyTuple_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) { if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got " "%s(): argument '%s' (position %d) must be list of Tensors, but got "
...@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
return result; return result;
} }
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& arg_name,
ssize_t arg_idx, bool dispensable) { PyObject* args,
ssize_t arg_idx,
bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx); PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
if (PyTuple_Check(obj)) { if (PyTuple_Check(obj)) {
...@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( ...@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
return &(reinterpret_cast<TensorObject*>(obj)->tensor); return &(reinterpret_cast<TensorObject*>(obj)->tensor);
} }
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs( std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) { ssize_t arg_idx, bool dispensable) {
PyObject* list = PyTuple_GET_ITEM(args, arg_idx); PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
......
...@@ -65,15 +65,15 @@ PyObject* ToPyObject( ...@@ -65,15 +65,15 @@ PyObject* ToPyObject(
const std::unordered_map<std::string, std::vector<std::string>>& value); const std::unordered_map<std::string, std::vector<std::string>>& value);
template <typename Tuple, size_t N> template <typename Tuple, size_t N>
struct TupleEagerTensorResult { struct TupleTensorResult {
static void Run(const Tuple& out, PyObject* result) { static void Run(const Tuple& out, PyObject* result) {
TupleEagerTensorResult<Tuple, N - 1>::Run(out, result); TupleTensorResult<Tuple, N - 1>::Run(out, result);
PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out))); PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out)));
} }
}; };
template <typename Tuple> template <typename Tuple>
struct TupleEagerTensorResult<Tuple, 1> { struct TupleTensorResult<Tuple, 1> {
static void Run(const Tuple& out, PyObject* result) { static void Run(const Tuple& out, PyObject* result) {
PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out))); PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out)));
} }
...@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) { ...@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) {
auto len = sizeof...(Args); auto len = sizeof...(Args);
PyObject* result = PyTuple_New(len); PyObject* result = PyTuple_New(len);
TupleEagerTensorResult<decltype(out), sizeof...(Args)>::Run(out, result); TupleTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
return result; return result;
} }
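For readers unfamiliar with the pattern above, TupleTensorResult is the usual compile-time recursion for walking a std::tuple element by element. The standalone sketch below (plain C++17, hypothetical names, printing instead of building a PyTuple) shows the same idea and is not part of this patch:

#include <cstddef>
#include <iostream>
#include <tuple>

// Recursive case: handle elements [0, N-2] first, then element N-1,
// so elements are visited in order, mirroring TupleTensorResult::Run.
template <typename Tuple, std::size_t N>
struct TuplePrinter {
  static void Run(const Tuple& t) {
    TuplePrinter<Tuple, N - 1>::Run(t);
    std::cout << std::get<N - 1>(t) << "\n";
  }
};

// Base case: stop the recursion at the first element.
template <typename Tuple>
struct TuplePrinter<Tuple, 1> {
  static void Run(const Tuple& t) { std::cout << std::get<0>(t) << "\n"; }
};

template <typename... Args>
void PrintTuple(const std::tuple<Args...>& t) {
  TuplePrinter<decltype(t), sizeof...(Args)>::Run(t);
}

int main() {
  PrintTuple(std::make_tuple(1, 2.5, "three"));  // prints 1, 2.5, three in order
  return 0;
}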
...@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false); ssize_t arg_idx, bool dispensable = false);
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& arg_name,
ssize_t arg_idx, bool dispensable = false); PyObject* args,
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs( ssize_t arg_idx,
bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false); ssize_t arg_idx, bool dispensable = false);
......
...@@ -19,7 +19,6 @@ limitations under the License. */ ...@@ -19,7 +19,6 @@ limitations under the License. */
#include "pybind11/pybind11.h" #include "pybind11/pybind11.h"
#define EAGER_TRY try { #define EAGER_TRY try {
#define EAGER_SYNC_TRY try {
#define EAGER_CATCH_AND_THROW_RETURN_NULL \ #define EAGER_CATCH_AND_THROW_RETURN_NULL \
} \ } \
catch (...) { \ catch (...) { \
......
...@@ -222,6 +222,14 @@ class PADDLE_API Tensor final { ...@@ -222,6 +222,14 @@ class PADDLE_API Tensor final {
*/ */
bool is_dense_tensor() const; bool is_dense_tensor() const;
/**
* @brief Determine whether tensor is SelectedRows
*
* @return true
* @return false
*/
bool is_selected_rows() const;
/* Part 3: Device and Backend methods */ /* Part 3: Device and Backend methods */
/** /**
......
...@@ -29,7 +29,6 @@ limitations under the License. */ ...@@ -29,7 +29,6 @@ limitations under the License. */
#include "paddle/pten/core/tensor_base.h" #include "paddle/pten/core/tensor_base.h"
#include "paddle/pten/core/tensor_meta.h" #include "paddle/pten/core/tensor_meta.h"
#include "paddle/pten/core/tensor_utils.h" #include "paddle/pten/core/tensor_utils.h"
/** /**
* [ Why still include the fluid headers? ] * [ Why still include the fluid headers? ]
* *
...@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); } ...@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); }
bool Tensor::is_dense_tensor() const { bool Tensor::is_dense_tensor() const {
return pten::DenseTensor::classof(impl_.get()); return pten::DenseTensor::classof(impl_.get());
} }
bool Tensor::is_selected_rows() const {
return pten::SelectedRows::classof(impl_.get());
}
/* Part 3: Device and Backend methods */ /* Part 3: Device and Backend methods */
PlaceType Tensor::place() const { PlaceType Tensor::place() const {
......
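The new Tensor::is_selected_rows() mirrors is_dense_tensor() and relies on the classof() check that pten's TypeInfoTraits provides. The standalone sketch below uses hypothetical types, and dynamic_cast is only a stand-in for pten's static type-id lookup; it illustrates how such a predicate discriminates the concrete impl behind a TensorBase pointer:

#include <iostream>
#include <memory>

struct TensorBase { virtual ~TensorBase() = default; };

struct DenseTensor : TensorBase {
  // Stand-in for TypeInfoTraits<TensorBase, DenseTensor>::classof.
  static bool classof(const TensorBase* b) {
    return dynamic_cast<const DenseTensor*>(b) != nullptr;
  }
};

struct SelectedRows : TensorBase {
  static bool classof(const TensorBase* b) {
    return dynamic_cast<const SelectedRows*>(b) != nullptr;
  }
};

int main() {
  std::shared_ptr<TensorBase> impl = std::make_shared<SelectedRows>();
  std::cout << DenseTensor::classof(impl.get()) << "\n";   // prints 0
  std::cout << SelectedRows::classof(impl.get()) << "\n";  // prints 1
  return 0;
}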
...@@ -24,7 +24,7 @@ limitations under the License. */ ...@@ -24,7 +24,7 @@ limitations under the License. */
#include <boost/variant.hpp> #include <boost/variant.hpp>
namespace egr { namespace egr {
class EagerTensor; class EagerVariable;
} }
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> { ...@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> {
}; };
template <> template <>
struct NameVarMapTrait<egr::EagerTensor> { struct NameVarMapTrait<egr::EagerVariable> {
using Type = using Type =
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>; std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>;
}; };
} // namespace details } // namespace details
...@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type; ...@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type;
using NameVarBaseMap = NameVarMap<VarBase>; using NameVarBaseMap = NameVarMap<VarBase>;
using NameVariableWrapperMap = NameVarMap<VariableWrapper>; using NameVariableWrapperMap = NameVarMap<VariableWrapper>;
using NameTensorMap = NameVarMap<egr::EagerTensor>; using NameTensorMap = NameVarMap<egr::EagerVariable>;
using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>; using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>;
......
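NameVarMapTrait maps a variable type to its name-to-variables container, so NameTensorMap keeps the same shape as NameVarBaseMap while holding egr::EagerVariable. A compressed standalone sketch of the trait idea (hypothetical VarA/VarB types, a single primary template instead of the per-type specializations used here) is:

#include <map>
#include <memory>
#include <string>
#include <vector>

struct VarA {};
struct VarB {};

// Trait: for a variable type T, the map used to pass named inputs/outputs.
template <typename T>
struct NameVarMapTrait {
  using Type = std::map<std::string, std::vector<std::shared_ptr<T>>>;
};

template <typename T>
using NameVarMap = typename NameVarMapTrait<T>::Type;

int main() {
  NameVarMap<VarA> ins;   // name -> vector<shared_ptr<VarA>>
  NameVarMap<VarB> outs;  // same container shape, different variable type
  ins["X"].push_back(std::make_shared<VarA>());
  outs["Out"].push_back(std::make_shared<VarB>());
  return 0;
}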
...@@ -29,10 +29,6 @@ limitations under the License. */ ...@@ -29,10 +29,6 @@ limitations under the License. */
// See Note [ Why still include the fluid headers? ] // See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h"
namespace egr {
class EagerTensor;
} // namespace egr
namespace pten { namespace pten {
class SelectedRows : public TensorBase, class SelectedRows : public TensorBase,
public TypeInfoTraits<TensorBase, SelectedRows> { public TypeInfoTraits<TensorBase, SelectedRows> {
...@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase, ...@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase,
std::unique_ptr<DenseTensor> value_{nullptr}; std::unique_ptr<DenseTensor> value_{nullptr};
int64_t height_; // height indicates the underline tensor's height int64_t height_; // height indicates the underline tensor's height
std::unique_ptr<RWLock> rwlock_{nullptr}; std::unique_ptr<RWLock> rwlock_{nullptr};
// TODO(jiabin): Remove this when we don't need EagerTensor support
// SelectedRows which is expected in next version.
/** Why we need this weird friend class?
* In eager mode, since some of ops doesn't support C++ API for now we need to
*use 'imperative::TraceOp' to run it.
* So, we need to support get a SelectedRows from egr::EagerTensor's
*framework::Variable obj and used it to reconstruct
* a new paddle::experimental::Tensor to support framework usage. However, we
*got 2 problems here.
* First, we got 2 unique_ptr in SelectedRows so that we can't support
*std::make_shared in EagerTensor's SetImplWithSelectedRows method,
* since we have to construct a shared_ptr for paddle::experimental::Tensor's
*impl.
* Second, when we are trying to support move constructor for SelectedRows we
*found that we can't get its rvalue from
* framework::Variable because it holds an obj of target type.
*
*
* The only three way to solve this problem is:
* 1. Just like what we have done, using friend class and just copy/move each
*member. In this way, we can avoid additional API
* and symbols.
* 2. Make pten::SelectedRows's member from unique_ptr to shared_ptr. However,
*this may cause some cost of performance.
* 3. Add some api to return or move member of framework::SelectedRows.
*However, it's not as safe as first solution.
* 4. Support all framework::SelectedRows related ops and make sure
*EagerTensor never holds framework::SelectedRows.
*
* If anyone got better ideas, welcome to contact JiabinYang, we are open for
*your help.
**/
friend class egr::EagerTensor;
}; };
} // namespace pten } // namespace pten
...@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): ...@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
expected_type += (core.VarBase, ) expected_type += (core.VarBase, )
# TODO(jiabin): uncomment it when we support declarative mode in eager # TODO(jiabin): uncomment it when we support declarative mode in eager
# if _in_eager_mode(): # if _in_eager_mode():
# expected_type += (core.eager.EagerTensor, ) # expected_type += (core.eager.Tensor, )
elif isinstance(input, core.VarBase): elif isinstance(input, core.VarBase):
raise TypeError( raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format( "Because received '{}' in {} is a imperative Variable.".format(
input_name, op_name)) input_name, op_name))
elif hasattr(core, "eager"): elif hasattr(core, "eager"):
if isinstance(input, core.eager.EagerTensor): if isinstance(input, core.eager.Tensor):
raise TypeError( raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format( "Because received '{}' in {} is a imperative Variable.".format(
......
...@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): ...@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
try: try:
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
data = self._reader.read_next_var_list() data = self._reader.read_next_var_list()
...@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): ...@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
while self._blocking_queue.size() >= len(self._places): while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
self._reader.read_next_var_list() self._reader.read_next_var_list()
...@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): ...@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
data = self._reader.read_next_var_list() data = self._reader.read_next_var_list()
......
...@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): ...@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
value = value.astype(dtype) value = value.astype(dtype)
if _in_eager_mode(): if _in_eager_mode():
return core.eager.EagerTensor(value, return core.eager.Tensor(value,
framework._current_expected_place(), framework._current_expected_place(), False,
False, zero_copy, name zero_copy, name if name else None, True)
if name else None, True)
else: else:
py_var = core.VarBase( py_var = core.VarBase(
value=value, value=value,
......
...@@ -222,7 +222,7 @@ def monkey_patch_math_varbase(): ...@@ -222,7 +222,7 @@ def monkey_patch_math_varbase():
# 2. create varbase for scalar # 2. create varbase for scalar
lhs_dtype = self.dtype lhs_dtype = self.dtype
if _in_eager_mode(): if _in_eager_mode():
other_var_should_be = core.eager.EagerTensor other_var_should_be = core.eager.Tensor
else: else:
other_var_should_be = core.VarBase other_var_should_be = core.VarBase
if not isinstance(other_var, other_var_should_be): if not isinstance(other_var, other_var_should_be):
...@@ -343,7 +343,7 @@ def monkey_patch_math_varbase(): ...@@ -343,7 +343,7 @@ def monkey_patch_math_varbase():
if core._in_eager_mode(): if core._in_eager_mode():
local_already_patch = _already_patch_eager_tensor local_already_patch = _already_patch_eager_tensor
_already_patch_eager_tensor = True _already_patch_eager_tensor = True
local_tensor = core.eager.EagerTensor local_tensor = core.eager.Tensor
else: else:
local_already_patch = _already_patch_varbase local_already_patch = _already_patch_varbase
_already_patch_varbase = True _already_patch_varbase = True
......
...@@ -150,7 +150,7 @@ def monkey_patch_varbase(): ...@@ -150,7 +150,7 @@ def monkey_patch_varbase():
""" """
if core._in_eager_mode(): if core._in_eager_mode():
base_tensor = core.eager.EagerTensor base_tensor = core.eager.Tensor
else: else:
base_tensor = core.VarBase base_tensor = core.VarBase
assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \ assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \
...@@ -180,9 +180,9 @@ def monkey_patch_varbase(): ...@@ -180,9 +180,9 @@ def monkey_patch_varbase():
"Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
self.name, self_tensor_np.dtype, value_np.dtype) self.name, self_tensor_np.dtype, value_np.dtype)
# NOTE(wuweilong): self could be VarBase or EagerTensor, the subsequent behaviors are defined in different files # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behaviors are defined in different files
# if self is VarBase, method value() returns a Variable that is bound in imperative.cc, and get_tensor() is bound in pybind.cc # if self is VarBase, method value() returns a Variable that is bound in imperative.cc, and get_tensor() is bound in pybind.cc
# if self is EagerTensor, method value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc # if self is Tensor, method value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc
# this interface behavior will be unified in the future. # this interface behavior will be unified in the future.
self.value().get_tensor().set(value_np, self.value().get_tensor().set(value_np,
framework._current_expected_place()) framework._current_expected_place())
...@@ -244,8 +244,8 @@ def monkey_patch_varbase(): ...@@ -244,8 +244,8 @@ def monkey_patch_varbase():
if grad_tensor is not None: if grad_tensor is not None:
if core._in_eager_mode(): if core._in_eager_mode():
assert isinstance( assert isinstance(
grad_tensor, core.eager.EagerTensor grad_tensor, core.eager.
), "The type of grad_tensor must be paddle.Tensor" Tensor), "The type of grad_tensor must be paddle.Tensor"
else: else:
assert isinstance( assert isinstance(
grad_tensor, paddle. grad_tensor, paddle.
...@@ -592,8 +592,8 @@ def monkey_patch_varbase(): ...@@ -592,8 +592,8 @@ def monkey_patch_varbase():
# [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]]) # [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
""" """
if core._in_eager_mode(): if core._in_eager_mode():
from paddle.tensor.to_string import eager_tensor_to_string from paddle.tensor.to_string import tensor_to_string
return eager_tensor_to_string(self) return tensor_to_string(self)
else: else:
from paddle.tensor.to_string import to_string from paddle.tensor.to_string import to_string
return to_string(self) return to_string(self)
...@@ -624,7 +624,7 @@ def monkey_patch_varbase(): ...@@ -624,7 +624,7 @@ def monkey_patch_varbase():
"Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy" "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy"
) )
if core._in_eager_mode(): if core._in_eager_mode():
new_varbase = core.eager.EagerTensor() new_varbase = core.eager.Tensor()
else: else:
new_varbase = core.VarBase() new_varbase = core.VarBase()
new_varbase.name = self.name + unique_name.generate("_deepcopy") new_varbase.name = self.name + unique_name.generate("_deepcopy")
...@@ -808,16 +808,16 @@ def monkey_patch_varbase(): ...@@ -808,16 +808,16 @@ def monkey_patch_varbase():
("__getitem__", __getitem__), ("item", item), ("__getitem__", __getitem__), ("item", item),
("__setitem__", __setitem__), ("_to", _to)): ("__setitem__", __setitem__), ("_to", _to)):
if core._in_eager_mode(): if core._in_eager_mode():
setattr(core.eager.EagerTensor, method_name, method) setattr(core.eager.Tensor, method_name, method)
else: else:
setattr(core.VarBase, method_name, method) setattr(core.VarBase, method_name, method)
if core._in_eager_mode(): if core._in_eager_mode():
setattr(core.eager.EagerTensor, "_grad_ivar", _grad_ivar) setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.EagerTensor, "_set_grad_ivar", _set_grad_ivar) setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.EagerTensor, "clear_gradient", clear_gradient) setattr(core.eager.Tensor, "clear_gradient", clear_gradient)
setattr(core.eager.EagerTensor, "clone", clone) setattr(core.eager.Tensor, "clone", clone)
setattr(core.eager.EagerTensor, "value", value) setattr(core.eager.Tensor, "value", value)
else: else:
setattr(core.VarBase, "__name__", "Tensor") setattr(core.VarBase, "__name__", "Tensor")
setattr(core.VarBase, "grad", grad) setattr(core.VarBase, "grad", grad)
......
...@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR, ...@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if _in_eager_mode(): if _in_eager_mode():
eager_tensor = core.eager.EagerTensor( eager_tensor = core.eager.Tensor(
dtype if dtype else core.VarDesc.VarType.FP32, dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True if type else core.VarDesc.VarType.LOD_TENSOR, True
...@@ -1076,7 +1076,7 @@ class VariableMetaClass(type): ...@@ -1076,7 +1076,7 @@ class VariableMetaClass(type):
t = type(instance) t = type(instance)
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
return issubclass(t, core.eager.EagerTensor) return issubclass(t, core.eager.Tensor)
return issubclass(t, core.VarBase) return issubclass(t, core.VarBase)
else: else:
return issubclass(t, Variable) return issubclass(t, Variable)
...@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase): ...@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase):
if hasattr(core, "eager"): if hasattr(core, "eager"):
_core_eager_eagertensor = core.eager.EagerTensor _core_eager_eagertensor = core.eager.Tensor
else: else:
_core_eager_eagertensor = object _core_eager_eagertensor = object
......
...@@ -85,10 +85,9 @@ class LayerHelperBase(object): ...@@ -85,10 +85,9 @@ class LayerHelperBase(object):
assert in_dygraph_mode( assert in_dygraph_mode(
), "to_variable could only be called in dygraph mode" ), "to_variable could only be called in dygraph mode"
if _in_eager_mode(): if _in_eager_mode():
return core.eager.EagerTensor(value, return core.eager.Tensor(value,
_current_expected_place(), False, _current_expected_place(), False,
False, name False, name if name else None, True)
if name else None, True)
else: else:
py_var = core.VarBase( py_var = core.VarBase(
value=value, value=value,
......
...@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase): ...@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase):
def __next__(self): def __next__(self):
try: try:
if _in_eager_mode(): if _in_eager_mode():
return core.eager.read_next_eager_tensor_list( return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
return self._reader.read_next_var_list() return self._reader.read_next_var_list()
......
...@@ -109,26 +109,26 @@ class EagerDtypeTestCase(unittest.TestCase): ...@@ -109,26 +109,26 @@ class EagerDtypeTestCase(unittest.TestCase):
core.VarDesc.VarType.COMPLEX128) core.VarDesc.VarType.COMPLEX128)
class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
def constructor(self, place): def constructor(self, place):
egr_tensor = core.eager.EagerTensor() egr_tensor = core.eager.Tensor()
self.assertEqual(egr_tensor.persistable, False) self.assertEqual(egr_tensor.persistable, False)
self.assertTrue("generated" in egr_tensor.name) self.assertTrue("generated" in egr_tensor.name)
self.assertEqual(egr_tensor.shape, []) self.assertEqual(egr_tensor.shape, [])
self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor.stop_gradient, True) self.assertEqual(egr_tensor.stop_gradient, True)
egr_tensor0 = core.eager.EagerTensor( egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32,
core.VarDesc.VarType.FP32, [4, 16, 16, 32], "test_eager_tensor", [4, 16, 16, 32], "test_eager_tensor",
core.VarDesc.VarType.LOD_TENSOR, True) core.VarDesc.VarType.LOD_TENSOR, True)
self.assertEqual(egr_tensor0.persistable, True) self.assertEqual(egr_tensor0.persistable, True)
self.assertEqual(egr_tensor0.name, "test_eager_tensor") self.assertEqual(egr_tensor0.name, "test_eager_tensor")
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
arr0 = np.random.rand(4, 16, 16, 32).astype('float32') arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False, egr_tensor1 = core.eager.Tensor(arr0, place, True, False,
"numpy_tensor1", False) "numpy_tensor1", False)
self.assertEqual(egr_tensor1.persistable, True) self.assertEqual(egr_tensor1.persistable, True)
self.assertEqual(egr_tensor1.name, "numpy_tensor1") self.assertEqual(egr_tensor1.name, "numpy_tensor1")
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
...@@ -138,8 +138,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -138,8 +138,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0)) self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))
arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64) arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True, egr_tensor2 = core.eager.Tensor(arr1, place, False, True,
"numpy_tensor2", True) "numpy_tensor2", True)
self.assertEqual(egr_tensor2.persistable, False) self.assertEqual(egr_tensor2.persistable, False)
self.assertEqual(egr_tensor2.name, "numpy_tensor2") self.assertEqual(egr_tensor2.name, "numpy_tensor2")
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
...@@ -149,7 +149,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -149,7 +149,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1)) self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))
arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32') arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
egr_tensor3 = core.eager.EagerTensor(arr2) egr_tensor3 = core.eager.Tensor(arr2)
self.assertEqual(egr_tensor3.persistable, False) self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor3.name) self.assertTrue("generated_tensor" in egr_tensor3.name)
self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64]) self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
...@@ -161,7 +161,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -161,7 +161,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2)) self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))
egr_tensor3.stop_gradient = False egr_tensor3.stop_gradient = False
egr_tensor4 = core.eager.EagerTensor(egr_tensor3) egr_tensor4 = core.eager.Tensor(egr_tensor3)
self.assertEqual(egr_tensor4.persistable, False) self.assertEqual(egr_tensor4.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor4.name) self.assertTrue("generated_tensor" in egr_tensor4.name)
self.assertEqual(egr_tensor4.shape, egr_tensor3.shape) self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
...@@ -174,7 +174,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -174,7 +174,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy())) np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))
arr4 = np.random.rand(4, 16, 16, 32).astype('float32') arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor5 = core.eager.EagerTensor(arr4, place) egr_tensor5 = core.eager.Tensor(arr4, place)
self.assertEqual(egr_tensor5.persistable, False) self.assertEqual(egr_tensor5.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor5.name) self.assertTrue("generated_tensor" in egr_tensor5.name)
self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
...@@ -183,7 +183,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -183,7 +183,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor5.place._equals(place)) self.assertTrue(egr_tensor5.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4)) self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))
egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace()) egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
self.assertEqual(egr_tensor6.persistable, False) self.assertEqual(egr_tensor6.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor6.name) self.assertTrue("generated_tensor" in egr_tensor6.name)
self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
...@@ -193,7 +193,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -193,7 +193,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy())) np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))
egr_tensor7 = core.eager.EagerTensor(arr4, place, True) egr_tensor7 = core.eager.Tensor(arr4, place, True)
self.assertEqual(egr_tensor7.persistable, True) self.assertEqual(egr_tensor7.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor7.name) self.assertTrue("generated_tensor" in egr_tensor7.name)
self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
...@@ -202,7 +202,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -202,7 +202,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor7.place._equals(place)) self.assertTrue(egr_tensor7.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4)) self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))
egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, "egr_tensor8") egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
self.assertEqual(egr_tensor8.persistable, False) self.assertEqual(egr_tensor8.persistable, False)
self.assertEqual(egr_tensor8.name, "egr_tensor8") self.assertEqual(egr_tensor8.name, "egr_tensor8")
self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
...@@ -212,7 +212,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -212,7 +212,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy())) np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))
egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True) egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
self.assertEqual(egr_tensor9.persistable, True) self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor9.name) self.assertTrue("generated_tensor" in egr_tensor9.name)
self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
...@@ -224,7 +224,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -224,7 +224,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
x = np.random.rand(3, 3).astype('float32') x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor() t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace()) t.set(x, paddle.fluid.CPUPlace())
egr_tensor10 = core.eager.EagerTensor(t, place) egr_tensor10 = core.eager.Tensor(t, place)
self.assertEqual(egr_tensor10.persistable, False) self.assertEqual(egr_tensor10.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor10.name) self.assertTrue("generated_tensor" in egr_tensor10.name)
self.assertEqual(egr_tensor10.shape, [3, 3]) self.assertEqual(egr_tensor10.shape, [3, 3])
...@@ -233,7 +233,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -233,7 +233,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor10.place._equals(place)) self.assertTrue(egr_tensor10.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor10.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed") egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
self.assertEqual(egr_tensor11.persistable, False) self.assertEqual(egr_tensor11.persistable, False)
self.assertTrue("framework_constructed" in egr_tensor11.name) self.assertTrue("framework_constructed" in egr_tensor11.name)
self.assertEqual(egr_tensor11.shape, [3, 3]) self.assertEqual(egr_tensor11.shape, [3, 3])
...@@ -242,7 +242,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -242,7 +242,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor11.place._equals(place)) self.assertTrue(egr_tensor11.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor11.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
egr_tensor12 = core.eager.EagerTensor(t) egr_tensor12 = core.eager.Tensor(t)
self.assertEqual(egr_tensor12.persistable, False) self.assertEqual(egr_tensor12.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor12.name) self.assertTrue("generated_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [3, 3]) self.assertEqual(egr_tensor12.shape, [3, 3])
...@@ -290,10 +290,10 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -290,10 +290,10 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.constructor(p) self.constructor(p)
def constructor_with_kwargs(self, place): def constructor_with_kwargs(self, place):
# init EagerTensor by Python array # init Tensor by Python array
arr = np.random.rand(4, 16, 16, 32).astype('float32') arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor0 = core.eager.EagerTensor(value=arr) egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False) self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name) self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
...@@ -303,7 +303,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -303,7 +303,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor0.stop_gradient, True) self.assertEqual(egr_tensor0.stop_gradient, True)
egr_tensor1 = core.eager.EagerTensor(value=arr, place=place) egr_tensor1 = core.eager.Tensor(value=arr, place=place)
self.assertEqual(egr_tensor1.persistable, False) self.assertEqual(egr_tensor1.persistable, False)
self.assertTrue("generated" in egr_tensor1.name) self.assertTrue("generated" in egr_tensor1.name)
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
...@@ -311,7 +311,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -311,7 +311,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor1.stop_gradient, True) self.assertEqual(egr_tensor1.stop_gradient, True)
egr_tensor2 = core.eager.EagerTensor(arr, place=place) egr_tensor2 = core.eager.Tensor(arr, place=place)
self.assertEqual(egr_tensor2.persistable, False) self.assertEqual(egr_tensor2.persistable, False)
self.assertTrue("generated" in egr_tensor2.name) self.assertTrue("generated" in egr_tensor2.name)
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
...@@ -319,7 +319,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -319,7 +319,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor2.stop_gradient, True) self.assertEqual(egr_tensor2.stop_gradient, True)
egr_tensor3 = core.eager.EagerTensor( egr_tensor3 = core.eager.Tensor(
arr, place=place, name="new_eager_tensor") arr, place=place, name="new_eager_tensor")
self.assertEqual(egr_tensor3.persistable, False) self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("new_eager_tensor" in egr_tensor3.name) self.assertTrue("new_eager_tensor" in egr_tensor3.name)
...@@ -328,7 +328,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -328,7 +328,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor3.stop_gradient, True) self.assertEqual(egr_tensor3.stop_gradient, True)
egr_tensor4 = core.eager.EagerTensor( egr_tensor4 = core.eager.Tensor(
arr, place=place, persistable=True, name="new_eager_tensor") arr, place=place, persistable=True, name="new_eager_tensor")
self.assertEqual(egr_tensor4.persistable, True) self.assertEqual(egr_tensor4.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor4.name) self.assertTrue("new_eager_tensor" in egr_tensor4.name)
...@@ -337,7 +337,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -337,7 +337,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor4.stop_gradient, True) self.assertEqual(egr_tensor4.stop_gradient, True)
egr_tensor5 = core.eager.EagerTensor( egr_tensor5 = core.eager.Tensor(
arr, arr,
core.CPUPlace(), core.CPUPlace(),
persistable=True, persistable=True,
...@@ -350,7 +350,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -350,7 +350,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor5.stop_gradient, True) self.assertEqual(egr_tensor5.stop_gradient, True)
egr_tensor6 = core.eager.EagerTensor( egr_tensor6 = core.eager.Tensor(
arr, arr,
place=core.CPUPlace(), place=core.CPUPlace(),
persistable=True, persistable=True,
...@@ -363,7 +363,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -363,7 +363,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor6.stop_gradient, True) self.assertEqual(egr_tensor6.stop_gradient, True)
egr_tensor7 = core.eager.EagerTensor( egr_tensor7 = core.eager.Tensor(
arr, arr,
place=place, place=place,
persistable=True, persistable=True,
...@@ -376,7 +376,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -376,7 +376,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor7.stop_gradient, True) self.assertEqual(egr_tensor7.stop_gradient, True)
egr_tensor8 = core.eager.EagerTensor( egr_tensor8 = core.eager.Tensor(
arr, arr,
place=place, place=place,
persistable=True, persistable=True,
...@@ -390,7 +390,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -390,7 +390,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor8.stop_gradient, False) self.assertEqual(egr_tensor8.stop_gradient, False)
egr_tensor9 = core.eager.EagerTensor( egr_tensor9 = core.eager.Tensor(
arr, place, True, True, "new_eager_tensor", stop_gradient=False) arr, place, True, True, "new_eager_tensor", stop_gradient=False)
self.assertEqual(egr_tensor9.persistable, True) self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor9.name) self.assertTrue("new_eager_tensor" in egr_tensor9.name)
...@@ -399,7 +399,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -399,7 +399,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor9.stop_gradient, False) self.assertEqual(egr_tensor9.stop_gradient, False)
egr_tensor10 = core.eager.EagerTensor( egr_tensor10 = core.eager.Tensor(
arr, arr,
place, place,
True, True,
...@@ -413,7 +413,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -413,7 +413,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor10.stop_gradient, False) self.assertEqual(egr_tensor10.stop_gradient, False)
egr_tensor11 = core.eager.EagerTensor( egr_tensor11 = core.eager.Tensor(
arr, arr,
place, place,
True, True,
...@@ -427,7 +427,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -427,7 +427,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor11.stop_gradient, False) self.assertEqual(egr_tensor11.stop_gradient, False)
egr_tensor12 = core.eager.EagerTensor( egr_tensor12 = core.eager.Tensor(
arr, arr,
place, place,
persistable=True, persistable=True,
...@@ -441,7 +441,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -441,7 +441,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, False) self.assertEqual(egr_tensor12.stop_gradient, False)
egr_tensor13 = core.eager.EagerTensor( egr_tensor13 = core.eager.Tensor(
value=arr, value=arr,
place=place, place=place,
persistable=True, persistable=True,
...@@ -456,7 +456,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -456,7 +456,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor13.stop_gradient, False) self.assertEqual(egr_tensor13.stop_gradient, False)
# special case # special case
egr_tensor14 = core.eager.EagerTensor( egr_tensor14 = core.eager.Tensor(
dtype=core.VarDesc.VarType.FP32, dtype=core.VarDesc.VarType.FP32,
dims=[4, 16, 16, 32], dims=[4, 16, 16, 32],
name="special_eager_tensor", name="special_eager_tensor",
...@@ -467,8 +467,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -467,8 +467,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)
# init EagerTensor by EagerTensor # init Tensor by Tensor
egr_tensor15 = core.eager.EagerTensor(value=egr_tensor4) egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
self.assertEqual(egr_tensor15.persistable, True) self.assertEqual(egr_tensor15.persistable, True)
self.assertTrue("generated" in egr_tensor15.name) self.assertTrue("generated" in egr_tensor15.name)
self.assertEqual(egr_tensor15.shape, egr_tensor4.shape) self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
...@@ -480,7 +480,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -480,7 +480,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy())) np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()))
egr_tensor16 = core.eager.EagerTensor( egr_tensor16 = core.eager.Tensor(
value=egr_tensor4, name="new_eager_tensor") value=egr_tensor4, name="new_eager_tensor")
self.assertEqual(egr_tensor16.persistable, True) self.assertEqual(egr_tensor16.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor16.name) self.assertTrue("new_eager_tensor" in egr_tensor16.name)
...@@ -493,7 +493,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -493,7 +493,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy())) np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()))
egr_tensor17 = core.eager.EagerTensor( egr_tensor17 = core.eager.Tensor(
value=egr_tensor4, value=egr_tensor4,
place=place, place=place,
name="new_eager_tensor", ) name="new_eager_tensor", )
...@@ -506,7 +506,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -506,7 +506,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy())) np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()))
egr_tensor18 = core.eager.EagerTensor( egr_tensor18 = core.eager.Tensor(
egr_tensor4, egr_tensor4,
place=place, place=place,
name="new_eager_tensor", ) name="new_eager_tensor", )
...@@ -519,7 +519,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -519,7 +519,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue( self.assertTrue(
np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy())) np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()))
egr_tensor19 = core.eager.EagerTensor( egr_tensor19 = core.eager.Tensor(
egr_tensor4, egr_tensor4,
place, place,
name="new_eager_tensor", ) name="new_eager_tensor", )
...@@ -536,7 +536,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -536,7 +536,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
x = np.random.rand(3, 3).astype('float32') x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor() t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace()) t.set(x, paddle.fluid.CPUPlace())
egr_tensor20 = core.eager.EagerTensor(value=t) egr_tensor20 = core.eager.Tensor(value=t)
self.assertEqual(egr_tensor20.persistable, False) self.assertEqual(egr_tensor20.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor20.name) self.assertTrue("generated_tensor" in egr_tensor20.name)
self.assertEqual(egr_tensor20.shape, [3, 3]) self.assertEqual(egr_tensor20.shape, [3, 3])
...@@ -547,7 +547,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -547,7 +547,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
paddle.fluid.framework._current_expected_place())) paddle.fluid.framework._current_expected_place()))
self.assertTrue(np.array_equal(egr_tensor20.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor20.numpy(), x))
egr_tensor21 = core.eager.EagerTensor(value=t, place=place) egr_tensor21 = core.eager.Tensor(value=t, place=place)
self.assertEqual(egr_tensor21.persistable, False) self.assertEqual(egr_tensor21.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor21.name) self.assertTrue("generated_tensor" in egr_tensor21.name)
self.assertEqual(egr_tensor21.shape, [3, 3]) self.assertEqual(egr_tensor21.shape, [3, 3])
...@@ -556,7 +556,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -556,7 +556,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor21.place._equals(place)) self.assertTrue(egr_tensor21.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor21.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor21.numpy(), x))
egr_tensor22 = core.eager.EagerTensor(t, place=place) egr_tensor22 = core.eager.Tensor(t, place=place)
self.assertEqual(egr_tensor22.persistable, False) self.assertEqual(egr_tensor22.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor22.name) self.assertTrue("generated_tensor" in egr_tensor22.name)
self.assertEqual(egr_tensor22.shape, [3, 3]) self.assertEqual(egr_tensor22.shape, [3, 3])
...@@ -565,8 +565,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -565,8 +565,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor22.place._equals(place)) self.assertTrue(egr_tensor22.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor22.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor22.numpy(), x))
egr_tensor23 = core.eager.EagerTensor( egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
t, place, name="from_framework_tensor")
self.assertEqual(egr_tensor23.persistable, False) self.assertEqual(egr_tensor23.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor23.name) self.assertTrue("from_framework_tensor" in egr_tensor23.name)
self.assertEqual(egr_tensor23.shape, [3, 3]) self.assertEqual(egr_tensor23.shape, [3, 3])
...@@ -575,7 +574,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -575,7 +574,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor23.place._equals(place)) self.assertTrue(egr_tensor23.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor23.numpy(), x)) self.assertTrue(np.array_equal(egr_tensor23.numpy(), x))
egr_tensor24 = core.eager.EagerTensor( egr_tensor24 = core.eager.Tensor(
value=t, place=place, name="from_framework_tensor") value=t, place=place, name="from_framework_tensor")
self.assertEqual(egr_tensor24.persistable, False) self.assertEqual(egr_tensor24.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor24.name) self.assertTrue("from_framework_tensor" in egr_tensor24.name)
...@@ -587,7 +586,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -587,7 +586,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
# Bad usage # Bad usage
# SyntaxError: positional argument follows keyword argument # SyntaxError: positional argument follows keyword argument
# egr_tensor25 = core.eager.EagerTensor(value=t, place) # egr_tensor25 = core.eager.Tensor(value=t, place)
def test_constructor_with_kwargs(self): def test_constructor_with_kwargs(self):
print("Test_constructor_with_kwargs") print("Test_constructor_with_kwargs")
...@@ -655,7 +654,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -655,7 +654,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
tensor2 = None tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace()) core.CPUPlace())
tensor3 = core.eager.EagerTensor() tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0)) core.CUDAPlace(0))
...@@ -683,7 +682,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -683,7 +682,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
tensor2 = None tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32, tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace()) core.CPUPlace())
tensor3 = core.eager.EagerTensor() tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda(): if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32, tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0)) core.CUDAPlace(0))
...@@ -748,7 +747,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -748,7 +747,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
with _test_eager_guard(): with _test_eager_guard():
arr = np.random.rand(4, 16, 16, 32).astype('float64') arr = np.random.rand(4, 16, 16, 32).astype('float64')
egr_tensor0 = core.eager.EagerTensor(value=arr) egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False) self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name) self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
...@@ -766,7 +765,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase): ...@@ -766,7 +765,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
def test_set_value(self): def test_set_value(self):
with _test_eager_guard(): with _test_eager_guard():
ori_arr = np.random.rand(4, 16, 16, 32).astype('float32') ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor = core.eager.EagerTensor(value=ori_arr) egr_tensor = core.eager.Tensor(value=ori_arr)
self.assertEqual(egr_tensor.stop_gradient, True) self.assertEqual(egr_tensor.stop_gradient, True)
self.assertEqual(egr_tensor.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr)) self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))
...@@ -859,7 +858,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase): ...@@ -859,7 +858,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
def test_backward_with_single_tensor(self): def test_backward_with_single_tensor(self):
with _test_eager_guard(): with _test_eager_guard():
arr4 = np.random.rand(4, 16, 16, 32).astype('float32') arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor12 = core.eager.EagerTensor(arr4, core.CPUPlace()) egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
egr_tensor12.retain_grads() egr_tensor12.retain_grads()
arr = np.ones([4, 16, 16, 32]).astype('float32') arr = np.ones([4, 16, 16, 32]).astype('float32')
self.assertEqual(egr_tensor12.persistable, False) self.assertEqual(egr_tensor12.persistable, False)
......
...@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase): ...@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
if fluid.framework._in_eager_mode(): if fluid.framework._in_eager_mode():
var_base = paddle.to_tensor(np.array([3, 4, 5])) var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.eager.EagerTensor)) self.assertTrue(isinstance(var_base, core.eager.Tensor))
else: else:
var_base = paddle.to_tensor(np.array([3, 4, 5])) var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.VarBase)) self.assertTrue(isinstance(var_base, core.VarBase))
...@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase): ...@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase):
t.set(x, fluid.CPUPlace()) t.set(x, fluid.CPUPlace())
if _in_eager_mode(): if _in_eager_mode():
# TODO(jiabin): Support Kwargs and uncomment these tests # TODO(jiabin): Support Kwargs and uncomment these tests
# egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace()) # egr_tmp = fluid.core.eager.Tensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace()) egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace())
egr_tmp3 = paddle.to_tensor(x) egr_tmp3 = paddle.to_tensor(x)
egr_tmp4 = fluid.core.eager.EagerTensor(y) egr_tmp4 = fluid.core.eager.Tensor(y)
# egr_tmp5 = fluid.core.eager.EagerTensor(value=x) # egr_tmp5 = fluid.core.eager.Tensor(value=x)
# TODO(jiabin): Support it when we merge LoDTensor with DenseTensor # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
egr_tmp6 = fluid.core.eager.EagerTensor(t) egr_tmp6 = fluid.core.eager.Tensor(t)
# self.assertTrue(np.array_equal(x, egr_tmp.numpy())) # self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy())) self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
...@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase): ...@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase):
self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type') self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type')
if core._in_eager_mode(): if core._in_eager_mode():
self.assertEqual( self.assertEqual(
type(paddle.fluid.core.eager.EagerTensor).__name__, type(paddle.fluid.core.eager.Tensor).__name__, 'pybind11_type')
'pybind11_type')
else: else:
self.assertEqual( self.assertEqual(
type(paddle.fluid.core.VarBase).__name__, 'pybind11_type') type(paddle.fluid.core.VarBase).__name__, 'pybind11_type')
......
...@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase): ...@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
data_np[0][0] = -1 data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1) self.assertEqual(data_np[0][0], -1)
if _in_eager_mode(): if _in_eager_mode():
# in eager mode, var2 is an EagerTensor and is not subscriptable # in eager mode, var2 is a Tensor and is not subscriptable
# TODO(wuweilong): to support slice in eager mode later # TODO(wuweilong): to support slice in eager mode later
self.assertNotEqual(var2.numpy()[0][0], -1) self.assertNotEqual(var2.numpy()[0][0], -1)
else: else:
......
...@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler): ...@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler):
self.last_epoch = epoch self.last_epoch = epoch
if _in_eager_mode(): if _in_eager_mode():
tmp = core.eager.EagerTensor tmp = core.eager.Tensor
else: else:
tmp = Tensor tmp = Tensor
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1] # loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
......
...@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True): ...@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
# TODO(jiabin): Support kwargs in eager tensor constructor # TODO(jiabin): Support kwargs in eager tensor constructor
if _in_eager_mode() and isinstance(data, np.ndarray): if _in_eager_mode() and isinstance(data, np.ndarray):
return core.eager.EagerTensor(data, place, False, False, None, return core.eager.Tensor(data, place, False, False, None, stop_gradient)
stop_gradient)
else: else:
return paddle.Tensor( return paddle.Tensor(
value=data, value=data,
......
...@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'): ...@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'):
data=data) data=data)
def eager_tensor_to_string(tensor, prefix='Tensor'): def tensor_to_string(tensor, prefix='Tensor'):
indent = len(prefix) + 1 indent = len(prefix) + 1
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})" _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
......