Unverified commit 831fd86e, authored by Jiabin Yang, committed by GitHub

EagerTensor to EagerVariable (#39447)

* merge legacy to fluid

* Remove legacy code

* Remove legacy code

* Remove DataType test

* Using Tensor directly instead of using EagerTensor

* support gradient_accumulation

* make test_imperative_lod_tensor_to_selected_rows longer

* make test_imperative_lod_tensor_to_selected_rows longer

* refine code

* Rename all EagerTensor to Tensor

* Rename some EagerTensor to Tensor

* rename EagerTensor to EagerVariable

* add more test

* merge develop and refine code
Parent f21d7957
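At a glance, this change renames the inner eager-mode container from `egr::EagerTensor` to `egr::EagerVariable`: the renamed class wraps a `paddle::framework::Variable`, while `paddle::experimental::Tensor` stays the user-facing type. A minimal sketch of the resulting round trip, assuming the `EagerUtils` helpers shown in the diff below (header paths are assumptions, not part of this diff):

    // Sketch only: relationship between the API-level Tensor and the renamed
    // egr::EagerVariable after this patch. Header paths are assumed.
    #include "paddle/fluid/eager/eager_tensor.h"  // assumed: defines egr::EagerVariable
    #include "paddle/fluid/eager/utils.h"         // assumed: defines egr::EagerUtils

    paddle::experimental::Tensor RoundTrip(const paddle::experimental::Tensor& t) {
      // Wrap the API Tensor into the inner variable type consumed by imperative ops.
      std::shared_ptr<egr::EagerVariable> var = egr::EagerUtils::TrySyncToVar(t);
      // ... an op would run here on NameVarMap<egr::EagerVariable> ins/outs ...
      // Convert the inner variable back into an API-level Tensor.
      return egr::EagerUtils::GetOutput(var);
    }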
@@ -1227,11 +1227,11 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     // Forward Function Body
     // According to fwd_inputs_name_pos_map
-    std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
+    std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
 ins =
          { {"X" , TrySyncToVars(X)}, { "Y" , TrySyncToVars(Y)} };
-    std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
+    std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
 outs =
          {
            {"Out0" , CreateVars(Out0Num)}, {"Out1"

@@ -1316,7 +1316,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
   const char* FWD_INS_MAP_TEMPLATE =
       " std::map<std::string, "
-      "std::vector<std::shared_ptr<egr::EagerTensor>>> ins = { "
+      "std::vector<std::shared_ptr<egr::EagerVariable>>> ins = { "
       "%s };\n";
   std::string ins_map_str =
       paddle::string::Sprintf(FWD_INS_MAP_TEMPLATE, ins_contents_str);

@@ -1353,8 +1353,9 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     if (op_passing_outs_map[op_type].count(output_name)) {
       const std::string output_var_name = output_name + "Var";
-      // Pass Output from function argument(EagerTensor*/vector<EagerTensor*>&),
-      // in form of shared_ptr<EagerTensor>/vector<shared_ptr<EagerTensor>>
+      // Pass Output from function
+      // argument(EagerVariable*/vector<EagerVariable*>&),
+      // in form of shared_ptr<EagerVariable>/vector<shared_ptr<EagerVariable>>
       if (output.duplicable()) {
         const char* FWD_NUM_ARG_TEMPLATE =
             ", std::vector<paddle::experimental::Tensor*>& %s";

@@ -1395,7 +1396,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
       } else {
         const char* FWD_OUTS_CONTENT_TEMPLATE =
             "{ \"%s\", "
-            "{std::make_shared<egr::EagerTensor>(egr::Controller::Instance()."
+            "{std::make_shared<egr::EagerVariable>(egr::Controller::Instance()."
             "GenerateUniqueName())}},";
         outs_contents_str +=
             paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE, output_name);

@@ -1407,7 +1408,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
   const char* FWD_OUTS_MAP_TEMPLATE =
       " std::map<std::string, "
-      "std::vector<std::shared_ptr<egr::EagerTensor>>> outs = { "
+      "std::vector<std::shared_ptr<egr::EagerVariable>>> outs = { "
       "%s };\n";
   std::string outs_map_str =
       paddle::string::Sprintf(FWD_OUTS_MAP_TEMPLATE, outs_contents_str);

@@ -1482,7 +1483,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
     generated_function_body += out_tensor_str;
   }
   generated_function_body += "\n";
-  VLOG(6) << "Converted Output VarBase to EagerTensor(s)";
+  VLOG(6) << "Converted Output VarBase to EagerVariable(s)";

   // [Generation] Handle core_ops_returns_info
   core_ops_returns_info[op_type] = return_contents;

@@ -1627,7 +1628,7 @@ static std::string GenerateSingleOpBase(
   const char* BWD_INS_MAP_TEMPLATE =
       " std::map<std::string, "
-      "std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
+      "std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
       "%s };\n";
   std::string ins_map_str =
       paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str);

@@ -1704,7 +1705,7 @@ static std::string GenerateSingleOpBase(
     } else {
       const char* GRAD_OUTS_CONTENT_TEMPLATE =
           "{ \"%s\", "
-          "{std::make_shared<egr::EagerTensor>(egr::Controller::Instance("
+          "{std::make_shared<egr::EagerVariable>(egr::Controller::Instance("
           ")."
           "GenerateUniqueName())}},";
       outs_contents_str += paddle::string::Sprintf(

@@ -1723,7 +1724,7 @@ static std::string GenerateSingleOpBase(
   const char* BWD_OUTS_MAP_TEMPLATE =
       " std::map<std::string, "
-      "std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
+      "std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
       "%s };\n";
   std::string outs_map_str = paddle::string::Sprintf(
       BWD_OUTS_MAP_TEMPLATE, outs_name, outs_contents_str);
......
@@ -40,36 +40,28 @@
  * **/
 namespace egr {
-class EagerTensor final {
+class EagerVariable final {
  public:
   /* Default constructor and name constructor should only be used for contruct
    * output and in fluid*/
-  EagerTensor() = default;
-  explicit EagerTensor(const std::string& name) : name_(name) {}
-  explicit EagerTensor(const paddle::experimental::Tensor& tensor)
+  EagerVariable() = default;
+  explicit EagerVariable(const std::string& name) : name_(name) {}
+  explicit EagerVariable(const paddle::experimental::Tensor& tensor)
       : name_(tensor.name()) {
     if (tensor.defined()) {
       if (tensor.is_dense_tensor()) {
-        auto* framework_tensor =
-            var_.GetMutable<paddle::framework::LoDTensor>();
-        // Contruct framework::Tensor from egr::EagerTensor
-        auto tensor_dense =
-            std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
-        PADDLE_ENFORCE_EQ((tensor_dense.get() && tensor_dense), true,
-                          paddle::platform::errors::Fatal(
-                              "Failed to Trans Tensor to EagerVariable since "
-                              "we got Tensor with type DenseTensor, and we got "
-                              "EagerVariable with another type."));
-        *framework_tensor = *tensor_dense;
+        ConstructVariableFromTensor(tensor);
+      } else if (tensor.is_selected_rows()) {
+        ConstructVariableFromSelectedRows(tensor);
       } else {
         PADDLE_THROW(paddle::platform::errors::Fatal(
             "Unrecognized egr::EagerVariable type, only "
-            "DenseTensor and SelectedRows is supported for now."));
+            "DenseTensor and SelectedRows are supported for now."));
       }
     } else {
-      VLOG(6) << "Build Empty EagerTensor with name " << name_;
+      VLOG(6) << "Build Empty EagerVariable with name " << name_;
     }
   }

@@ -77,21 +69,20 @@ class EagerTensor final {
   std::shared_ptr<pten::TensorBase> GetTensorBase() {
     // Construct allocation only once.
     if (var_.IsInitialized()) {
-      if (var_.IsType<paddle::framework::LoDTensor>()) {
-        return SetImplWithLegacyTensor<pten::DenseTensor>();
-      } else if (var_.IsType<paddle::framework::Tensor>()) {
-        return SetImplWithLegacyTensor<pten::DenseTensor>();
+      if (var_.IsType<paddle::framework::LoDTensor>() ||
+          var_.IsType<paddle::framework::Tensor>()) {
+        return SetImplWithLegacyTensor();
       } else if (var_.IsType<pten::SelectedRows>()) {
-        return SetImplWithSelectedRows();
+        return SetImplWithLegacySelectedRows();
       } else {
         PADDLE_THROW(paddle::platform::errors::Fatal(
             "Unable to fetch underlying tensor "
-            "from EagerTensor, only LoDTensor and "
+            "from EagerVariable, only LoDTensor and "
             "Tensor are supported for now"));
       }
     } else {
       PADDLE_THROW(paddle::platform::errors::Fatal(
-          "Can not Sync EagerTensor %s whose paddle::framework::Variable is "
+          "Can not Sync EagerVariable %s whose paddle::framework::Variable is "
           "not initialized!",
           name()));
     }

@@ -107,23 +98,52 @@ class EagerTensor final {
   void set_name(const std::string& name) { name_ = name; }

  private:
-  template <typename LEGACY_TYPE>
   std::shared_ptr<pten::TensorBase> SetImplWithLegacyTensor() {
-    const auto& framework_tensor = var_.Get<LEGACY_TYPE>();
+    const auto& framework_tensor = var_.Get<pten::DenseTensor>();
     VLOG(8) << "Sync Var to tensor for: " << name();
-    return std::make_shared<LEGACY_TYPE>(std::move(framework_tensor));
+    return std::make_shared<pten::DenseTensor>(framework_tensor);
   }

-  std::shared_ptr<pten::TensorBase> SetImplWithSelectedRows() {
-    auto* selected_rows = var_.GetMutable<pten::SelectedRows>();
-    auto res = std::make_shared<pten::SelectedRows>(selected_rows->rows_,
-                                                    selected_rows->height_);
-    res->value_.reset(selected_rows->value_.release());
-    res->id_to_index_ = std::move(selected_rows->id_to_index_);
-    res->rwlock_.reset(selected_rows->rwlock_.release());
+  std::shared_ptr<pten::TensorBase> SetImplWithLegacySelectedRows() {
+    auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
+    VLOG(8) << "Sync SelectedRows to tensor for: " << name();
+    auto res =
+        std::make_shared<pten::SelectedRows>(std::move(*framework_tensor));
+    var_.Clear();
     return res;
   }

+  void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
+    auto* framework_tensor = var_.GetMutable<pten::DenseTensor>();
+    // Contruct framework::Tensor from egr::EagerVariable
+    auto tensor_dense =
+        std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
+    PADDLE_ENFORCE_EQ(
+        (tensor_dense.get() && tensor_dense), true,
+        paddle::platform::errors::Fatal(
+            "Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
+            "Or it holds empty impl, this should not happend since we should "
+            "treat all kinds of tensor as what they are.",
+            tensor.name()));
+    *framework_tensor = *tensor_dense;
+  }
+
+  void ConstructVariableFromSelectedRows(
+      const paddle::experimental::Tensor& tensor) {
+    auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
+    // Contruct framework::Tensor from egr::EagerVariable
+    auto tensor_dense =
+        std::dynamic_pointer_cast<pten::SelectedRows>(tensor.impl());
+    PADDLE_ENFORCE_EQ(
+        (tensor_dense.get() && tensor_dense), true,
+        paddle::platform::errors::Fatal(
+            "Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
+            "Or it holds empty impl, this should not happend since we should "
+            "treat all kinds of tensor as what they are.",
+            tensor.name()));
+    *framework_tensor = std::move(*tensor_dense);
+  }
+
  private:
   std::string name_{""};
   paddle::framework::Variable var_;
......
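The constructor above now dispatches on the Tensor's impl: `ConstructVariableFromTensor` for `DenseTensor` and `ConstructVariableFromSelectedRows` for `SelectedRows`. A small hedged usage sketch, mirroring the `TEST(EagerVariable, Constructor)` case further down (header path is an assumption; tensor setup is elided):

    // Sketch: wrap an API Tensor into an EagerVariable, then recover a
    // pten::TensorBase from the underlying framework::Variable.
    #include "paddle/fluid/eager/eager_tensor.h"  // assumed header path

    void WrapAndUnwrap(const paddle::experimental::Tensor& t) {
      egr::EagerVariable ev(t);  // DenseTensor or SelectedRows path, per t.impl()
      if (ev.Var().IsInitialized()) {
        auto base = ev.GetTensorBase();  // back to a pten::TensorBase
        paddle::experimental::Tensor out(base, ev.name());
        (void)out;
      }
    }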
@@ -115,7 +115,7 @@ TEST(Tensor, MemberFunction) {
   CHECK_EQ(tmp_autograd_meta_test->val_, 2);
 }

-TEST(EagerTensor, Constructor) {
+TEST(EagerVariable, Constructor) {
   paddle::experimental::Tensor t3;
   pten::DenseTensorMeta meta = pten::DenseTensorMeta(
       pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));

@@ -134,7 +134,7 @@ TEST(EagerTensor, Constructor) {
   CHECK_EQ(t3.defined(), false);
   t3.set_impl(dt);

-  egr::EagerTensor et3 = egr::EagerTensor(t3);
+  egr::EagerVariable et3 = egr::EagerVariable(t3);
   VLOG(6) << "SyncToVar";
   CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
            5.0f);
......
@@ -167,7 +167,7 @@ TEST(EagerUtils, PassStopGradient) {
 TEST(EagerUtils, TrySyncToVar) {
   paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
   auto tensor = CreateTestCPUTensor(5.0f, ddim);
-  std::vector<std::shared_ptr<egr::EagerTensor>> var_bases = {
+  std::vector<std::shared_ptr<egr::EagerVariable>> var_bases = {
       egr::EagerUtils::TrySyncToVar(tensor)};

   paddle::framework::Variable* var = var_bases[0]->MutableVar();

@@ -187,7 +187,7 @@ TEST(EagerUtils, TrySyncToVars) {
   std::vector<paddle::experimental::Tensor> tensors = {
       CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)};

-  std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
+  std::vector<std::shared_ptr<egr::EagerVariable>> var_bases =
       egr::EagerUtils::TrySyncToVars(tensors);

   {

@@ -218,7 +218,7 @@ TEST(EagerUtils, TrySyncToVars) {
 TEST(EagerUtils, CreateVars) {
   VLOG(6) << "Check CreateVars";
-  std::vector<std::shared_ptr<egr::EagerTensor>> outs =
+  std::vector<std::shared_ptr<egr::EagerVariable>> outs =
       egr::EagerUtils::CreateVars(2);

   CHECK_EQ(outs.size(), size_t(2));
   CHECK(outs[0]->Var().IsInitialized() == false);
......
@@ -131,17 +131,17 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
   target->SetSingleOutRankWithSlot(slot_id, 0);
 }

-std::shared_ptr<egr::EagerTensor> EagerUtils::TrySyncToVar(
+std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
     const paddle::experimental::Tensor& tensor) {
-  return std::make_shared<egr::EagerTensor>(tensor);
+  return std::make_shared<egr::EagerVariable>(tensor);
 }

-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
     const paddle::experimental::Tensor& tensor) {
   return {TrySyncToVar(tensor)};
 }

-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
     paddle::experimental::Tensor* tensor) {
   PADDLE_ENFORCE_NOT_NULL(
       tensor,

@@ -151,9 +151,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
   return {TrySyncToVar(*tensor)};
 }

-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
     const std::vector<paddle::experimental::Tensor*>& tensors) {
-  std::vector<std::shared_ptr<EagerTensor>> res;
+  std::vector<std::shared_ptr<EagerVariable>> res;
   size_t num = tensors.size();
   res.reserve(num);
   for (size_t i = 0; i < num; i++) {

@@ -169,9 +169,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
   return res;
 }

-std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
+std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
     const std::vector<paddle::experimental::Tensor>& tensors) {
-  std::vector<std::shared_ptr<EagerTensor>> res;
+  std::vector<std::shared_ptr<EagerVariable>> res;
   size_t num = tensors.size();
   res.reserve(num);
   for (size_t i = 0; i < num; i++) {

@@ -180,19 +180,19 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
   return res;
 }

-std::vector<std::shared_ptr<EagerTensor>> EagerUtils::CreateVars(
+std::vector<std::shared_ptr<EagerVariable>> EagerUtils::CreateVars(
     const size_t num) {
-  std::vector<std::shared_ptr<EagerTensor>> res;
+  std::vector<std::shared_ptr<EagerVariable>> res;
   res.reserve(num);
   for (size_t i = 0; i < num; i++) {
     res.emplace_back(
-        new EagerTensor(egr::Controller::Instance().GenerateUniqueName()));
+        new EagerVariable(egr::Controller::Instance().GenerateUniqueName()));
   }
   return res;
 }

 std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
-    const std::vector<std::shared_ptr<EagerTensor>>& outs) {
+    const std::vector<std::shared_ptr<EagerVariable>>& outs) {
   std::vector<paddle::experimental::Tensor> res;
   res.reserve(outs.size());
   for (const auto& out : outs) {

@@ -209,7 +209,7 @@ std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
 }

 paddle::experimental::Tensor EagerUtils::GetOutput(
-    const std::shared_ptr<EagerTensor>& out) {
+    const std::shared_ptr<EagerVariable>& out) {
   PADDLE_ENFORCE_NOT_NULL(
       out.get(), paddle::platform::errors::Fatal(
                      "Eager Tensor %s is null and cannot be copied. We "

@@ -219,7 +219,7 @@ paddle::experimental::Tensor EagerUtils::GetOutput(
   return paddle::experimental::Tensor(out->GetTensorBase(), out->name());
 }

-void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
+void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
                                   paddle::experimental::Tensor* tensor) {
   PADDLE_ENFORCE_NOT_NULL(
       tensor, paddle::platform::errors::Fatal(

@@ -231,7 +231,7 @@ void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
 }

 void EagerUtils::OverwriteOutputs(
-    const std::vector<std::shared_ptr<EagerTensor>>& outs,
+    const std::vector<std::shared_ptr<EagerVariable>>& outs,
     const std::vector<paddle::experimental::Tensor*>& tensors) {
   PADDLE_ENFORCE_EQ(
       outs.size(), tensors.size(),
......
@@ -88,7 +88,7 @@ class EagerUtils {
   /**
    * We have to use autograd_meta and multi_autograd_meta to initialize
    * autograd_meta for tensor, since we can't init it in
-   * egr::EagerTensor's
+   * egr::EagerVariable's
    * constructor (it's abstract class there)
    *
    * **/

@@ -151,34 +151,35 @@ class EagerUtils {
   // Intermidate needed remove this once we don't need legacy
   // Inner Method
-  static std::shared_ptr<egr::EagerTensor> TrySyncToVar(
+  static std::shared_ptr<egr::EagerVariable> TrySyncToVar(
       const paddle::experimental::Tensor& tensor);
   // Basic Input
-  static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+  static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
       const paddle::experimental::Tensor& tensor);
   // Basic Output
-  static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+  static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
       paddle::experimental::Tensor* tensor);
   // Multi Output
-  static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+  static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
      const std::vector<paddle::experimental::Tensor*>& tensors);
   // Multi Input
-  static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
+  static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
       const std::vector<paddle::experimental::Tensor>& tensors);
   // Construct empty output
-  static std::vector<std::shared_ptr<EagerTensor>> CreateVars(const size_t num);
+  static std::vector<std::shared_ptr<EagerVariable>> CreateVars(
+      const size_t num);
   // Construct Tensor From var
   static std::vector<paddle::experimental::Tensor> GetOutputs(
-      const std::vector<std::shared_ptr<EagerTensor>>& outs);
+      const std::vector<std::shared_ptr<EagerVariable>>& outs);
   static paddle::experimental::Tensor GetOutput(
-      const std::shared_ptr<EagerTensor>& out);
+      const std::shared_ptr<EagerVariable>& out);
   // Sync Back to origin output Tensor
-  static void OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
+  static void OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
                                paddle::experimental::Tensor* tensor);
   static void OverwriteOutputs(const paddle::experimental::Tensor& out,
                                paddle::experimental::Tensor* tensor);
   static void OverwriteOutputs(
-      const std::vector<std::shared_ptr<EagerTensor>>& outs,
+      const std::vector<std::shared_ptr<EagerVariable>>& outs,
       const std::vector<paddle::experimental::Tensor*>& tensors);
   static void OverwriteOutputs(
       const std::vector<paddle::experimental::Tensor>& outs,
......
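Taken together, the `EagerUtils` helpers above cover the whole input/output round trip that the generated forward functions rely on: `TrySyncToVars` for inputs, `CreateVars` for fresh outputs, and `GetOutputs` to hand results back as API Tensors. A rough sketch of that flow, with the op invocation itself elided and the header path an assumption:

    // Sketch of the ins/outs round trip used by generated eager ops.
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>
    #include "paddle/fluid/eager/utils.h"  // assumed header for egr::EagerUtils

    std::vector<paddle::experimental::Tensor> RunOpSketch(
        const std::vector<paddle::experimental::Tensor>& xs) {
      // Inputs: API Tensors -> shared_ptr<egr::EagerVariable>.
      std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins =
          {{"X", egr::EagerUtils::TrySyncToVars(xs)}};
      // Outputs: pre-allocated, uniquely named EagerVariables.
      std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> outs =
          {{"Out", egr::EagerUtils::CreateVars(1)}};
      // ... OpBase::Run / Tracer::TraceOp would consume ins/outs here ...
      // Outputs: EagerVariables -> API Tensors.
      return egr::EagerUtils::GetOutputs(outs["Out"]);
    }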
@@ -340,8 +340,8 @@ NameVarMap<VarType> AutoCastInputs(const std::string& op_type,
 }
 template NameVarMap<VarBase> AutoCastInputs<VarBase>(
     const std::string& op_type, const NameVarMap<VarBase>& ins);
-template NameVarMap<egr::EagerTensor> AutoCastInputs<egr::EagerTensor>(
-    const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
+template NameVarMap<egr::EagerVariable> AutoCastInputs<egr::EagerVariable>(
+    const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
 template <typename VarType>
 NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
                                        const NameVarMap<VarType>& ins) {

@@ -384,7 +384,7 @@ NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
 }
 template NameVarMap<VarBase> CastPureFp16Inputs<VarBase>(
     const std::string& op_type, const NameVarMap<VarBase>& ins);
-template NameVarMap<egr::EagerTensor> CastPureFp16Inputs<egr::EagerTensor>(
-    const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
+template NameVarMap<egr::EagerVariable> CastPureFp16Inputs<egr::EagerVariable>(
+    const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
 }  // namespace imperative
 }  // namespace paddle
@@ -177,9 +177,9 @@ std::string LayerDebugString(const std::string& op_type,
 }

 std::string LayerDebugString(const std::string& op_type,
-                             const NameVarMap<egr::EagerTensor>& ins,
-                             const NameVarMap<egr::EagerTensor>& outs) {
-  return LayerDebugStringImpl<egr::EagerTensor>(op_type, ins, outs);
+                             const NameVarMap<egr::EagerVariable>& ins,
+                             const NameVarMap<egr::EagerVariable>& outs) {
+  return LayerDebugStringImpl<egr::EagerVariable>(op_type, ins, outs);
 }

 template <typename VarType>

@@ -194,11 +194,16 @@ static void SetForwardDataTypeOfGradVars(const NameVarMap<VarType>& outs) {
   }
 }

 template <>
-void SetForwardDataTypeOfGradVars<egr::EagerTensor>(
-    const NameVarMap<egr::EagerTensor>& outs) {
+void SetForwardDataTypeOfGradVars<egr::EagerVariable>(
+    const NameVarMap<egr::EagerVariable>& outs) {
   // In eager mode we don't need this.
 }

+void TestSetForwardDataTypeOfGradVarsEager(
+    const NameVarMap<egr::EagerVariable>& outs) {
+  SetForwardDataTypeOfGradVars<egr::EagerVariable>(outs);
+}
+
 VarBase::VarBase(const std::shared_ptr<VariableWrapper>& var)
     : var_(var), grad_node_(var->GetGradNode()) {
   if (auto grad_var = var_->GetGradVar()) {

@@ -528,12 +533,12 @@ void OpBase::Run(const framework::OperatorBase& op,
 }

 void OpBase::Run(const framework::OperatorBase& op,
-                 const NameVarMap<egr::EagerTensor>& ins,
-                 const NameVarMap<egr::EagerTensor>& outs,
+                 const NameVarMap<egr::EagerVariable>& ins,
+                 const NameVarMap<egr::EagerVariable>& outs,
                  const framework::AttributeMap& attrs,
                  const framework::AttributeMap& default_attrs,
                  const platform::Place& place) {
-  OpBaseRunImpl<egr::EagerTensor>(op, ins, outs, attrs, default_attrs, place);
+  OpBaseRunImpl<egr::EagerVariable>(op, ins, outs, attrs, default_attrs, place);
 }

 void ClearNoNeedBufferInputs(OpBase* op) {
......
@@ -185,8 +185,8 @@ class OpBase {
                   const framework::AttributeMap& default_attrs,
                   const platform::Place& place);
   static void Run(const framework::OperatorBase& op,
-                  const NameVarMap<egr::EagerTensor>& ins,
-                  const NameVarMap<egr::EagerTensor>& outs,
+                  const NameVarMap<egr::EagerVariable>& ins,
+                  const NameVarMap<egr::EagerVariable>& outs,
                   const framework::AttributeMap& attrs,
                   const framework::AttributeMap& default_attrs,
                   const platform::Place& place);
......
@@ -89,11 +89,16 @@ void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
 }

 template <>
-void HandleComplexGradToRealGrad<egr::EagerTensor>(
-    const NameVarMap<egr::EagerTensor>& outs) {
+void HandleComplexGradToRealGrad<egr::EagerVariable>(
+    const NameVarMap<egr::EagerVariable>& outs) {
   // TODO(jiabin): Support Complex here.
 }

+void TestHandleComplexGradToRealGradEager(
+    const NameVarMap<egr::EagerVariable>& outs) {
+  HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
+}
+
 PreparedOp::PreparedOp(const framework::OperatorBase& op,
                        const framework::RuntimeContext& ctx,
                        const framework::OpKernelType& kernel_type,

@@ -322,14 +327,14 @@ PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                      default_attrs);
 }

-PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerTensor>& ins,
-                               const NameVarMap<egr::EagerTensor>& outs,
+PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
+                               const NameVarMap<egr::EagerVariable>& outs,
                                const framework::OperatorWithKernel& op,
                                const platform::Place& place,
                                const framework::AttributeMap& attrs,
                                const framework::AttributeMap& default_attrs) {
-  return PrepareImpl<egr::EagerTensor>(ins, outs, op, place, attrs,
-                                       default_attrs);
+  return PrepareImpl<egr::EagerVariable>(ins, outs, op, place, attrs,
+                                         default_attrs);
 }

 template <typename VarType>
 static void PreparedOpRunImpl(

@@ -461,18 +466,18 @@ void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
   }
 }

-void PreparedOp::Run(const NameVarMap<egr::EagerTensor>& ins,
-                     const NameVarMap<egr::EagerTensor>& outs,
+void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
+                     const NameVarMap<egr::EagerVariable>& outs,
                      const framework::AttributeMap& attrs,
                      const framework::AttributeMap& default_attrs) {
   if (run_pten_kernel_) {
-    PreparedOpRunPtImpl<egr::EagerTensor>(
+    PreparedOpRunPtImpl<egr::EagerVariable>(
         op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins,
         outs, attrs, default_attrs);
   } else {
-    PreparedOpRunImpl<egr::EagerTensor>(op_, ctx_, kernel_type_, func_,
-                                        dev_ctx_, ins, outs, attrs,
-                                        default_attrs);
+    PreparedOpRunImpl<egr::EagerVariable>(op_, ctx_, kernel_type_, func_,
+                                          dev_ctx_, ins, outs, attrs,
+                                          default_attrs);
   }
 }
......
@@ -63,8 +63,8 @@ void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
 }

 template <>
-void SetForwardDataTypeOfGradVar<egr::EagerTensor>(
-    const std::shared_ptr<egr::EagerTensor>& var) {
+void SetForwardDataTypeOfGradVar<egr::EagerVariable>(
+    const std::shared_ptr<egr::EagerVariable>& var) {
   VLOG(10) << "Var in Eager dose not support SetForwardDataTypeOfGradVar: "
            << var->name();
   // TODO(jiabin): SetForwardDataType of Grad var is not supported yet in

@@ -171,8 +171,8 @@ class PreparedOp {
                             const framework::AttributeMap& attrs,
                             const framework::AttributeMap& default_attrs);

-  static PreparedOp Prepare(const NameVarMap<egr::EagerTensor>& ins,
-                            const NameVarMap<egr::EagerTensor>& outs,
+  static PreparedOp Prepare(const NameVarMap<egr::EagerVariable>& ins,
+                            const NameVarMap<egr::EagerVariable>& outs,
                             const framework::OperatorWithKernel& op,
                             const platform::Place& place,
                             const framework::AttributeMap& attrs,

@@ -187,8 +187,8 @@ class PreparedOp {
            const framework::AttributeMap& attrs,
            const framework::AttributeMap& default_attrs);

-  void Run(const NameVarMap<egr::EagerTensor>& ins,
-           const NameVarMap<egr::EagerTensor>& outs,
+  void Run(const NameVarMap<egr::EagerVariable>& ins,
+           const NameVarMap<egr::EagerVariable>& outs,
            const framework::AttributeMap& attrs,
            const framework::AttributeMap& default_attrs);
......
@@ -31,8 +31,8 @@
 namespace paddle {
 namespace imperative {

 extern std::string LayerDebugString(const std::string& op_type,
-                                    const NameVarMap<egr::EagerTensor>& ins,
-                                    const NameVarMap<egr::EagerTensor>& outs);
+                                    const NameVarMap<egr::EagerVariable>& ins,
+                                    const NameVarMap<egr::EagerVariable>& outs);

 extern std::shared_ptr<GradOpNode> CreateGradOpNode(
     const framework::OperatorBase& op, const NameTensorMap& ins,

@@ -41,20 +41,21 @@ extern std::shared_ptr<GradOpNode> CreateGradOpNode(
     const std::map<std::string, std::string>& inplace_map);

 TEST(test_eager, eager_debug) {
-  std::shared_ptr<egr::EagerTensor> x_in(new egr::EagerTensor("x_in"));
-  std::shared_ptr<egr::EagerTensor> y_in(new egr::EagerTensor("y_in"));
-  std::shared_ptr<egr::EagerTensor> vout(new egr::EagerTensor("vout"));
-  imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {x_in}}, {"Y", {y_in}}};
-  imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {vout}}};
+  std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
+  std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
+  std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
+  imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {x_in}},
+                                                    {"Y", {y_in}}};
+  imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {vout}}};
   LayerDebugString("mul", ins, outs);
 }

 TEST(test_create_node, eager_node) {
   auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false);
   framework::Scope scope;
   auto ctx = framework::RuntimeContext({}, {});
-  imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {nullptr}},
-                                                  {"Y", {nullptr}}};
-  imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {nullptr}}};
+  imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {nullptr}},
+                                                    {"Y", {nullptr}}};
+  imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {nullptr}}};
   CreateGradOpNode((*op.get()), ins, outs, framework::AttributeMap{},
                    framework::AttributeMap{}, platform::CPUPlace(), {});
 }

@@ -72,26 +73,26 @@ TEST(test_var_helper, eager_var_helper) {
   ASSERT_ANY_THROW(
       InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));

-  auto egr_tensor = std::make_shared<egr::EagerTensor>();
-  auto egr_tensor2 = std::make_shared<egr::EagerTensor>();
+  auto egr_tensor = std::make_shared<egr::EagerVariable>();
+  auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
   egr_tensor->MutableVar()
       ->GetMutable<pten::SelectedRows>()
       ->mutable_value()
       ->mutable_data<float>(platform::CPUPlace());
   egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
   VLOG(6) << "egr_tensor create with ";
-  ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerTensor>(egr_tensor)));
-  ASSERT_TRUE(GetDataType<egr::EagerTensor>(egr_tensor) ==
+  ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
+  ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
               framework::proto::VarType::FP32);
-  GetCachedValue<egr::EagerTensor>(
+  GetCachedValue<egr::EagerVariable>(
       egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
                                           platform::CPUPlace()));
-  SetCachedValue<egr::EagerTensor>(
+  SetCachedValue<egr::EagerVariable>(
       egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
                                           platform::CPUPlace()),
       egr_tensor2);
-  ASSERT_ANY_THROW(GetPlace<egr::EagerTensor>(egr_tensor2));
-  ASSERT_ANY_THROW(SetType<egr::EagerTensor>(
+  ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
+  ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
       egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY));
 }
 }  // namespace imperative
......
@@ -39,6 +39,8 @@ using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;

 using var_pair = std::pair<std::string, vb_vector>;

+extern void TestSetForwardDataTypeOfGradVarsEager(
+    const NameVarMap<egr::EagerVariable>& outs);
+
 template <typename VarType>
 class TestRuntimeInferVarTypeContext
     : public RuntimeInferVarTypeContext<VarType> {

@@ -406,6 +408,11 @@ TEST(test_layer, test_inner_op_not_inited) {
   ASSERT_THROW(op.CheckAttrs(), platform::EnforceNotMet);
 }

+TEST(test_layer, test_eager) {
+  imperative::NameTensorMap ins = {};
+  TestSetForwardDataTypeOfGradVarsEager(ins);
+}
+
 }  // namespace imperative
 }  // namespace paddle
......
@@ -32,6 +32,9 @@ namespace framework = paddle::framework;
 namespace paddle {
 namespace imperative {

+extern void TestHandleComplexGradToRealGradEager(
+    const NameVarMap<egr::EagerVariable>& outs);
+
 static framework::VariableNameMap CreateVarNameMap(
     const framework::OpInfo& op_info, const std::string& op_type,
     const NameVarBaseMap& varbase_map, bool is_input) {

@@ -209,6 +212,11 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
   TestPrepareDataSamePlace({});
 }

+TEST(test_prepare_op, test_complex_eager) {
+  NameVarMap<egr::EagerVariable> outs = {};
+  TestHandleComplexGradToRealGradEager(outs);
+}
+
 #ifdef PADDLE_WITH_MKLDNN
 TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
   TestPrepareDataSamePlace({{"use_mkldnn", true}});
......
@@ -37,9 +37,10 @@ namespace paddle {
 namespace imperative {

 using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
 using var_pair = std::pair<std::string, vb_vector>;
+using ev_vector = std::vector<std::shared_ptr<egr::EagerVariable>>;
+using ev_pair = std::pair<std::string, ev_vector>;

 TEST(test_tracer, test_trace_op) {
   // Doing an mul
   imperative::Tracer tracer;

@@ -546,6 +547,44 @@ TEST(test_tracer, test_execution_context) {
   ASSERT_EQ(dy_ctx.OutputName("Out"), framework::kEmptyVarName);
 }

+TEST(test_tracer, eager_tracer) {
+  // Doing an mul
+  imperative::Tracer tracer;
+  std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
+  std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
+  std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
+  platform::CPUPlace place;
+  std::vector<float> src_data(10, 2.0);
+  std::vector<int64_t> dims1 = {2, 5};
+  std::vector<int64_t> dims2 = {5, 2};
+
+  auto* x_in_tensor = x_in->MutableVar()->GetMutable<framework::LoDTensor>();
+  auto* y_in_tensor = y_in->MutableVar()->GetMutable<framework::LoDTensor>();
+  x_in_tensor->Resize(framework::make_ddim(dims1));
+  auto* mutable_x = x_in_tensor->mutable_data<float>(place);
+  paddle::memory::Copy(place, mutable_x, place, src_data.data(),
+                       sizeof(float) * src_data.size());
+  y_in_tensor->Resize(framework::make_ddim(dims2));
+  auto* mutable_y = y_in_tensor->mutable_data<float>(place);
+  paddle::memory::Copy(place, mutable_y, place, src_data.data(),
+                       sizeof(float) * src_data.size());
+
+  ev_pair x_pair = ev_pair("X", ev_vector(1, x_in));
+  ev_pair y_pair = ev_pair("Y", ev_vector(1, y_in));
+  ev_pair out_pair = ev_pair("Out", ev_vector(1, vout));
+  imperative::NameTensorMap ins = {x_pair, y_pair};
+  imperative::NameTensorMap outs = {out_pair};
+  framework::AttributeMap mul_attr_map;
+  mul_attr_map["use_mkldnn"] = false;
+  tracer.TraceOp<egr::EagerVariable>("mul", ins, outs, mul_attr_map, place,
+                                     true);
+
+  const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
+  for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
+    ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
+  }
+}
+
 }  // namespace imperative
 }  // namespace paddle
......
@@ -168,7 +168,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
                      const platform::Place& place, bool trace_backward,
                      const std::map<std::string, std::string>& inplace_map,
                      paddle::framework::AttributeMap* passed_default_attrs_,
-                     bool override_default_attr_map) {
+                     bool use_default_attr_map) {
   platform::RecordEvent op_type_record_event(type);
   platform::ScopedFlushDenormal flush;
   VLOG(1) << "Trace Op: " << type;

@@ -244,7 +244,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
                           "CustomPlace."));
 #endif
   }
-  if (!override_default_attr_map) {
+  if (!use_default_attr_map) {
     PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
                             paddle::platform::errors::PermissionDenied(
                                 "Detected default_attrs = nullptr."));

@@ -280,16 +280,14 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
   }

   if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
-    if (!override_default_attr_map) {
-      PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
-                              paddle::platform::errors::PermissionDenied(
-                                  "Detected default_attrs = nullptr."));
-      CreateGradOpNode(*op, new_ins, outs, attrs, *passed_default_attrs_, place,
-                       inplace_map);
-    } else {
-      CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
-                       inplace_map);
-    }
+    PADDLE_ENFORCE_EQ(
+        passed_default_attrs_, nullptr,
+        paddle::platform::errors::PermissionDenied(
+            "We expect passed_default_attrs_ is nullptr while "
+            "use_default_attr_map is true, however we got not null "
+            "passed_default_attrs_. Please check your usage of trace_op. "));
+    CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
+                     inplace_map);
   } else {
     VLOG(3) << "No Grad to track for Op: " << type;
   }

@@ -301,16 +299,14 @@ template void Tracer::TraceOp<VarBase>(
     const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
     const platform::Place& place, bool trace_backward,
     const std::map<std::string, std::string>& inplace_map,
-    paddle::framework::AttributeMap* default_attrs,
-    bool override_default_attr_map);
+    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

-template void Tracer::TraceOp<egr::EagerTensor>(
-    const std::string& type, const NameVarMap<egr::EagerTensor>& ins,
-    const NameVarMap<egr::EagerTensor>& outs, framework::AttributeMap attrs,
+template void Tracer::TraceOp<egr::EagerVariable>(
+    const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
+    const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
     const platform::Place& place, bool trace_backward,
     const std::map<std::string, std::string>& inplace_map_,
-    paddle::framework::AttributeMap* default_attrs,
-    bool override_default_attr_map);
+    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

 void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                      const NameVarBaseMap& outs, framework::AttributeMap attrs,

@@ -324,13 +320,12 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                      paddle::framework::AttributeMap attrs,
                      const paddle::platform::Place& place,
                      paddle::framework::AttributeMap* default_attrs,
-                     bool override_default_attr_map,
+                     bool use_default_attr_map,
                      const std::map<std::string, std::string>& inplace_map) {
-  VLOG(6) << "Running On Eager TraceOp with override_default_attr_map: "
-          << override_default_attr_map;
-  TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), place, false,
-                            inplace_map, default_attrs,
-                            override_default_attr_map);
+  VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
+          << use_default_attr_map;
+  TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
+                              inplace_map, default_attrs, use_default_attr_map);
 }

 void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,

@@ -338,8 +333,9 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                      paddle::framework::AttributeMap attrs,
                      const std::map<std::string, std::string>& inplace_map) {
   VLOG(6) << "Running On Eager TraceOp(less): ";
-  TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), expected_place_,
-                            false, inplace_map, nullptr, true);
+  TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
+                              expected_place_, false, inplace_map, nullptr,
+                              true);
 }

 void Tracer::SetExpectedPlace(platform::Place place) {
......
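Besides the type rename, the tracer hunks above rename `override_default_attr_map` to `use_default_attr_map` and tighten its contract: when the flag is true, `passed_default_attrs_` must stay `nullptr`. A hedged sketch of the two calling modes, modeled on the `eager_tracer` test earlier in this diff (the attribute values are illustrative only and the header path is assumed):

    // Sketch of the two TraceOp calling modes after this change.
    #include "paddle/fluid/imperative/tracer.h"  // assumed header path

    void TraceMulSketch(paddle::imperative::Tracer* tracer,
                        const paddle::imperative::NameVarMap<egr::EagerVariable>& ins,
                        const paddle::imperative::NameVarMap<egr::EagerVariable>& outs,
                        const paddle::platform::Place& place) {
      paddle::framework::AttributeMap attrs;
      attrs["use_mkldnn"] = false;  // illustrative attribute only

      // use_default_attr_map == true (the default): the tracer resolves default
      // attributes itself, so no default-attr map may be passed in.
      tracer->TraceOp<egr::EagerVariable>("mul", ins, outs, attrs, place,
                                          /*trace_backward=*/true);

      // use_default_attr_map == false: the caller supplies the defaults explicitly.
      paddle::framework::AttributeMap defaults = attrs;
      tracer->TraceOp<egr::EagerVariable>("mul", ins, outs, attrs, place,
                                          /*trace_backward=*/false,
                                          /*inplace_map=*/{}, &defaults,
                                          /*use_default_attr_map=*/false);
    }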
@@ -69,7 +69,7 @@ class Tracer {
                const platform::Place& place, bool trace_backward,
                const std::map<std::string, std::string>& inplace_map = {},
                paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
-               bool override_default_attr_map = true);
+               bool use_default_attr_map = true);

   void TraceOp(const std::string& type, const NameVarBaseMap& ins,
                const NameVarBaseMap& outs, framework::AttributeMap attrs,

@@ -83,7 +83,7 @@ class Tracer {
                const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
                const paddle::platform::Place& place,
                paddle::framework::AttributeMap* default_attrs,
-               bool override_default_attr_map,
+               bool use_default_attr_map,
                const std::map<std::string, std::string>& inplace_map = {});

   bool ComputeRequiredGrad(const NameVarBaseMap& ins,
......
@@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace<VarBase>(
     const std::shared_ptr<VarBase> &var);
 template const paddle::platform::Place &GetPlace<VariableWrapper>(
     const std::shared_ptr<VariableWrapper> &var);
-template const paddle::platform::Place &GetPlace<egr::EagerTensor>(
-    const std::shared_ptr<egr::EagerTensor> &var);
+template const paddle::platform::Place &GetPlace<egr::EagerVariable>(
+    const std::shared_ptr<egr::EagerVariable> &var);

 /* GetNameFromVar */
 template <typename VarType>

@@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr<VarType> var) {
   return var->Name();
 }
 template <>
-const std::string &GetNameFromVar<egr::EagerTensor>(
-    std::shared_ptr<egr::EagerTensor> tensor) {
+const std::string &GetNameFromVar<egr::EagerVariable>(
+    std::shared_ptr<egr::EagerVariable> tensor) {
   return tensor->name();
 }
 template const std::string &GetNameFromVar<VariableWrapper>(

@@ -120,8 +120,8 @@ void SetType(std::shared_ptr<VarType> var,
   var->SetType(type);
 }
 template <>
-void SetType<egr::EagerTensor>(std::shared_ptr<egr::EagerTensor> var,
-                               framework::proto::VarType::Type type) {
+void SetType<egr::EagerVariable>(std::shared_ptr<egr::EagerVariable> var,
+                                 framework::proto::VarType::Type type) {
   switch (type) {
     case paddle::framework::proto::VarType::LOD_TENSOR: {
       var->MutableVar()->GetMutable<paddle::framework::LoDTensor>();

@@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr<VarType> var) {
   return var->Type();
 }
 template <>
-framework::proto::VarType::Type GetType<egr::EagerTensor>(
-    std::shared_ptr<egr::EagerTensor> var) {
+framework::proto::VarType::Type GetType<egr::EagerVariable>(
+    std::shared_ptr<egr::EagerVariable> var) {
   if (var->Var().IsInitialized()) {
     return paddle::framework::ToVarType(var->Var().Type());
   } else {

@@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr<VarType> var) {
   return var->DataType();
 }
 template <>
-framework::proto::VarType::Type GetDataType<egr::EagerTensor>(
-    std::shared_ptr<egr::EagerTensor> var) {
+framework::proto::VarType::Type GetDataType<egr::EagerVariable>(
+    std::shared_ptr<egr::EagerVariable> var) {
   if (var->Var().IsType<pten::SelectedRows>()) {
     return framework::TransToProtoVarType(
         var->Var().Get<pten::SelectedRows>().value().type());

@@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr<VarType> var,
   return GetVariableWrapper(var)->hasCacheKey(key);
 }
 template <>
-bool CheckCachedKey<egr::EagerTensor>(
-    std::shared_ptr<egr::EagerTensor> tensor,
+bool CheckCachedKey<egr::EagerVariable>(
+    std::shared_ptr<egr::EagerVariable> tensor,
     const paddle::framework::OpKernelType &key) {
   // TODO(jiabin): Support this later
   // VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is

@@ -219,7 +219,7 @@ std::shared_ptr<VariableWrapper> GetCachedValue(
 }
 template <>
 std::shared_ptr<VariableWrapper> GetCachedValue(
-    std::shared_ptr<egr::EagerTensor> var,
+    std::shared_ptr<egr::EagerVariable> var,
     const paddle::framework::OpKernelType &key) {
   // TODO(jiabin): Support this later
   // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not

@@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr<VarType> var,
   GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res));
 }
 template <>
-void SetCachedValue<egr::EagerTensor>(
-    std::shared_ptr<egr::EagerTensor> tensor,
+void SetCachedValue<egr::EagerVariable>(
+    std::shared_ptr<egr::EagerVariable> tensor,
     const paddle::framework::OpKernelType &key,
-    std::shared_ptr<egr::EagerTensor> res) {
+    std::shared_ptr<egr::EagerVariable> res) {
   // PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
   // reach this, support cache and remove this error check later, or this
   // should not be supported."));
......
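The var_helper hunks above all follow one pattern: a generic template implements the behavior in terms of the VarBase/VariableWrapper API, and an explicit specialization for egr::EagerVariable adapts it to the eager type's own accessors (for example name() instead of Name()). A minimal self-contained sketch of that specialization pattern, using made-up stand-in types rather than Paddle's real classes, looks like this:

#include <iostream>
#include <memory>
#include <string>

// Stand-ins for imperative::VarBase and egr::EagerVariable; note the
// differently named accessors, which is what forces the specialization.
struct LegacyVar {
  std::string Name() const { return "legacy_var"; }
};
struct EagerVar {
  std::string name() const { return "eager_var"; }
};

// Generic path: assumes the variable type exposes Name().
template <typename VarType>
std::string GetNameFromVar(const std::shared_ptr<VarType>& var) {
  return var->Name();
}

// Full specialization: the eager type exposes name() instead.
template <>
std::string GetNameFromVar<EagerVar>(const std::shared_ptr<EagerVar>& var) {
  return var->name();
}

int main() {
  std::cout << GetNameFromVar(std::make_shared<LegacyVar>()) << "\n";
  std::cout << GetNameFromVar(std::make_shared<EagerVar>()) << "\n";
  return 0;
}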
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include "paddle/fluid/framework/variable.h" #include "paddle/fluid/framework/variable.h"
namespace egr { namespace egr {
class EagerTensor; class EagerVariable;
} // namespace egr } // namespace egr
namespace pten { namespace pten {
class DenseTensor; class DenseTensor;
......
This diff has been collapsed.

...@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args, ...@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_api_read_next_eager_tensor_list(PyObject* self, static PyObject* eager_api_read_next_tensor_list(PyObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) {
EAGER_TRY EAGER_TRY
auto tensor_base_list = auto tensor_base_list =
CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
...@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = { ...@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = {
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy, {"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"read_next_eager_tensor_list", {"read_next_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_eager_tensor_list, (PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}}; {NULL, NULL, 0, NULL}};
......
...@@ -35,15 +35,15 @@ limitations under the License. */ ...@@ -35,15 +35,15 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace pybind { namespace pybind {
extern void InitEagerTensorWithNumpyValue(TensorObject* self, extern void InitTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array, const pybind11::object& array,
bool zero_copy); bool zero_copy);
extern PyTypeObject* p_tensor_type; extern PyTypeObject* p_tensor_type;
static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true, self->tensor.initialized(), true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
...@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args, ...@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method__is_initialized(TensorObject* self, static PyObject* tensor_method__is_initialized(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.initialized()); return ToPyObject(self->tensor.initialized());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method__copy_to(TensorObject* self, static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1); auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
auto cp_tensor = auto cp_tensor =
...@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self, ...@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
std::string orig_name = self->tensor.name(); std::string orig_name = self->tensor.name();
...@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self, ...@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1); bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
...@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args, ...@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
if (egr::Controller::Instance().HasGrad()) { if (egr::Controller::Instance().HasGrad()) {
auto meta = egr::EagerUtils::autograd_meta(&(self->tensor)); auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
...@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args, ...@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__clear_gradient(TensorObject* self, static PyObject* tensor__clear_gradient(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
VLOG(4) << "ClearGradient " << self->tensor.name(); VLOG(4) << "ClearGradient " << self->tensor.name();
paddle::experimental::Tensor* grad; paddle::experimental::Tensor* grad;
...@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self, ...@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
VLOG(4) << "ZeroGrads " << self->tensor.name(); VLOG(4) << "ZeroGrads " << self->tensor.name();
...@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args, ...@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__share_buffer_to(TensorObject* self, static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) { EAGER_TRY
EAGER_SYNC_TRY
paddle::experimental::Tensor* dst_ptr = paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self, ...@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, static PyObject* tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor* dst_ptr = paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self, ...@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
paddle::experimental::Tensor* src_ptr = paddle::experimental::Tensor* src_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor); &(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true, PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
...@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self, ...@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor__is_shared_underline_tensor_with( static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
TensorObject* self, PyObject* args, PyObject* kwargs) { PyObject* args,
EAGER_SYNC_TRY PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor = paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0); CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
PADDLE_ENFORCE_EQ(src_tensor.initialized(), true, PADDLE_ENFORCE_EQ(src_tensor.initialized(), true,
...@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with( ...@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with(
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, static PyObject* tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true, self->tensor.initialized(), true,
platform::errors::InvalidArgument("Tensor %s has not been initialized!", platform::errors::InvalidArgument("Tensor %s has not been initialized!",
...@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args, ...@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args, PyObject* args,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_SYNC_TRY EAGER_TRY
if (self->tensor.is_dense_tensor()) { if (self->tensor.is_dense_tensor()) {
auto* tensor = auto* tensor =
static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get()); static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
...@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self, ...@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
} }
// NOTE(wuweilong): Set value and not change self's original place // NOTE(wuweilong): Set value and not change self's original place
static PyObject* eager_tensor_method_set_value(TensorObject* self, static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args,
PyObject* args, PyObject* kwargs) {
PyObject* kwargs) {
EAGER_TRY EAGER_TRY
VLOG(4) << "Value " << self->tensor.name(); VLOG(4) << "Value " << self->tensor.name();
pybind11::object numpy_value = pybind11::object numpy_value =
pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true); pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
InitEagerTensorWithNumpyValue(self, numpy_value, false); InitTensorWithNumpyValue(self, numpy_value, false);
Py_INCREF(Py_None); Py_INCREF(Py_None);
return Py_None; return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyMethodDef variable_methods[] = { PyMethodDef variable_methods[] = {
{"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy, {"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_initialized", {"_is_initialized",
(PyCFunction)(void (*)(void))eager_tensor_method__is_initialized, (PyCFunction)(void (*)(void))tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to, {"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_, {"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"reconstruct_from_", {"reconstruct_from_",
(PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_, (PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads, {"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_clear_gradient", {"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient,
(PyCFunction)(void (*)(void))eager_tensor__clear_gradient,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_zero_grads", (PyCFunction)(void (*)(void))eager_tensor__zero_grads, {"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_buffer_to", {"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
(PyCFunction)(void (*)(void))eager_tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_buffer_with", {"_is_shared_buffer_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_buffer_with, (PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_underline_tensor_to", {"_share_underline_tensor_to",
(PyCFunction)(void (*)(void))eager_tensor__share_underline_tensor_to, (PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_underline_tensor_with", {"_is_shared_underline_tensor_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_underline_tensor_with, (PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"detach", (PyCFunction)(void (*)(void))eager_tensor_method_detach, {"detach", (PyCFunction)(void (*)(void))tensor_method_detach,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"get_tensor", {"get_tensor",
(PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor, (PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value, {"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value,
METH_VARARGS | METH_KEYWORDS, NULL}, METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}}; {NULL, NULL, 0, NULL}};
......
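Each rename in the variable_methods table above has to be made in two places: the C function itself and the string key exported to Python, because PyMethodDef ties them together only by position in the table. A toy CPython extension, not Paddle's actual bindings, showing that registration mechanism:

#include <Python.h>

// One method using the same (PyCFunction)(void (*)(void)) cast convention as
// above; METH_VARARGS | METH_KEYWORDS means the real signature takes kwargs.
static PyObject* demo_hello(PyObject* self, PyObject* args, PyObject* kwargs) {
  (void)self;
  (void)args;
  (void)kwargs;
  return PyUnicode_FromString("hello");
}

static PyMethodDef demo_methods[] = {
    {"hello", (PyCFunction)(void (*)(void))demo_hello,
     METH_VARARGS | METH_KEYWORDS, "Return a greeting."},
    {NULL, NULL, 0, NULL}};

static struct PyModuleDef demo_module = {
    PyModuleDef_HEAD_INIT, "demo_ext", NULL, -1, demo_methods};

// After building this as demo_ext, Python sees demo_ext.hello().
PyMODINIT_FUNC PyInit_demo_ext(void) { return PyModule_Create(&demo_module); }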
...@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"( ...@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"(
auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_TEMPLATE = R"( const char* CAST_VAR_PTR_TEMPLATE = R"(
auto %s = GetEagerTensorPtrFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_LIST_TEMPLATE = R"( const char* CAST_VAR_PTR_LIST_TEMPLATE = R"(
auto %s = GetEagerTensorPtrListFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_SIZE_T_TEMPLATE = R"( const char* CAST_SIZE_T_TEMPLATE = R"(
auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)"; auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)";
......
...@@ -35,14 +35,14 @@ namespace pybind { ...@@ -35,14 +35,14 @@ namespace pybind {
extern PyTypeObject* p_tensor_type; extern PyTypeObject* p_tensor_type;
PyObject* eager_tensor_properties_get_name(TensorObject* self, void* closure) { PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.name()); return ToPyObject(self->tensor.name());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
if (self->tensor.is_dense_tensor()) { if (self->tensor.is_dense_tensor()) {
return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR); return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
} else { } else {
...@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) { ...@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_name(TensorObject* self, PyObject* value, int tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
self->tensor.set_name(CastPyArg2AttrString(value, 0)); self->tensor.set_name(CastPyArg2AttrString(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_stop_gradient(TensorObject* self, PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->StopGradient()); return ToPyObject(meta->StopGradient());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
if (egr::egr_utils_api::IsLeafTensor(self->tensor)) { if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
std::shared_ptr<egr::GradNodeBase> grad_node = std::shared_ptr<egr::GradNodeBase> grad_node =
egr::EagerUtils::grad_node(self->tensor); egr::EagerUtils::grad_node(self->tensor);
...@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) { ...@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, int tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto src = CastPyArg2Tensor(value, 0); auto src = CastPyArg2Tensor(value, 0);
PADDLE_ENFORCE( PADDLE_ENFORCE(
egr::egr_utils_api::IsLeafTensor(self->tensor), egr::egr_utils_api::IsLeafTensor(self->tensor),
...@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value, ...@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
int eager_tensor_properties_set_stop_gradient(TensorObject* self, int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
PyObject* value, void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0)); meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_persistable(TensorObject* self, PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
void* closure) { EAGER_TRY
EAGER_SYNC_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->Persistable()); return ToPyObject(meta->Persistable());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
int eager_tensor_properties_set_persistable(TensorObject* self, PyObject* value, int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) { void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor); auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetPersistable(CastPyArg2AttrBoolean(value, 0)); meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
return 0; return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO EAGER_CATCH_AND_THROW_RETURN_ZERO
} }
PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
auto ddim = self->tensor.shape(); auto ddim = self->tensor.shape();
std::vector<int64_t> value; std::vector<int64_t> value;
size_t rank = static_cast<size_t>(ddim.size()); size_t rank = static_cast<size_t>(ddim.size());
...@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) { ...@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_place(TensorObject* self, void* closure) { PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject(self->tensor.inner_place()); return ToPyObject(self->tensor.inner_place());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_place_str(TensorObject* self, PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
void* closure) { EAGER_TRY
EAGER_SYNC_TRY
std::stringstream ostr; std::stringstream ostr;
ostr << self->tensor.inner_place(); ostr << self->tensor.inner_place();
return ToPyObject(ostr.str()); return ToPyObject(ostr.str());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
PyObject* eager_tensor_properties_get_dtype(TensorObject* self, void* closure) { PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_SYNC_TRY EAGER_TRY
return ToPyObject( return ToPyObject(
paddle::framework::TransToProtoVarType(self->tensor.type())); paddle::framework::TransToProtoVarType(self->tensor.type()));
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
} }
struct PyGetSetDef variable_properties[] = { struct PyGetSetDef variable_properties[] = {
{"grad", (getter)eager_tensor_properties_get_grad, {"grad", (getter)tensor_properties_get_grad,
(setter)eager_tensor_properties_set_grad, nullptr, nullptr}, (setter)tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)eager_tensor_properties_get_name, {"name", (getter)tensor_properties_get_name,
(setter)eager_tensor_properties_set_name, nullptr, nullptr}, (setter)tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient, {"stop_gradient", (getter)tensor_properties_get_stop_gradient,
(setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr}, (setter)tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)eager_tensor_properties_get_persistable, {"persistable", (getter)tensor_properties_get_persistable,
(setter)eager_tensor_properties_set_persistable, nullptr, nullptr}, (setter)tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr, {"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
nullptr}, // {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
// {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
// nullptr, // nullptr,
// nullptr}, // nullptr},
{"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr, {"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
nullptr}, {"_place_str", (getter)tensor_properties_get_place_str, nullptr, nullptr,
{"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
nullptr, nullptr},
{"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
nullptr},
{"type", (getter)eager_tensor_properties_get_type, nullptr, nullptr,
nullptr}, nullptr},
{"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
{"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}}; {nullptr, nullptr, nullptr, nullptr, nullptr}};
} // namespace pybind } // namespace pybind
......
...@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) { ...@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) {
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be " "argument (position %d) must be "
"EagerTensor, but got %s", "EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name)); arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
} }
} }
...@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) { ...@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be " "argument (position %d) must be "
"EagerTensor, but got %s", "EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name)); arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
} }
} }
...@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
if (PyList_Check(list)) { if (PyList_Check(list)) {
Py_ssize_t len = PyList_Size(list); Py_ssize_t len = PyList_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) { if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got " "%s(): argument '%s' (position %d) must be list of Tensors, but got "
...@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
} }
} else if (PyTuple_Check(list)) { } else if (PyTuple_Check(list)) {
Py_ssize_t len = PyTuple_Size(list); Py_ssize_t len = PyTuple_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) { if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got " "%s(): argument '%s' (position %d) must be list of Tensors, but got "
...@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
return result; return result;
} }
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& arg_name,
ssize_t arg_idx, bool dispensable) { PyObject* args,
ssize_t arg_idx,
bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx); PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
if (PyTuple_Check(obj)) { if (PyTuple_Check(obj)) {
...@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( ...@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
return &(reinterpret_cast<TensorObject*>(obj)->tensor); return &(reinterpret_cast<TensorObject*>(obj)->tensor);
} }
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs( std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) { ssize_t arg_idx, bool dispensable) {
PyObject* list = PyTuple_GET_ITEM(args, arg_idx); PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
......
...@@ -65,15 +65,15 @@ PyObject* ToPyObject( ...@@ -65,15 +65,15 @@ PyObject* ToPyObject(
const std::unordered_map<std::string, std::vector<std::string>>& value); const std::unordered_map<std::string, std::vector<std::string>>& value);
template <typename Tuple, size_t N> template <typename Tuple, size_t N>
struct TupleEagerTensorResult { struct TupleTensorResult {
static void Run(const Tuple& out, PyObject* result) { static void Run(const Tuple& out, PyObject* result) {
TupleEagerTensorResult<Tuple, N - 1>::Run(out, result); TupleTensorResult<Tuple, N - 1>::Run(out, result);
PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out))); PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out)));
} }
}; };
template <typename Tuple> template <typename Tuple>
struct TupleEagerTensorResult<Tuple, 1> { struct TupleTensorResult<Tuple, 1> {
static void Run(const Tuple& out, PyObject* result) { static void Run(const Tuple& out, PyObject* result) {
PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out))); PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out)));
} }
...@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) { ...@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) {
auto len = sizeof...(Args); auto len = sizeof...(Args);
PyObject* result = PyTuple_New(len); PyObject* result = PyTuple_New(len);
TupleEagerTensorResult<decltype(out), sizeof...(Args)>::Run(out, result); TupleTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
return result; return result;
} }
...@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs( ...@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false); ssize_t arg_idx, bool dispensable = false);
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs( paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& arg_name,
ssize_t arg_idx, bool dispensable = false); PyObject* args,
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs( ssize_t arg_idx,
bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args, const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false); ssize_t arg_idx, bool dispensable = false);
......
...@@ -19,7 +19,6 @@ limitations under the License. */ ...@@ -19,7 +19,6 @@ limitations under the License. */
#include "pybind11/pybind11.h" #include "pybind11/pybind11.h"
#define EAGER_TRY try { #define EAGER_TRY try {
#define EAGER_SYNC_TRY try {
#define EAGER_CATCH_AND_THROW_RETURN_NULL \ #define EAGER_CATCH_AND_THROW_RETURN_NULL \
} \ } \
catch (...) { \ catch (...) { \
......
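EAGER_SYNC_TRY expanded to the same try-block opener as EAGER_TRY, so the hunk above can delete it and switch every former EAGER_SYNC_TRY call site to EAGER_TRY without changing behavior. A rough self-contained sketch of this macro pair, which turns a thrown exception into a null return in the CPython style (illustrative names, not the Paddle macros):

#include <cstdio>
#include <stdexcept>

// Opens a try block; the CATCH macro closes it and converts a thrown
// std::exception into a null return value, mirroring the CPython convention
// of returning NULL after recording an error.
#define DEMO_TRY try {
#define DEMO_CATCH_AND_RETURN_NULL                 \
  }                                                \
  catch (const std::exception& e) {                \
    std::fprintf(stderr, "error: %s\n", e.what()); \
    return nullptr;                                \
  }

const char* SafeLookup(bool fail) {
  DEMO_TRY
  if (fail) throw std::runtime_error("lookup failed");
  return "ok";
  DEMO_CATCH_AND_RETURN_NULL
}

int main() {
  std::printf("%s\n", SafeLookup(false));            // prints "ok"
  std::printf("%d\n", SafeLookup(true) == nullptr);  // prints "1"
  return 0;
}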
...@@ -222,6 +222,14 @@ class PADDLE_API Tensor final { ...@@ -222,6 +222,14 @@ class PADDLE_API Tensor final {
*/ */
bool is_dense_tensor() const; bool is_dense_tensor() const;
/**
* @brief Determine whether tensor is SelectedRows
*
* @return true
* @return false
*/
bool is_selected_rows() const;
/* Part 3: Device and Backend methods */ /* Part 3: Device and Backend methods */
/** /**
......
...@@ -29,7 +29,6 @@ limitations under the License. */ ...@@ -29,7 +29,6 @@ limitations under the License. */
#include "paddle/pten/core/tensor_base.h" #include "paddle/pten/core/tensor_base.h"
#include "paddle/pten/core/tensor_meta.h" #include "paddle/pten/core/tensor_meta.h"
#include "paddle/pten/core/tensor_utils.h" #include "paddle/pten/core/tensor_utils.h"
/** /**
* [ Why still include the fluid headers? ] * [ Why still include the fluid headers? ]
* *
...@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); } ...@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); }
bool Tensor::is_dense_tensor() const { bool Tensor::is_dense_tensor() const {
return pten::DenseTensor::classof(impl_.get()); return pten::DenseTensor::classof(impl_.get());
} }
bool Tensor::is_selected_rows() const {
return pten::SelectedRows::classof(impl_.get());
}
/* Part 3: Device and Backend methods */ /* Part 3: Device and Backend methods */
PlaceType Tensor::place() const { PlaceType Tensor::place() const {
......
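The new is_selected_rows() mirrors is_dense_tensor(): both ask the concrete impl type through a static classof() check, the LLVM-style type-identification idiom used by pten's TensorBase hierarchy. A minimal sketch of that idiom with stand-in types (not the real pten classes):

#include <iostream>
#include <memory>

// The base class tags each instance with a kind; classof() on each derived
// class checks that tag, which is what the is_dense_tensor() and
// is_selected_rows() predicates build on.
struct TensorBase {
  enum class Kind { kDense, kSelectedRows };
  explicit TensorBase(Kind k) : kind(k) {}
  virtual ~TensorBase() = default;
  Kind kind;
};

struct DenseTensor : TensorBase {
  DenseTensor() : TensorBase(Kind::kDense) {}
  static bool classof(const TensorBase* t) { return t->kind == Kind::kDense; }
};

struct SelectedRows : TensorBase {
  SelectedRows() : TensorBase(Kind::kSelectedRows) {}
  static bool classof(const TensorBase* t) {
    return t->kind == Kind::kSelectedRows;
  }
};

struct Tensor {
  std::shared_ptr<TensorBase> impl;
  bool is_dense_tensor() const { return DenseTensor::classof(impl.get()); }
  bool is_selected_rows() const { return SelectedRows::classof(impl.get()); }
};

int main() {
  Tensor t{std::make_shared<SelectedRows>()};
  std::cout << t.is_dense_tensor() << " " << t.is_selected_rows() << "\n";  // 0 1
  return 0;
}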
...@@ -24,7 +24,7 @@ limitations under the License. */ ...@@ -24,7 +24,7 @@ limitations under the License. */
#include <boost/variant.hpp> #include <boost/variant.hpp>
namespace egr { namespace egr {
class EagerTensor; class EagerVariable;
} }
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> { ...@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> {
}; };
template <> template <>
struct NameVarMapTrait<egr::EagerTensor> { struct NameVarMapTrait<egr::EagerVariable> {
using Type = using Type =
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>; std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>;
}; };
} // namespace details } // namespace details
...@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type; ...@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type;
using NameVarBaseMap = NameVarMap<VarBase>; using NameVarBaseMap = NameVarMap<VarBase>;
using NameVariableWrapperMap = NameVarMap<VariableWrapper>; using NameVariableWrapperMap = NameVarMap<VariableWrapper>;
using NameTensorMap = NameVarMap<egr::EagerTensor>; using NameTensorMap = NameVarMap<egr::EagerVariable>;
using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>; using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>;
......
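NameVarMap<T> in the hunk above is a plain trait lookup: NameVarMapTrait maps a variable type to its name-to-variables container, and the egr::EagerVariable specialization keeps the same shape under the NameTensorMap alias used for the generated ins/outs maps. A self-contained sketch of how such a trait is defined and consumed (stand-in types, not Paddle's layer.h):

#include <map>
#include <memory>
#include <string>
#include <vector>

struct VarBase {};
struct EagerVariable {};

// Primary trait: a name -> vector-of-shared-variable map keyed by the
// variable type used by the imperative tracer.
template <typename VarType>
struct NameVarMapTrait {
  using Type = std::map<std::string, std::vector<std::shared_ptr<VarType>>>;
};

// Explicit specialization for the eager variable type; same shape here, but
// the trait lets it diverge later without touching any caller.
template <>
struct NameVarMapTrait<EagerVariable> {
  using Type =
      std::map<std::string, std::vector<std::shared_ptr<EagerVariable>>>;
};

template <typename T>
using NameVarMap = typename NameVarMapTrait<T>::Type;

using NameVarBaseMap = NameVarMap<VarBase>;
using NameTensorMap = NameVarMap<EagerVariable>;

int main() {
  NameTensorMap ins;
  ins["X"].push_back(std::make_shared<EagerVariable>());
  return ins["X"].empty() ? 1 : 0;  // 0 on success
}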
...@@ -29,10 +29,6 @@ limitations under the License. */ ...@@ -29,10 +29,6 @@ limitations under the License. */
// See Note [ Why still include the fluid headers? ] // See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/framework/mixed_vector.h"
namespace egr {
class EagerTensor;
} // namespace egr
namespace pten { namespace pten {
class SelectedRows : public TensorBase, class SelectedRows : public TensorBase,
public TypeInfoTraits<TensorBase, SelectedRows> { public TypeInfoTraits<TensorBase, SelectedRows> {
...@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase, ...@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase,
std::unique_ptr<DenseTensor> value_{nullptr}; std::unique_ptr<DenseTensor> value_{nullptr};
int64_t height_; // height indicates the underline tensor's height int64_t height_; // height indicates the underline tensor's height
std::unique_ptr<RWLock> rwlock_{nullptr}; std::unique_ptr<RWLock> rwlock_{nullptr};
// TODO(jiabin): Remove this when we don't need EagerTensor support
// SelectedRows which is expected in next version.
/** Why we need this weird friend class?
* In eager mode, since some of ops doesn't support C++ API for now we need to
*use 'imperative::TraceOp' to run it.
* So, we need to support get a SelectedRows from egr::EagerTensor's
*framework::Variable obj and used it to reconstruct
* a new paddle::experimental::Tensor to support framework usage. However, we
*got 2 problems here.
* First, we got 2 unique_ptr in SelectedRows so that we can't support
*std::make_shared in EagerTensor's SetImplWithSelectedRows method,
* since we have to construct a shared_ptr for paddle::experimental::Tensor's
*impl.
* Second, when we are trying to support move constructor for SelectedRows we
*found that we can't get its rvalue from
* framework::Variable because it holds an obj of target type.
*
*
* The only three way to solve this problem is:
* 1. Just like what we have done, using friend class and just copy/move each
*member. In this way, we can avoid additional API
* and symbols.
* 2. Make pten::SelectedRows's member from unique_ptr to shared_ptr. However,
*this may cause some cost of performance.
* 3. Add some api to return or move member of framework::SelectedRows.
*However, it's not as safe as first solution.
* 4. Support all framework::SelectedRows related ops and make sure
*EagerTensor never holds framework::SelectedRows.
*
* If anyone got better ideas, welcome to contact JiabinYang, we are open for
*your help.
**/
friend class egr::EagerTensor;
}; };
} // namespace pten } // namespace pten
...@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): ...@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
expected_type += (core.VarBase, ) expected_type += (core.VarBase, )
# TODO(jiabin): uncomment it when we support declarative mode in eager # TODO(jiabin): uncomment it when we support declarative mode in eager
# if _in_eager_mode(): # if _in_eager_mode():
# expected_type += (core.eager.EagerTensor, ) # expected_type += (core.eager.Tensor, )
elif isinstance(input, core.VarBase): elif isinstance(input, core.VarBase):
raise TypeError( raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format( "Because received '{}' in {} is a imperative Variable.".format(
input_name, op_name)) input_name, op_name))
elif hasattr(core, "eager"): elif hasattr(core, "eager"):
if isinstance(input, core.eager.EagerTensor): if isinstance(input, core.eager.Tensor):
raise TypeError( raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format( "Because received '{}' in {} is a imperative Variable.".format(
......
...@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): ...@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
try: try:
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
data = self._reader.read_next_var_list() data = self._reader.read_next_var_list()
...@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): ...@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
while self._blocking_queue.size() >= len(self._places): while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
self._reader.read_next_var_list() self._reader.read_next_var_list()
...@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): ...@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list( data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
data = self._reader.read_next_var_list() data = self._reader.read_next_var_list()
......
...@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): ...@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
value = value.astype(dtype) value = value.astype(dtype)
if _in_eager_mode(): if _in_eager_mode():
return core.eager.EagerTensor(value, return core.eager.Tensor(value,
framework._current_expected_place(), framework._current_expected_place(), False,
False, zero_copy, name zero_copy, name if name else None, True)
if name else None, True)
else: else:
py_var = core.VarBase( py_var = core.VarBase(
value=value, value=value,
......
...@@ -222,7 +222,7 @@ def monkey_patch_math_varbase(): ...@@ -222,7 +222,7 @@ def monkey_patch_math_varbase():
# 2. create varbase for scalar # 2. create varbase for scalar
lhs_dtype = self.dtype lhs_dtype = self.dtype
if _in_eager_mode(): if _in_eager_mode():
other_var_should_be = core.eager.EagerTensor other_var_should_be = core.eager.Tensor
else: else:
other_var_should_be = core.VarBase other_var_should_be = core.VarBase
if not isinstance(other_var, other_var_should_be): if not isinstance(other_var, other_var_should_be):
...@@ -343,7 +343,7 @@ def monkey_patch_math_varbase(): ...@@ -343,7 +343,7 @@ def monkey_patch_math_varbase():
if core._in_eager_mode(): if core._in_eager_mode():
local_already_patch = _already_patch_eager_tensor local_already_patch = _already_patch_eager_tensor
_already_patch_eager_tensor = True _already_patch_eager_tensor = True
local_tensor = core.eager.EagerTensor local_tensor = core.eager.Tensor
else: else:
local_already_patch = _already_patch_varbase local_already_patch = _already_patch_varbase
_already_patch_varbase = True _already_patch_varbase = True
......
...@@ -150,7 +150,7 @@ def monkey_patch_varbase(): ...@@ -150,7 +150,7 @@ def monkey_patch_varbase():
""" """
if core._in_eager_mode(): if core._in_eager_mode():
base_tensor = core.eager.EagerTensor base_tensor = core.eager.Tensor
else: else:
base_tensor = core.VarBase base_tensor = core.VarBase
assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \ assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \
...@@ -180,9 +180,9 @@ def monkey_patch_varbase(): ...@@ -180,9 +180,9 @@ def monkey_patch_varbase():
"Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format( "Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
self.name, self_tensor_np.dtype, value_np.dtype) self.name, self_tensor_np.dtype, value_np.dtype)
# NOTE(wuweilong): self could be VarBase or EagerTensor, the subsequent behavior are defined in different files # NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior are defined in different files
# if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc # if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc
# if self is EagerTensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc # if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc
# this Interface behavior will be unifed in the future. # this Interface behavior will be unifed in the future.
self.value().get_tensor().set(value_np, self.value().get_tensor().set(value_np,
framework._current_expected_place()) framework._current_expected_place())
...@@ -244,8 +244,8 @@ def monkey_patch_varbase(): ...@@ -244,8 +244,8 @@ def monkey_patch_varbase():
if grad_tensor is not None: if grad_tensor is not None:
if core._in_eager_mode(): if core._in_eager_mode():
assert isinstance( assert isinstance(
grad_tensor, core.eager.EagerTensor grad_tensor, core.eager.
), "The type of grad_tensor must be paddle.Tensor" Tensor), "The type of grad_tensor must be paddle.Tensor"
else: else:
assert isinstance( assert isinstance(
grad_tensor, paddle. grad_tensor, paddle.
...@@ -592,8 +592,8 @@ def monkey_patch_varbase(): ...@@ -592,8 +592,8 @@ def monkey_patch_varbase():
# [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]]) # [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
""" """
if core._in_eager_mode(): if core._in_eager_mode():
from paddle.tensor.to_string import eager_tensor_to_string from paddle.tensor.to_string import tensor_to_string
return eager_tensor_to_string(self) return tensor_to_string(self)
else: else:
from paddle.tensor.to_string import to_string from paddle.tensor.to_string import to_string
return to_string(self) return to_string(self)
...@@ -624,7 +624,7 @@ def monkey_patch_varbase(): ...@@ -624,7 +624,7 @@ def monkey_patch_varbase():
"Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy" "Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy"
) )
if core._in_eager_mode(): if core._in_eager_mode():
new_varbase = core.eager.EagerTensor() new_varbase = core.eager.Tensor()
else: else:
new_varbase = core.VarBase() new_varbase = core.VarBase()
new_varbase.name = self.name + unique_name.generate("_deepcopy") new_varbase.name = self.name + unique_name.generate("_deepcopy")
...@@ -808,16 +808,16 @@ def monkey_patch_varbase(): ...@@ -808,16 +808,16 @@ def monkey_patch_varbase():
("__getitem__", __getitem__), ("item", item), ("__getitem__", __getitem__), ("item", item),
("__setitem__", __setitem__), ("_to", _to)): ("__setitem__", __setitem__), ("_to", _to)):
if core._in_eager_mode(): if core._in_eager_mode():
setattr(core.eager.EagerTensor, method_name, method) setattr(core.eager.Tensor, method_name, method)
else: else:
setattr(core.VarBase, method_name, method) setattr(core.VarBase, method_name, method)
if core._in_eager_mode(): if core._in_eager_mode():
setattr(core.eager.EagerTensor, "_grad_ivar", _grad_ivar) setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.EagerTensor, "_set_grad_ivar", _set_grad_ivar) setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.EagerTensor, "clear_gradient", clear_gradient) setattr(core.eager.Tensor, "clear_gradient", clear_gradient)
setattr(core.eager.EagerTensor, "clone", clone) setattr(core.eager.Tensor, "clone", clone)
setattr(core.eager.EagerTensor, "value", value) setattr(core.eager.Tensor, "value", value)
else: else:
setattr(core.VarBase, "__name__", "Tensor") setattr(core.VarBase, "__name__", "Tensor")
setattr(core.VarBase, "grad", grad) setattr(core.VarBase, "grad", grad)
......
...@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR, ...@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
dtype = convert_np_dtype_to_dtype_(dtype) dtype = convert_np_dtype_to_dtype_(dtype)
if _in_eager_mode(): if _in_eager_mode():
eager_tensor = core.eager.EagerTensor( eager_tensor = core.eager.Tensor(
dtype if dtype else core.VarDesc.VarType.FP32, dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True if type else core.VarDesc.VarType.LOD_TENSOR, True
...@@ -1076,7 +1076,7 @@ class VariableMetaClass(type): ...@@ -1076,7 +1076,7 @@ class VariableMetaClass(type):
t = type(instance) t = type(instance)
if in_dygraph_mode(): if in_dygraph_mode():
if _in_eager_mode(): if _in_eager_mode():
return issubclass(t, core.eager.EagerTensor) return issubclass(t, core.eager.Tensor)
return issubclass(t, core.VarBase) return issubclass(t, core.VarBase)
else: else:
return issubclass(t, Variable) return issubclass(t, Variable)
...@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase): ...@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase):
if hasattr(core, "eager"): if hasattr(core, "eager"):
_core_eager_eagertensor = core.eager.EagerTensor _core_eager_eagertensor = core.eager.Tensor
else: else:
_core_eager_eagertensor = object _core_eager_eagertensor = object
......
...@@ -85,10 +85,9 @@ class LayerHelperBase(object): ...@@ -85,10 +85,9 @@ class LayerHelperBase(object):
assert in_dygraph_mode( assert in_dygraph_mode(
), "to_variable could only be called in dygraph mode" ), "to_variable could only be called in dygraph mode"
if _in_eager_mode(): if _in_eager_mode():
return core.eager.EagerTensor(value, return core.eager.Tensor(value,
_current_expected_place(), False, _current_expected_place(), False,
False, name False, name if name else None, True)
if name else None, True)
else: else:
py_var = core.VarBase( py_var = core.VarBase(
value=value, value=value,
......
...@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase): ...@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase):
def __next__(self): def __next__(self):
try: try:
if _in_eager_mode(): if _in_eager_mode():
return core.eager.read_next_eager_tensor_list( return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0]) self._reader.read_next_list()[0])
else: else:
return self._reader.read_next_var_list() return self._reader.read_next_var_list()
......
...@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase): ...@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
if fluid.framework._in_eager_mode(): if fluid.framework._in_eager_mode():
var_base = paddle.to_tensor(np.array([3, 4, 5])) var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.eager.EagerTensor)) self.assertTrue(isinstance(var_base, core.eager.Tensor))
else: else:
var_base = paddle.to_tensor(np.array([3, 4, 5])) var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.VarBase)) self.assertTrue(isinstance(var_base, core.VarBase))
...@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase): ...@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase):
t.set(x, fluid.CPUPlace()) t.set(x, fluid.CPUPlace())
if _in_eager_mode(): if _in_eager_mode():
# TODO(jiabin): Support Kwargs and uncomment these tests # TODO(jiabin): Support Kwargs and uncomment these tests
# egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace()) # egr_tmp = fluid.core.eager.Tensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace()) egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace())
egr_tmp3 = paddle.to_tensor(x) egr_tmp3 = paddle.to_tensor(x)
egr_tmp4 = fluid.core.eager.EagerTensor(y) egr_tmp4 = fluid.core.eager.Tensor(y)
# egr_tmp5 = fluid.core.eager.EagerTensor(value=x) # egr_tmp5 = fluid.core.eager.Tensor(value=x)
# TODO(jiabin): Support it when we merge LoDTensor with DenseTensor # TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
egr_tmp6 = fluid.core.eager.EagerTensor(t) egr_tmp6 = fluid.core.eager.Tensor(t)
# self.assertTrue(np.array_equal(x, egr_tmp.numpy())) # self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy())) self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
...@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase): ...@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase):
self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type') self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type')
if core._in_eager_mode(): if core._in_eager_mode():
self.assertEqual( self.assertEqual(
type(paddle.fluid.core.eager.EagerTensor).__name__, type(paddle.fluid.core.eager.Tensor).__name__, 'pybind11_type')
'pybind11_type')
else: else:
self.assertEqual( self.assertEqual(
type(paddle.fluid.core.VarBase).__name__, 'pybind11_type') type(paddle.fluid.core.VarBase).__name__, 'pybind11_type')
......
...@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase): ...@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
data_np[0][0] = -1 data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1) self.assertEqual(data_np[0][0], -1)
if _in_eager_mode(): if _in_eager_mode():
# eager_mode, var2 is EagerTensor, is not subscriptable # eager_mode, var2 is Tensor, is not subscriptable
# TODO(wuweilong): to support slice in eager mode later # TODO(wuweilong): to support slice in eager mode later
self.assertNotEqual(var2.numpy()[0][0], -1) self.assertNotEqual(var2.numpy()[0][0], -1)
else: else:
......
...@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler): ...@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler):
self.last_epoch = epoch self.last_epoch = epoch
if _in_eager_mode(): if _in_eager_mode():
tmp = core.eager.EagerTensor tmp = core.eager.Tensor
else: else:
tmp = Tensor tmp = Tensor
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1] # loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
......
...@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True): ...@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
# TOOD(jiabin): Support kwargs in eager tensor constructor # TOOD(jiabin): Support kwargs in eager tensor constructor
if _in_eager_mode() and isinstance(data, np.ndarray): if _in_eager_mode() and isinstance(data, np.ndarray):
return core.eager.EagerTensor(data, place, False, False, None, return core.eager.Tensor(data, place, False, False, None, stop_gradient)
stop_gradient)
else: else:
return paddle.Tensor( return paddle.Tensor(
value=data, value=data,
......
...@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'): ...@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'):
data=data) data=data)
def eager_tensor_to_string(tensor, prefix='Tensor'): def tensor_to_string(tensor, prefix='Tensor'):
indent = len(prefix) + 1 indent = len(prefix) + 1
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})" _template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
......