未验证 提交 831fd86e 编写于 作者: J Jiabin Yang 提交者: GitHub

EagerTensor to EagerVariable (#39447)

* merge legacy to fluid

* Remove legacy code

* Remove legacy code

* Remove DataType test

* Using Tensor directly instead of using EagerTensor

* support gradient_accumulation

* make test_imperative_lod_tensor_to_selected_rows longer

* make test_imperative_lod_tensor_to_selected_rows longer

* refine code

* Rename all EagerTensor to Tensor

* Rename some EagerTensor to Tensor

* rename EagerTensor to EagerVariable

* add more test

* merge develop and refine code
上级 f21d7957
......@@ -1227,11 +1227,11 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
// Forward Function Body
// According to fwd_inputs_name_pos_map
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
ins =
{ {"X" , TrySyncToVars(X)}, { "Y" , TrySyncToVars(Y)} };
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
outs =
{
{"Out0" , CreateVars(Out0Num)}, {"Out1"
......@@ -1316,7 +1316,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_INS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> ins = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> ins = { "
"%s };\n";
std::string ins_map_str =
paddle::string::Sprintf(FWD_INS_MAP_TEMPLATE, ins_contents_str);
......@@ -1353,8 +1353,9 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
if (op_passing_outs_map[op_type].count(output_name)) {
const std::string output_var_name = output_name + "Var";
// Pass Output from function argument(EagerTensor*/vector<EagerTensor*>&),
// in form of shared_ptr<EagerTensor>/vector<shared_ptr<EagerTensor>>
// Pass Output from function
// argument(EagerVariable*/vector<EagerVariable*>&),
// in form of shared_ptr<EagerVariable>/vector<shared_ptr<EagerVariable>>
if (output.duplicable()) {
const char* FWD_NUM_ARG_TEMPLATE =
", std::vector<paddle::experimental::Tensor*>& %s";
......@@ -1395,7 +1396,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
} else {
const char* FWD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance()."
"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance()."
"GenerateUniqueName())}},";
outs_contents_str +=
paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE, output_name);
......@@ -1407,7 +1408,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_OUTS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> outs = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> outs = { "
"%s };\n";
std::string outs_map_str =
paddle::string::Sprintf(FWD_OUTS_MAP_TEMPLATE, outs_contents_str);
......@@ -1482,7 +1483,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
generated_function_body += out_tensor_str;
}
generated_function_body += "\n";
VLOG(6) << "Converted Output VarBase to EagerTensor(s)";
VLOG(6) << "Converted Output VarBase to EagerVariable(s)";
// [Generation] Handle core_ops_returns_info
core_ops_returns_info[op_type] = return_contents;
......@@ -1627,7 +1628,7 @@ static std::string GenerateSingleOpBase(
const char* BWD_INS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
"%s };\n";
std::string ins_map_str =
paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str);
......@@ -1704,7 +1705,7 @@ static std::string GenerateSingleOpBase(
} else {
const char* GRAD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance("
"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance("
")."
"GenerateUniqueName())}},";
outs_contents_str += paddle::string::Sprintf(
......@@ -1723,7 +1724,7 @@ static std::string GenerateSingleOpBase(
const char* BWD_OUTS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
"%s };\n";
std::string outs_map_str = paddle::string::Sprintf(
BWD_OUTS_MAP_TEMPLATE, outs_name, outs_contents_str);
......
......@@ -40,36 +40,28 @@
* **/
namespace egr {
class EagerTensor final {
class EagerVariable final {
public:
/* Default constructor and name constructor should only be used to construct
* output and in fluid*/
EagerTensor() = default;
EagerVariable() = default;
explicit EagerTensor(const std::string& name) : name_(name) {}
explicit EagerVariable(const std::string& name) : name_(name) {}
explicit EagerTensor(const paddle::experimental::Tensor& tensor)
explicit EagerVariable(const paddle::experimental::Tensor& tensor)
: name_(tensor.name()) {
if (tensor.defined()) {
if (tensor.is_dense_tensor()) {
auto* framework_tensor =
var_.GetMutable<paddle::framework::LoDTensor>();
// Contruct framework::Tensor from egr::EagerTensor
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
PADDLE_ENFORCE_EQ((tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Failed to Trans Tensor to EagerVariable since "
"we got Tensor with type DenseTensor, and we got "
"EagerVariable with another type."));
*framework_tensor = *tensor_dense;
ConstructVariableFromTensor(tensor);
} else if (tensor.is_selected_rows()) {
ConstructVariableFromSelectedRows(tensor);
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unrecognized egr::EagerVariable type, only "
"DenseTensor and SelectedRows is supported for now."));
"DenseTensor and SelectedRows are supported for now."));
}
} else {
VLOG(6) << "Build Empty EagerTensor with name " << name_;
VLOG(6) << "Build Empty EagerVariable with name " << name_;
}
}
......@@ -77,21 +69,20 @@ class EagerTensor final {
std::shared_ptr<pten::TensorBase> GetTensorBase() {
// Construct allocation only once.
if (var_.IsInitialized()) {
if (var_.IsType<paddle::framework::LoDTensor>()) {
return SetImplWithLegacyTensor<pten::DenseTensor>();
} else if (var_.IsType<paddle::framework::Tensor>()) {
return SetImplWithLegacyTensor<pten::DenseTensor>();
if (var_.IsType<paddle::framework::LoDTensor>() ||
var_.IsType<paddle::framework::Tensor>()) {
return SetImplWithLegacyTensor();
} else if (var_.IsType<pten::SelectedRows>()) {
return SetImplWithSelectedRows();
return SetImplWithLegacySelectedRows();
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unable to fetch underlying tensor "
"from EagerTensor, only LoDTensor and "
"from EagerVariable, only LoDTensor and "
"Tensor are supported for now"));
}
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Can not Sync EagerTensor %s whose paddle::framework::Variable is "
"Can not Sync EagerVariable %s whose paddle::framework::Variable is "
"not initialized!",
name()));
}
......@@ -107,23 +98,52 @@ class EagerTensor final {
void set_name(const std::string& name) { name_ = name; }
private:
template <typename LEGACY_TYPE>
std::shared_ptr<pten::TensorBase> SetImplWithLegacyTensor() {
const auto& framework_tensor = var_.Get<LEGACY_TYPE>();
const auto& framework_tensor = var_.Get<pten::DenseTensor>();
VLOG(8) << "Sync Var to tensor for: " << name();
return std::make_shared<LEGACY_TYPE>(std::move(framework_tensor));
return std::make_shared<pten::DenseTensor>(framework_tensor);
}
std::shared_ptr<pten::TensorBase> SetImplWithSelectedRows() {
auto* selected_rows = var_.GetMutable<pten::SelectedRows>();
auto res = std::make_shared<pten::SelectedRows>(selected_rows->rows_,
selected_rows->height_);
res->value_.reset(selected_rows->value_.release());
res->id_to_index_ = std::move(selected_rows->id_to_index_);
res->rwlock_.reset(selected_rows->rwlock_.release());
std::shared_ptr<pten::TensorBase> SetImplWithLegacySelectedRows() {
auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
VLOG(8) << "Sync SelectedRows to tensor for: " << name();
auto res =
std::make_shared<pten::SelectedRows>(std::move(*framework_tensor));
var_.Clear();
return res;
}
void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
auto* framework_tensor = var_.GetMutable<pten::DenseTensor>();
    // Construct framework::Tensor from egr::EagerVariable
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
PADDLE_ENFORCE_EQ(
(tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
"Or it holds empty impl, this should not happend since we should "
"treat all kinds of tensor as what they are.",
tensor.name()));
*framework_tensor = *tensor_dense;
}
void ConstructVariableFromSelectedRows(
const paddle::experimental::Tensor& tensor) {
auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
    // Construct framework::Tensor from egr::EagerVariable
auto tensor_dense =
std::dynamic_pointer_cast<pten::SelectedRows>(tensor.impl());
PADDLE_ENFORCE_EQ(
(tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
"Or it holds empty impl, this should not happend since we should "
"treat all kinds of tensor as what they are.",
tensor.name()));
*framework_tensor = std::move(*tensor_dense);
}
private:
std::string name_{""};
paddle::framework::Variable var_;
......
......@@ -115,7 +115,7 @@ TEST(Tensor, MemberFunction) {
CHECK_EQ(tmp_autograd_meta_test->val_, 2);
}
TEST(EagerTensor, Constructor) {
TEST(EagerVariable, Constructor) {
paddle::experimental::Tensor t3;
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
......@@ -134,7 +134,7 @@ TEST(EagerTensor, Constructor) {
CHECK_EQ(t3.defined(), false);
t3.set_impl(dt);
egr::EagerTensor et3 = egr::EagerTensor(t3);
egr::EagerVariable et3 = egr::EagerVariable(t3);
VLOG(6) << "SyncToVar";
CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
5.0f);
......
......@@ -167,7 +167,7 @@ TEST(EagerUtils, PassStopGradient) {
TEST(EagerUtils, TrySyncToVar) {
paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
auto tensor = CreateTestCPUTensor(5.0f, ddim);
std::vector<std::shared_ptr<egr::EagerTensor>> var_bases = {
std::vector<std::shared_ptr<egr::EagerVariable>> var_bases = {
egr::EagerUtils::TrySyncToVar(tensor)};
paddle::framework::Variable* var = var_bases[0]->MutableVar();
......@@ -187,7 +187,7 @@ TEST(EagerUtils, TrySyncToVars) {
std::vector<paddle::experimental::Tensor> tensors = {
CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)};
std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
std::vector<std::shared_ptr<egr::EagerVariable>> var_bases =
egr::EagerUtils::TrySyncToVars(tensors);
{
......@@ -218,7 +218,7 @@ TEST(EagerUtils, TrySyncToVars) {
TEST(EagerUtils, CreateVars) {
VLOG(6) << "Check CreateVars";
std::vector<std::shared_ptr<egr::EagerTensor>> outs =
std::vector<std::shared_ptr<egr::EagerVariable>> outs =
egr::EagerUtils::CreateVars(2);
CHECK_EQ(outs.size(), size_t(2));
CHECK(outs[0]->Var().IsInitialized() == false);
......
......@@ -131,17 +131,17 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
target->SetSingleOutRankWithSlot(slot_id, 0);
}
std::shared_ptr<egr::EagerTensor> EagerUtils::TrySyncToVar(
std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
const paddle::experimental::Tensor& tensor) {
return std::make_shared<egr::EagerTensor>(tensor);
return std::make_shared<egr::EagerVariable>(tensor);
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const paddle::experimental::Tensor& tensor) {
return {TrySyncToVar(tensor)};
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
paddle::experimental::Tensor* tensor) {
PADDLE_ENFORCE_NOT_NULL(
tensor,
......@@ -151,9 +151,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return {TrySyncToVar(*tensor)};
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const std::vector<paddle::experimental::Tensor*>& tensors) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
size_t num = tensors.size();
res.reserve(num);
for (size_t i = 0; i < num; i++) {
......@@ -169,9 +169,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return res;
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const std::vector<paddle::experimental::Tensor>& tensors) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
size_t num = tensors.size();
res.reserve(num);
for (size_t i = 0; i < num; i++) {
......@@ -180,19 +180,19 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return res;
}
std::vector<std::shared_ptr<EagerTensor>> EagerUtils::CreateVars(
std::vector<std::shared_ptr<EagerVariable>> EagerUtils::CreateVars(
const size_t num) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
res.reserve(num);
for (size_t i = 0; i < num; i++) {
res.emplace_back(
new EagerTensor(egr::Controller::Instance().GenerateUniqueName()));
new EagerVariable(egr::Controller::Instance().GenerateUniqueName()));
}
return res;
}
std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs) {
const std::vector<std::shared_ptr<EagerVariable>>& outs) {
std::vector<paddle::experimental::Tensor> res;
res.reserve(outs.size());
for (const auto& out : outs) {
......@@ -209,7 +209,7 @@ std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
}
paddle::experimental::Tensor EagerUtils::GetOutput(
const std::shared_ptr<EagerTensor>& out) {
const std::shared_ptr<EagerVariable>& out) {
PADDLE_ENFORCE_NOT_NULL(
out.get(), paddle::platform::errors::Fatal(
"Eager Tensor %s is null and cannot be copied. We "
......@@ -219,7 +219,7 @@ paddle::experimental::Tensor EagerUtils::GetOutput(
return paddle::experimental::Tensor(out->GetTensorBase(), out->name());
}
void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
paddle::experimental::Tensor* tensor) {
PADDLE_ENFORCE_NOT_NULL(
tensor, paddle::platform::errors::Fatal(
......@@ -231,7 +231,7 @@ void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
}
void EagerUtils::OverwriteOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs,
const std::vector<std::shared_ptr<EagerVariable>>& outs,
const std::vector<paddle::experimental::Tensor*>& tensors) {
PADDLE_ENFORCE_EQ(
outs.size(), tensors.size(),
......
......@@ -88,7 +88,7 @@ class EagerUtils {
/**
* We have to use autograd_meta and multi_autograd_meta to initialize
* autograd_meta for tensor, since we can't init it in
* egr::EagerTensor's
* egr::EagerVariable's
* constructor (it's abstract class there)
*
* **/
......@@ -151,34 +151,35 @@ class EagerUtils {
// Intermediate state needed; remove this once we don't need legacy
// Inner Method
static std::shared_ptr<egr::EagerTensor> TrySyncToVar(
static std::shared_ptr<egr::EagerVariable> TrySyncToVar(
const paddle::experimental::Tensor& tensor);
// Basic Input
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const paddle::experimental::Tensor& tensor);
// Basic Output
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
paddle::experimental::Tensor* tensor);
// Multi Output
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const std::vector<paddle::experimental::Tensor*>& tensors);
// Multi Input
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const std::vector<paddle::experimental::Tensor>& tensors);
// Construct empty output
static std::vector<std::shared_ptr<EagerTensor>> CreateVars(const size_t num);
static std::vector<std::shared_ptr<EagerVariable>> CreateVars(
const size_t num);
// Construct Tensor From var
static std::vector<paddle::experimental::Tensor> GetOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs);
const std::vector<std::shared_ptr<EagerVariable>>& outs);
static paddle::experimental::Tensor GetOutput(
const std::shared_ptr<EagerTensor>& out);
const std::shared_ptr<EagerVariable>& out);
// Sync Back to origin output Tensor
static void OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
static void OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
paddle::experimental::Tensor* tensor);
static void OverwriteOutputs(const paddle::experimental::Tensor& out,
paddle::experimental::Tensor* tensor);
static void OverwriteOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs,
const std::vector<std::shared_ptr<EagerVariable>>& outs,
const std::vector<paddle::experimental::Tensor*>& tensors);
static void OverwriteOutputs(
const std::vector<paddle::experimental::Tensor>& outs,
......
......@@ -340,8 +340,8 @@ NameVarMap<VarType> AutoCastInputs(const std::string& op_type,
}
template NameVarMap<VarBase> AutoCastInputs<VarBase>(
const std::string& op_type, const NameVarMap<VarBase>& ins);
template NameVarMap<egr::EagerTensor> AutoCastInputs<egr::EagerTensor>(
const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
template NameVarMap<egr::EagerVariable> AutoCastInputs<egr::EagerVariable>(
const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
template <typename VarType>
NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
const NameVarMap<VarType>& ins) {
......@@ -384,7 +384,7 @@ NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
}
template NameVarMap<VarBase> CastPureFp16Inputs<VarBase>(
const std::string& op_type, const NameVarMap<VarBase>& ins);
template NameVarMap<egr::EagerTensor> CastPureFp16Inputs<egr::EagerTensor>(
const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
template NameVarMap<egr::EagerVariable> CastPureFp16Inputs<egr::EagerVariable>(
const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
} // namespace imperative
} // namespace paddle
......@@ -177,9 +177,9 @@ std::string LayerDebugString(const std::string& op_type,
}
std::string LayerDebugString(const std::string& op_type,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs) {
return LayerDebugStringImpl<egr::EagerTensor>(op_type, ins, outs);
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs) {
return LayerDebugStringImpl<egr::EagerVariable>(op_type, ins, outs);
}
template <typename VarType>
......@@ -194,11 +194,16 @@ static void SetForwardDataTypeOfGradVars(const NameVarMap<VarType>& outs) {
}
}
template <>
void SetForwardDataTypeOfGradVars<egr::EagerTensor>(
const NameVarMap<egr::EagerTensor>& outs) {
void SetForwardDataTypeOfGradVars<egr::EagerVariable>(
const NameVarMap<egr::EagerVariable>& outs) {
// In eager mode we don't need this.
}
void TestSetForwardDataTypeOfGradVarsEager(
const NameVarMap<egr::EagerVariable>& outs) {
SetForwardDataTypeOfGradVars<egr::EagerVariable>(outs);
}
VarBase::VarBase(const std::shared_ptr<VariableWrapper>& var)
: var_(var), grad_node_(var->GetGradNode()) {
if (auto grad_var = var_->GetGradVar()) {
......@@ -528,12 +533,12 @@ void OpBase::Run(const framework::OperatorBase& op,
}
void OpBase::Run(const framework::OperatorBase& op,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs,
const platform::Place& place) {
OpBaseRunImpl<egr::EagerTensor>(op, ins, outs, attrs, default_attrs, place);
OpBaseRunImpl<egr::EagerVariable>(op, ins, outs, attrs, default_attrs, place);
}
void ClearNoNeedBufferInputs(OpBase* op) {
......
......@@ -185,8 +185,8 @@ class OpBase {
const framework::AttributeMap& default_attrs,
const platform::Place& place);
static void Run(const framework::OperatorBase& op,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs,
const platform::Place& place);
......
......@@ -89,11 +89,16 @@ void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
}
template <>
void HandleComplexGradToRealGrad<egr::EagerTensor>(
const NameVarMap<egr::EagerTensor>& outs) {
void HandleComplexGradToRealGrad<egr::EagerVariable>(
const NameVarMap<egr::EagerVariable>& outs) {
// TODO(jiabin): Support Complex here.
}
void TestHandleComplexGradToRealGradEager(
const NameVarMap<egr::EagerVariable>& outs) {
HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}
PreparedOp::PreparedOp(const framework::OperatorBase& op,
const framework::RuntimeContext& ctx,
const framework::OpKernelType& kernel_type,
......@@ -322,14 +327,14 @@ PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
default_attrs);
}
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::OperatorWithKernel& op,
const platform::Place& place,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs) {
return PrepareImpl<egr::EagerTensor>(ins, outs, op, place, attrs,
default_attrs);
return PrepareImpl<egr::EagerVariable>(ins, outs, op, place, attrs,
default_attrs);
}
template <typename VarType>
static void PreparedOpRunImpl(
......@@ -461,18 +466,18 @@ void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
}
}
void PreparedOp::Run(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs) {
if (run_pten_kernel_) {
PreparedOpRunPtImpl<egr::EagerTensor>(
PreparedOpRunPtImpl<egr::EagerVariable>(
op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins,
outs, attrs, default_attrs);
} else {
PreparedOpRunImpl<egr::EagerTensor>(op_, ctx_, kernel_type_, func_,
dev_ctx_, ins, outs, attrs,
default_attrs);
PreparedOpRunImpl<egr::EagerVariable>(op_, ctx_, kernel_type_, func_,
dev_ctx_, ins, outs, attrs,
default_attrs);
}
}
......
......@@ -63,8 +63,8 @@ void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
}
template <>
void SetForwardDataTypeOfGradVar<egr::EagerTensor>(
const std::shared_ptr<egr::EagerTensor>& var) {
void SetForwardDataTypeOfGradVar<egr::EagerVariable>(
const std::shared_ptr<egr::EagerVariable>& var) {
VLOG(10) << "Var in Eager dose not support SetForwardDataTypeOfGradVar: "
<< var->name();
// TODO(jiabin): SetForwardDataType of Grad var is not supported yet in
......@@ -171,8 +171,8 @@ class PreparedOp {
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
static PreparedOp Prepare(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
static PreparedOp Prepare(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::OperatorWithKernel& op,
const platform::Place& place,
const framework::AttributeMap& attrs,
......@@ -187,8 +187,8 @@ class PreparedOp {
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
void Run(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
void Run(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
......
......@@ -31,8 +31,8 @@
namespace paddle {
namespace imperative {
extern std::string LayerDebugString(const std::string& op_type,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs);
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs);
extern std::shared_ptr<GradOpNode> CreateGradOpNode(
const framework::OperatorBase& op, const NameTensorMap& ins,
......@@ -41,20 +41,21 @@ extern std::shared_ptr<GradOpNode> CreateGradOpNode(
const std::map<std::string, std::string>& inplace_map);
TEST(test_eager, eager_debug) {
std::shared_ptr<egr::EagerTensor> x_in(new egr::EagerTensor("x_in"));
std::shared_ptr<egr::EagerTensor> y_in(new egr::EagerTensor("y_in"));
std::shared_ptr<egr::EagerTensor> vout(new egr::EagerTensor("vout"));
imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {x_in}}, {"Y", {y_in}}};
imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {vout}}};
std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {x_in}},
{"Y", {y_in}}};
imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {vout}}};
LayerDebugString("mul", ins, outs);
}
TEST(test_create_node, eager_node) {
auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false);
framework::Scope scope;
auto ctx = framework::RuntimeContext({}, {});
imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {nullptr}},
{"Y", {nullptr}}};
imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {nullptr}}};
imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {nullptr}},
{"Y", {nullptr}}};
imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {nullptr}}};
CreateGradOpNode((*op.get()), ins, outs, framework::AttributeMap{},
framework::AttributeMap{}, platform::CPUPlace(), {});
}
......@@ -72,26 +73,26 @@ TEST(test_var_helper, eager_var_helper) {
ASSERT_ANY_THROW(
InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));
auto egr_tensor = std::make_shared<egr::EagerTensor>();
auto egr_tensor2 = std::make_shared<egr::EagerTensor>();
auto egr_tensor = std::make_shared<egr::EagerVariable>();
auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
egr_tensor->MutableVar()
->GetMutable<pten::SelectedRows>()
->mutable_value()
->mutable_data<float>(platform::CPUPlace());
egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
VLOG(6) << "egr_tensor create with ";
ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerTensor>(egr_tensor)));
ASSERT_TRUE(GetDataType<egr::EagerTensor>(egr_tensor) ==
ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
framework::proto::VarType::FP32);
GetCachedValue<egr::EagerTensor>(
GetCachedValue<egr::EagerVariable>(
egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
platform::CPUPlace()));
SetCachedValue<egr::EagerTensor>(
SetCachedValue<egr::EagerVariable>(
egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
platform::CPUPlace()),
egr_tensor2);
ASSERT_ANY_THROW(GetPlace<egr::EagerTensor>(egr_tensor2));
ASSERT_ANY_THROW(SetType<egr::EagerTensor>(
ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY));
}
} // namespace imperative
......
......@@ -39,6 +39,8 @@ using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
using var_pair = std::pair<std::string, vb_vector>;
extern void TestSetForwardDataTypeOfGradVarsEager(
const NameVarMap<egr::EagerVariable>& outs);
template <typename VarType>
class TestRuntimeInferVarTypeContext
: public RuntimeInferVarTypeContext<VarType> {
......@@ -406,6 +408,11 @@ TEST(test_layer, test_inner_op_not_inited) {
ASSERT_THROW(op.CheckAttrs(), platform::EnforceNotMet);
}
TEST(test_layer, test_eager) {
imperative::NameTensorMap ins = {};
TestSetForwardDataTypeOfGradVarsEager(ins);
}
} // namespace imperative
} // namespace paddle
......
......@@ -32,6 +32,9 @@ namespace framework = paddle::framework;
namespace paddle {
namespace imperative {
extern void TestHandleComplexGradToRealGradEager(
const NameVarMap<egr::EagerVariable>& outs);
static framework::VariableNameMap CreateVarNameMap(
const framework::OpInfo& op_info, const std::string& op_type,
const NameVarBaseMap& varbase_map, bool is_input) {
......@@ -209,6 +212,11 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
TestPrepareDataSamePlace({});
}
TEST(test_prepare_op, test_complex_eager) {
NameVarMap<egr::EagerVariable> outs = {};
TestHandleComplexGradToRealGradEager(outs);
}
#ifdef PADDLE_WITH_MKLDNN
TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
TestPrepareDataSamePlace({{"use_mkldnn", true}});
......
......@@ -37,9 +37,10 @@ namespace paddle {
namespace imperative {
using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
using var_pair = std::pair<std::string, vb_vector>;
using ev_vector = std::vector<std::shared_ptr<egr::EagerVariable>>;
using ev_pair = std::pair<std::string, ev_vector>;
TEST(test_tracer, test_trace_op) {
// Doing an mul
imperative::Tracer tracer;
......@@ -546,6 +547,44 @@ TEST(test_tracer, test_execution_context) {
ASSERT_EQ(dy_ctx.OutputName("Out"), framework::kEmptyVarName);
}
TEST(test_tracer, eager_tracer) {
// Doing an mul
imperative::Tracer tracer;
std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
platform::CPUPlace place;
std::vector<float> src_data(10, 2.0);
std::vector<int64_t> dims1 = {2, 5};
std::vector<int64_t> dims2 = {5, 2};
auto* x_in_tensor = x_in->MutableVar()->GetMutable<framework::LoDTensor>();
auto* y_in_tensor = y_in->MutableVar()->GetMutable<framework::LoDTensor>();
x_in_tensor->Resize(framework::make_ddim(dims1));
auto* mutable_x = x_in_tensor->mutable_data<float>(place);
paddle::memory::Copy(place, mutable_x, place, src_data.data(),
sizeof(float) * src_data.size());
y_in_tensor->Resize(framework::make_ddim(dims2));
auto* mutable_y = y_in_tensor->mutable_data<float>(place);
paddle::memory::Copy(place, mutable_y, place, src_data.data(),
sizeof(float) * src_data.size());
ev_pair x_pair = ev_pair("X", ev_vector(1, x_in));
ev_pair y_pair = ev_pair("Y", ev_vector(1, y_in));
ev_pair out_pair = ev_pair("Out", ev_vector(1, vout));
imperative::NameTensorMap ins = {x_pair, y_pair};
imperative::NameTensorMap outs = {out_pair};
framework::AttributeMap mul_attr_map;
mul_attr_map["use_mkldnn"] = false;
tracer.TraceOp<egr::EagerVariable>("mul", ins, outs, mul_attr_map, place,
true);
const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
}
}
} // namespace imperative
} // namespace paddle
......
......@@ -168,7 +168,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* passed_default_attrs_,
bool override_default_attr_map) {
bool use_default_attr_map) {
platform::RecordEvent op_type_record_event(type);
platform::ScopedFlushDenormal flush;
VLOG(1) << "Trace Op: " << type;
......@@ -244,7 +244,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
"CustomPlace."));
#endif
}
if (!override_default_attr_map) {
if (!use_default_attr_map) {
PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
paddle::platform::errors::PermissionDenied(
"Detected default_attrs = nullptr."));
......@@ -280,16 +280,14 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
}
if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
if (!override_default_attr_map) {
PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
paddle::platform::errors::PermissionDenied(
"Detected default_attrs = nullptr."));
CreateGradOpNode(*op, new_ins, outs, attrs, *passed_default_attrs_, place,
inplace_map);
} else {
CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
inplace_map);
}
PADDLE_ENFORCE_EQ(
passed_default_attrs_, nullptr,
paddle::platform::errors::PermissionDenied(
"We expect passed_default_attrs_ is nullptr while "
"use_default_attr_map is true, however we got not null "
"passed_default_attrs_. Please check your usage of trace_op. "));
CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
inplace_map);
} else {
VLOG(3) << "No Grad to track for Op: " << type;
}
......@@ -301,16 +299,14 @@ template void Tracer::TraceOp<VarBase>(
const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map);
paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
template void Tracer::TraceOp<egr::EagerTensor>(
const std::string& type, const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs, framework::AttributeMap attrs,
template void Tracer::TraceOp<egr::EagerVariable>(
const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map_,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map);
paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
const NameVarBaseMap& outs, framework::AttributeMap attrs,
......@@ -324,13 +320,12 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
paddle::framework::AttributeMap attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
bool use_default_attr_map,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp with override_default_attr_map: "
<< override_default_attr_map;
TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), place, false,
inplace_map, default_attrs,
override_default_attr_map);
VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
<< use_default_attr_map;
TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
inplace_map, default_attrs, use_default_attr_map);
}
void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
......@@ -338,8 +333,9 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
paddle::framework::AttributeMap attrs,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp(less): ";
TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), expected_place_,
false, inplace_map, nullptr, true);
TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
expected_place_, false, inplace_map, nullptr,
true);
}
void Tracer::SetExpectedPlace(platform::Place place) {
......
......@@ -69,7 +69,7 @@ class Tracer {
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map = {},
paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
bool override_default_attr_map = true);
bool use_default_attr_map = true);
void TraceOp(const std::string& type, const NameVarBaseMap& ins,
const NameVarBaseMap& outs, framework::AttributeMap attrs,
......@@ -83,7 +83,7 @@ class Tracer {
const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
bool use_default_attr_map,
const std::map<std::string, std::string>& inplace_map = {});
bool ComputeRequiredGrad(const NameVarBaseMap& ins,
......
......@@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace<VarBase>(
const std::shared_ptr<VarBase> &var);
template const paddle::platform::Place &GetPlace<VariableWrapper>(
const std::shared_ptr<VariableWrapper> &var);
template const paddle::platform::Place &GetPlace<egr::EagerTensor>(
const std::shared_ptr<egr::EagerTensor> &var);
template const paddle::platform::Place &GetPlace<egr::EagerVariable>(
const std::shared_ptr<egr::EagerVariable> &var);
/* GetNameFromVar */
template <typename VarType>
......@@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr<VarType> var) {
return var->Name();
}
template <>
const std::string &GetNameFromVar<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor) {
const std::string &GetNameFromVar<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor) {
return tensor->name();
}
template const std::string &GetNameFromVar<VariableWrapper>(
......@@ -120,8 +120,8 @@ void SetType(std::shared_ptr<VarType> var,
var->SetType(type);
}
template <>
void SetType<egr::EagerTensor>(std::shared_ptr<egr::EagerTensor> var,
framework::proto::VarType::Type type) {
void SetType<egr::EagerVariable>(std::shared_ptr<egr::EagerVariable> var,
framework::proto::VarType::Type type) {
switch (type) {
case paddle::framework::proto::VarType::LOD_TENSOR: {
var->MutableVar()->GetMutable<paddle::framework::LoDTensor>();
......@@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr<VarType> var) {
return var->Type();
}
template <>
framework::proto::VarType::Type GetType<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> var) {
framework::proto::VarType::Type GetType<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsInitialized()) {
return paddle::framework::ToVarType(var->Var().Type());
} else {
......@@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr<VarType> var) {
return var->DataType();
}
template <>
framework::proto::VarType::Type GetDataType<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> var) {
framework::proto::VarType::Type GetDataType<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsType<pten::SelectedRows>()) {
return framework::TransToProtoVarType(
var->Var().Get<pten::SelectedRows>().value().type());
......@@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr<VarType> var,
return GetVariableWrapper(var)->hasCacheKey(key);
}
template <>
bool CheckCachedKey<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor,
bool CheckCachedKey<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later
// VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is
......@@ -219,7 +219,7 @@ std::shared_ptr<VariableWrapper> GetCachedValue(
}
template <>
std::shared_ptr<VariableWrapper> GetCachedValue(
std::shared_ptr<egr::EagerTensor> var,
std::shared_ptr<egr::EagerVariable> var,
const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
......@@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr<VarType> var,
GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res));
}
template <>
void SetCachedValue<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor,
void SetCachedValue<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key,
std::shared_ptr<egr::EagerTensor> res) {
std::shared_ptr<egr::EagerVariable> res) {
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
// reach this, support cache and remove this error check later, or this
// should not be supported."));
......
......@@ -18,7 +18,7 @@
#include "paddle/fluid/framework/variable.h"
namespace egr {
class EagerTensor;
class EagerVariable;
} // namespace egr
namespace pten {
class DenseTensor;
......
此差异已折叠。
......@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_api_read_next_eager_tensor_list(PyObject* self,
PyObject* args,
PyObject* kwargs) {
static PyObject* eager_api_read_next_tensor_list(PyObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
auto tensor_base_list =
CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
......@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS, NULL},
{"read_next_eager_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_eager_tensor_list,
{"read_next_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}};
......
......@@ -35,15 +35,15 @@ limitations under the License. */
namespace paddle {
namespace pybind {
extern void InitEagerTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array,
bool zero_copy);
extern void InitTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array,
bool zero_copy);
extern PyTypeObject* p_tensor_type;
static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true,
platform::errors::InvalidArgument(
......@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method__is_initialized(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
// Python binding for Tensor._is_initialized(): returns a Python bool telling
// whether the underlying tensor storage has been initialized.
static PyObject* tensor_method__is_initialized(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
return ToPyObject(self->tensor.initialized());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method__copy_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
auto cp_tensor =
......@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
std::string orig_name = self->tensor.name();
......@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
......@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
if (egr::Controller::Instance().HasGrad()) {
auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
......@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__clear_gradient(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__clear_gradient(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "ClearGradient " << self->tensor.name();
paddle::experimental::Tensor* grad;
......@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "ZeroGrads " << self->tensor.name();
......@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* src_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__is_shared_underline_tensor_with(
TensorObject* self, PyObject* args, PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
PADDLE_ENFORCE_EQ(src_tensor.initialized(), true,
......@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with(
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true,
platform::errors::InvalidArgument("Tensor %s has not been initialized!",
......@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
if (self->tensor.is_dense_tensor()) {
auto* tensor =
static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
......@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
}
// NOTE(wuweilong): Set value and not change self's original place
static PyObject* eager_tensor_method_set_value(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "Value " << self->tensor.name();
pybind11::object numpy_value =
pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
InitEagerTensorWithNumpyValue(self, numpy_value, false);
InitTensorWithNumpyValue(self, numpy_value, false);
Py_INCREF(Py_None);
return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyMethodDef variable_methods[] = {
{"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
{"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_initialized",
(PyCFunction)(void (*)(void))eager_tensor_method__is_initialized,
(PyCFunction)(void (*)(void))tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to,
{"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_,
{"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"reconstruct_from_",
(PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_,
(PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads,
{"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_clear_gradient",
(PyCFunction)(void (*)(void))eager_tensor__clear_gradient,
{"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_zero_grads", (PyCFunction)(void (*)(void))eager_tensor__zero_grads,
{"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_buffer_to",
(PyCFunction)(void (*)(void))eager_tensor__share_buffer_to,
{"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_buffer_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_buffer_with,
(PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_underline_tensor_to",
(PyCFunction)(void (*)(void))eager_tensor__share_underline_tensor_to,
(PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_underline_tensor_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_underline_tensor_with,
(PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS, NULL},
{"detach", (PyCFunction)(void (*)(void))eager_tensor_method_detach,
{"detach", (PyCFunction)(void (*)(void))tensor_method_detach,
METH_VARARGS | METH_KEYWORDS, NULL},
{"get_tensor",
(PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor,
(PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value,
{"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value,
METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}};
......
......@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"(
auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_TEMPLATE = R"(
auto %s = GetEagerTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
auto %s = GetTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_LIST_TEMPLATE = R"(
auto %s = GetEagerTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
auto %s = GetTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_SIZE_T_TEMPLATE = R"(
auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)";
......
......@@ -35,14 +35,14 @@ namespace pybind {
extern PyTypeObject* p_tensor_type;
PyObject* eager_tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor.name` property: returns the tensor's name as
// a Python string.
PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(self->tensor.name());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_TRY
if (self->tensor.is_dense_tensor()) {
return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
} else {
......@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
// Setter for the Python `Tensor.name` property: casts `value` to a string and
// stores it on the tensor. Returns 0 on success (CPython setter convention).
int tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
self->tensor.set_name(CastPyArg2AttrString(value, 0));
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor.stop_gradient` property: reads the flag from
// the tensor's autograd meta and returns it as a Python bool.
PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->StopGradient());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_TRY
if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
std::shared_ptr<egr::GradNodeBase> grad_node =
egr::EagerUtils::grad_node(self->tensor);
......@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
int tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
auto src = CastPyArg2Tensor(value, 0);
PADDLE_ENFORCE(
egr::egr_utils_api::IsLeafTensor(self->tensor),
......@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
int eager_tensor_properties_set_stop_gradient(TensorObject* self,
PyObject* value, void* closure) {
EAGER_SYNC_TRY
// Setter for the Python `Tensor.stop_gradient` property: casts `value` to a
// bool and writes it into the tensor's autograd meta. Returns 0 on success
// (CPython setter convention).
int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
const bool stop_gradient = CastPyArg2AttrBoolean(value, 0);
egr::EagerUtils::autograd_meta(&self->tensor)->SetStopGradient(stop_gradient);
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_persistable(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor.persistable` property: reads the flag from
// the tensor's autograd meta and returns it as a Python bool.
PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->Persistable());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
// Setter for the Python `Tensor.persistable` property: casts `value` to a
// bool and writes it into the tensor's autograd meta. Returns 0 on success
// (CPython setter convention).
int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_TRY
auto ddim = self->tensor.shape();
std::vector<int64_t> value;
size_t rank = static_cast<size_t>(ddim.size());
......@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor.place` property: returns the tensor's device
// placement converted to a Python object.
PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(self->tensor.inner_place());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_place_str(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor._place_str` property: formats the tensor's
// placement via its stream operator and returns the text as a Python string.
PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
EAGER_TRY
std::stringstream place_stream;
place_stream << self->tensor.inner_place();
const std::string place_text = place_stream.str();
return ToPyObject(place_text);
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
// Getter for the Python `Tensor.dtype` property: converts the tensor's data
// type to the framework proto VarType and returns it as a Python object.
PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_TRY
const auto proto_dtype =
paddle::framework::TransToProtoVarType(self->tensor.type());
return ToPyObject(proto_dtype);
EAGER_CATCH_AND_THROW_RETURN_NULL
}
struct PyGetSetDef variable_properties[] = {
{"grad", (getter)eager_tensor_properties_get_grad,
(setter)eager_tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)eager_tensor_properties_get_name,
(setter)eager_tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient,
(setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)eager_tensor_properties_get_persistable,
(setter)eager_tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr,
nullptr},
// {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
{"grad", (getter)tensor_properties_get_grad,
(setter)tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)tensor_properties_get_name,
(setter)tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)tensor_properties_get_stop_gradient,
(setter)tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)tensor_properties_get_persistable,
(setter)tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
// {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
// nullptr,
// nullptr},
{"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr,
nullptr},
{"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
nullptr, nullptr},
{"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
nullptr},
{"type", (getter)eager_tensor_properties_get_type, nullptr, nullptr,
{"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
{"_place_str", (getter)tensor_properties_get_place_str, nullptr, nullptr,
nullptr},
{"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
{"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
} // namespace pybind
......
......@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"EagerTensor, but got %s",
"EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
}
......@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"EagerTensor, but got %s",
"EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
}
......@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
if (PyList_Check(list)) {
Py_ssize_t len = PyList_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
......@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
}
} else if (PyTuple_Check(list)) {
Py_ssize_t len = PyTuple_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
......@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
return result;
}
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
if (PyTuple_Check(obj)) {
......@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
return &(reinterpret_cast<TensorObject*>(obj)->tensor);
}
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs(
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
......
......@@ -65,15 +65,15 @@ PyObject* ToPyObject(
const std::unordered_map<std::string, std::vector<std::string>>& value);
template <typename Tuple, size_t N>
struct TupleEagerTensorResult {
struct TupleTensorResult {
static void Run(const Tuple& out, PyObject* result) {
TupleEagerTensorResult<Tuple, N - 1>::Run(out, result);
TupleTensorResult<Tuple, N - 1>::Run(out, result);
PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out)));
}
};
template <typename Tuple>
struct TupleEagerTensorResult<Tuple, 1> {
struct TupleTensorResult<Tuple, 1> {
static void Run(const Tuple& out, PyObject* result) {
PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out)));
}
......@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) {
auto len = sizeof...(Args);
PyObject* result = PyTuple_New(len);
TupleEagerTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
TupleTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
return result;
}
......@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs(
paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
......
......@@ -19,7 +19,6 @@ limitations under the License. */
#include "pybind11/pybind11.h"
#define EAGER_TRY try {
#define EAGER_SYNC_TRY try {
#define EAGER_CATCH_AND_THROW_RETURN_NULL \
} \
catch (...) { \
......
......@@ -222,6 +222,14 @@ class PADDLE_API Tensor final {
*/
bool is_dense_tensor() const;
/**
* @brief Determine whether tensor is SelectedRows
*
* @return true
* @return false
*/
bool is_selected_rows() const;
/* Part 3: Device and Backend methods */
/**
......
......@@ -29,7 +29,6 @@ limitations under the License. */
#include "paddle/pten/core/tensor_base.h"
#include "paddle/pten/core/tensor_meta.h"
#include "paddle/pten/core/tensor_utils.h"
/**
* [ Why still include the fluid headers? ]
*
......@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); }
bool Tensor::is_dense_tensor() const {
return pten::DenseTensor::classof(impl_.get());
}
bool Tensor::is_selected_rows() const {
return pten::SelectedRows::classof(impl_.get());
}
/* Part 3: Device and Backend methods */
PlaceType Tensor::place() const {
......
......@@ -24,7 +24,7 @@ limitations under the License. */
#include <boost/variant.hpp>
namespace egr {
class EagerTensor;
class EagerVariable;
}
namespace paddle {
namespace framework {
......@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> {
};
template <>
struct NameVarMapTrait<egr::EagerTensor> {
struct NameVarMapTrait<egr::EagerVariable> {
using Type =
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>;
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>;
};
} // namespace details
......@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type;
using NameVarBaseMap = NameVarMap<VarBase>;
using NameVariableWrapperMap = NameVarMap<VariableWrapper>;
using NameTensorMap = NameVarMap<egr::EagerTensor>;
using NameTensorMap = NameVarMap<egr::EagerVariable>;
using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>;
......
......@@ -29,10 +29,6 @@ limitations under the License. */
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/framework/mixed_vector.h"
namespace egr {
class EagerTensor;
} // namespace egr
namespace pten {
class SelectedRows : public TensorBase,
public TypeInfoTraits<TensorBase, SelectedRows> {
......@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase,
std::unique_ptr<DenseTensor> value_{nullptr};
int64_t height_; // height indicates the underline tensor's height
std::unique_ptr<RWLock> rwlock_{nullptr};
// TODO(jiabin): Remove this when we don't need EagerTensor support
// SelectedRows which is expected in next version.
/** Why we need this weird friend class?
* In eager mode, since some of ops doesn't support C++ API for now we need to
*use 'imperative::TraceOp' to run it.
* So, we need to support get a SelectedRows from egr::EagerTensor's
*framework::Variable obj and used it to reconstruct
* a new paddle::experimental::Tensor to support framework usage. However, we
*got 2 problems here.
* First, we got 2 unique_ptr in SelectedRows so that we can't support
*std::make_shared in EagerTensor's SetImplWithSelectedRows method,
* since we have to construct a shared_ptr for paddle::experimental::Tensor's
*impl.
* Second, when we are trying to support move constructor for SelectedRows we
*found that we can't get its rvalue from
* framework::Variable because it holds an obj of target type.
*
*
* The only three way to solve this problem is:
* 1. Just like what we have done, using friend class and just copy/move each
*member. In this way, we can avoid additional API
* and symbols.
* 2. Make pten::SelectedRows's member from unique_ptr to shared_ptr. However,
*this may cause some cost of performance.
* 3. Add some api to return or move member of framework::SelectedRows.
*However, it's not as safe as first solution.
* 4. Support all framework::SelectedRows related ops and make sure
*EagerTensor never holds framework::SelectedRows.
*
* If anyone got better ideas, welcome to contact JiabinYang, we are open for
*your help.
**/
friend class egr::EagerTensor;
};
} // namespace pten
......@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
expected_type += (core.VarBase, )
# TODO(jiabin): uncomment it when we support declarative mode in eager
# if _in_eager_mode():
# expected_type += (core.eager.EagerTensor, )
# expected_type += (core.eager.Tensor, )
elif isinstance(input, core.VarBase):
raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format(
input_name, op_name))
elif hasattr(core, "eager"):
if isinstance(input, core.eager.EagerTensor):
if isinstance(input, core.eager.Tensor):
raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
"Because received '{}' in {} is a imperative Variable.".format(
......
......@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
try:
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
......@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
self._reader.read_next_var_list()
......@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
......
......@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
value = value.astype(dtype)
if _in_eager_mode():
return core.eager.EagerTensor(value,
framework._current_expected_place(),
False, zero_copy, name
if name else None, True)
return core.eager.Tensor(value,
framework._current_expected_place(), False,
zero_copy, name if name else None, True)
else:
py_var = core.VarBase(
value=value,
......
......@@ -222,7 +222,7 @@ def monkey_patch_math_varbase():
# 2. create varbase for scalar
lhs_dtype = self.dtype
if _in_eager_mode():
other_var_should_be = core.eager.EagerTensor
other_var_should_be = core.eager.Tensor
else:
other_var_should_be = core.VarBase
if not isinstance(other_var, other_var_should_be):
......@@ -343,7 +343,7 @@ def monkey_patch_math_varbase():
if core._in_eager_mode():
local_already_patch = _already_patch_eager_tensor
_already_patch_eager_tensor = True
local_tensor = core.eager.EagerTensor
local_tensor = core.eager.Tensor
else:
local_already_patch = _already_patch_varbase
_already_patch_varbase = True
......
......@@ -150,7 +150,7 @@ def monkey_patch_varbase():
"""
if core._in_eager_mode():
base_tensor = core.eager.EagerTensor
base_tensor = core.eager.Tensor
else:
base_tensor = core.VarBase
assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \
......@@ -180,9 +180,9 @@ def monkey_patch_varbase():
"Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
self.name, self_tensor_np.dtype, value_np.dtype)
# NOTE(wuweilong): self could be VarBase or EagerTensor, the subsequent behavior are defined in different files
# NOTE(wuweilong): self could be VarBase or Tensor, the subsequent behavior are defined in different files
# if self is VarBase, method value() return Variable that bindded in imperative.cc, get_tensor() bindded in pybind.cc
# if self is EagerTensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc
# if self is Tensor, method value() return self that defined in this file, get_tensor() defined in eager_method.cc
# this Interface behavior will be unifed in the future.
self.value().get_tensor().set(value_np,
framework._current_expected_place())
......@@ -244,8 +244,8 @@ def monkey_patch_varbase():
if grad_tensor is not None:
if core._in_eager_mode():
assert isinstance(
grad_tensor, core.eager.EagerTensor
), "The type of grad_tensor must be paddle.Tensor"
grad_tensor, core.eager.
Tensor), "The type of grad_tensor must be paddle.Tensor"
else:
assert isinstance(
grad_tensor, paddle.
......@@ -592,8 +592,8 @@ def monkey_patch_varbase():
# [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
"""
if core._in_eager_mode():
from paddle.tensor.to_string import eager_tensor_to_string
return eager_tensor_to_string(self)
from paddle.tensor.to_string import tensor_to_string
return tensor_to_string(self)
else:
from paddle.tensor.to_string import to_string
return to_string(self)
......@@ -624,7 +624,7 @@ def monkey_patch_varbase():
"Only Leaf Tensor support the deepcopy at the moment, non-Leaf Tensors contains graph information that does't support deepcopy"
)
if core._in_eager_mode():
new_varbase = core.eager.EagerTensor()
new_varbase = core.eager.Tensor()
else:
new_varbase = core.VarBase()
new_varbase.name = self.name + unique_name.generate("_deepcopy")
......@@ -808,16 +808,16 @@ def monkey_patch_varbase():
("__getitem__", __getitem__), ("item", item),
("__setitem__", __setitem__), ("_to", _to)):
if core._in_eager_mode():
setattr(core.eager.EagerTensor, method_name, method)
setattr(core.eager.Tensor, method_name, method)
else:
setattr(core.VarBase, method_name, method)
if core._in_eager_mode():
setattr(core.eager.EagerTensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.EagerTensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.EagerTensor, "clear_gradient", clear_gradient)
setattr(core.eager.EagerTensor, "clone", clone)
setattr(core.eager.EagerTensor, "value", value)
setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.Tensor, "clear_gradient", clear_gradient)
setattr(core.eager.Tensor, "clone", clone)
setattr(core.eager.Tensor, "value", value)
else:
setattr(core.VarBase, "__name__", "Tensor")
setattr(core.VarBase, "grad", grad)
......
......@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
dtype = convert_np_dtype_to_dtype_(dtype)
if _in_eager_mode():
eager_tensor = core.eager.EagerTensor(
eager_tensor = core.eager.Tensor(
dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True
......@@ -1076,7 +1076,7 @@ class VariableMetaClass(type):
t = type(instance)
if in_dygraph_mode():
if _in_eager_mode():
return issubclass(t, core.eager.EagerTensor)
return issubclass(t, core.eager.Tensor)
return issubclass(t, core.VarBase)
else:
return issubclass(t, Variable)
......@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase):
if hasattr(core, "eager"):
_core_eager_eagertensor = core.eager.EagerTensor
_core_eager_eagertensor = core.eager.Tensor
else:
_core_eager_eagertensor = object
......
......@@ -85,10 +85,9 @@ class LayerHelperBase(object):
assert in_dygraph_mode(
), "to_variable could only be called in dygraph mode"
if _in_eager_mode():
return core.eager.EagerTensor(value,
_current_expected_place(), False,
False, name
if name else None, True)
return core.eager.Tensor(value,
_current_expected_place(), False,
False, name if name else None, True)
else:
py_var = core.VarBase(
value=value,
......
......@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase):
def __next__(self):
try:
if _in_eager_mode():
return core.eager.read_next_eager_tensor_list(
return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
return self._reader.read_next_var_list()
......
......@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase):
with fluid.dygraph.guard():
if fluid.framework._in_eager_mode():
var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.eager.EagerTensor))
self.assertTrue(isinstance(var_base, core.eager.Tensor))
else:
var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.VarBase))
......@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase):
t.set(x, fluid.CPUPlace())
if _in_eager_mode():
# TODO(jiabin): Support Kwargs and uncomment these tests
# egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace())
# egr_tmp = fluid.core.eager.Tensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace())
egr_tmp3 = paddle.to_tensor(x)
egr_tmp4 = fluid.core.eager.EagerTensor(y)
# egr_tmp5 = fluid.core.eager.EagerTensor(value=x)
egr_tmp4 = fluid.core.eager.Tensor(y)
# egr_tmp5 = fluid.core.eager.Tensor(value=x)
# TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
egr_tmp6 = fluid.core.eager.EagerTensor(t)
egr_tmp6 = fluid.core.eager.Tensor(t)
# self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
......@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase):
self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type')
if core._in_eager_mode():
self.assertEqual(
type(paddle.fluid.core.eager.EagerTensor).__name__,
'pybind11_type')
type(paddle.fluid.core.eager.Tensor).__name__, 'pybind11_type')
else:
self.assertEqual(
type(paddle.fluid.core.VarBase).__name__, 'pybind11_type')
......
......@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1)
if _in_eager_mode():
# eager_mode, var2 is EagerTensor, is not subscriptable
# eager_mode, var2 is Tensor, is not subscriptable
# TODO(wuweilong): to support slice in eager mode later
self.assertNotEqual(var2.numpy()[0][0], -1)
else:
......
......@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler):
self.last_epoch = epoch
if _in_eager_mode():
tmp = core.eager.EagerTensor
tmp = core.eager.Tensor
else:
tmp = Tensor
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
......
......@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
# TOOD(jiabin): Support kwargs in eager tensor constructor
if _in_eager_mode() and isinstance(data, np.ndarray):
return core.eager.EagerTensor(data, place, False, False, None,
stop_gradient)
return core.eager.Tensor(data, place, False, False, None, stop_gradient)
else:
return paddle.Tensor(
value=data,
......
......@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'):
data=data)
def eager_tensor_to_string(tensor, prefix='Tensor'):
def tensor_to_string(tensor, prefix='Tensor'):
indent = len(prefix) + 1
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册