Unverified commit 831fd86e authored by Jiabin Yang, committed by GitHub

EagerTensor to EagerVariable (#39447)

* merge legacy to fluid

* Remove legacy code

* Remove legacy code

* Remove DataType test

* Using Tensor directly instead of using EagerTensor

* support gradient_accumulation

* make test_imperative_lod_tensor_to_selected_rows longer

* make test_imperative_lod_tensor_to_selected_rows longer

* refine code

* Rename all EagerTensor to Tensor

* Rename some EagerTensor to Tensor

* rename EagerTensor to EagerVariable

* add more test

* merge develop and refine code
Parent: f21d7957
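Note (not part of the diff): the renamed egr::EagerVariable stays a thin wrapper over paddle::framework::Variable, while paddle::experimental::Tensor becomes the user-facing type. Below is a minimal sketch of the renamed API based on the changes in this commit; the header paths and the helper function name are assumptions for illustration, not taken from the diff.

#include <memory>
#include <vector>
#include "paddle/fluid/eager/eager_tensor.h"  // egr::EagerVariable (path assumed)
#include "paddle/fluid/eager/utils.h"         // egr::EagerUtils (path assumed)

// Hypothetical helper: round-trip a defined Tensor through EagerVariable.
void EagerVariableRoundTrip(const paddle::experimental::Tensor& t) {
  // Tensor -> EagerVariable; DenseTensor and SelectedRows impls are supported.
  auto var = std::make_shared<egr::EagerVariable>(t);

  // EagerVariable -> Tensor: fetch the underlying pten::TensorBase and keep
  // the name, which is what EagerUtils::GetOutput does in this diff.
  paddle::experimental::Tensor out(var->GetTensorBase(), var->name());

  // Empty, uniquely named output slots, as the generated forward code creates.
  std::vector<std::shared_ptr<egr::EagerVariable>> outs =
      egr::EagerUtils::CreateVars(2);
  (void)out;
  (void)outs;
}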
......@@ -1227,11 +1227,11 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
// Forward Function Body
// According to fwd_inputs_name_pos_map
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
ins =
{ {"X" , TrySyncToVars(X)}, { "Y" , TrySyncToVars(Y)} };
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>
outs =
{
{"Out0" , CreateVars(Out0Num)}, {"Out1"
......@@ -1316,7 +1316,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_INS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> ins = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> ins = { "
"%s };\n";
std::string ins_map_str =
paddle::string::Sprintf(FWD_INS_MAP_TEMPLATE, ins_contents_str);
......@@ -1353,8 +1353,9 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
if (op_passing_outs_map[op_type].count(output_name)) {
const std::string output_var_name = output_name + "Var";
// Pass Output from function argument(EagerTensor*/vector<EagerTensor*>&),
// in form of shared_ptr<EagerTensor>/vector<shared_ptr<EagerTensor>>
// Pass Output from function
// argument(EagerVariable*/vector<EagerVariable*>&),
// in form of shared_ptr<EagerVariable>/vector<shared_ptr<EagerVariable>>
if (output.duplicable()) {
const char* FWD_NUM_ARG_TEMPLATE =
", std::vector<paddle::experimental::Tensor*>& %s";
......@@ -1395,7 +1396,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
} else {
const char* FWD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance()."
"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance()."
"GenerateUniqueName())}},";
outs_contents_str +=
paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE, output_name);
......@@ -1407,7 +1408,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
const char* FWD_OUTS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> outs = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> outs = { "
"%s };\n";
std::string outs_map_str =
paddle::string::Sprintf(FWD_OUTS_MAP_TEMPLATE, outs_contents_str);
......@@ -1482,7 +1483,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
generated_function_body += out_tensor_str;
}
generated_function_body += "\n";
VLOG(6) << "Converted Output VarBase to EagerTensor(s)";
VLOG(6) << "Converted Output VarBase to EagerVariable(s)";
// [Generation] Handle core_ops_returns_info
core_ops_returns_info[op_type] = return_contents;
......@@ -1627,7 +1628,7 @@ static std::string GenerateSingleOpBase(
const char* BWD_INS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
"%s };\n";
std::string ins_map_str =
paddle::string::Sprintf(BWD_INS_MAP_TEMPLATE, ins_name, ins_contents_str);
......@@ -1704,7 +1705,7 @@ static std::string GenerateSingleOpBase(
} else {
const char* GRAD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
"{std::make_shared<egr::EagerTensor>(egr::Controller::Instance("
"{std::make_shared<egr::EagerVariable>(egr::Controller::Instance("
")."
"GenerateUniqueName())}},";
outs_contents_str += paddle::string::Sprintf(
......@@ -1723,7 +1724,7 @@ static std::string GenerateSingleOpBase(
const char* BWD_OUTS_MAP_TEMPLATE =
" std::map<std::string, "
"std::vector<std::shared_ptr<egr::EagerTensor>>> %s = { "
"std::vector<std::shared_ptr<egr::EagerVariable>>> %s = { "
"%s };\n";
std::string outs_map_str = paddle::string::Sprintf(
BWD_OUTS_MAP_TEMPLATE, outs_name, outs_contents_str);
......
......@@ -40,36 +40,28 @@
* **/
namespace egr {
class EagerTensor final {
class EagerVariable final {
public:
/* Default constructor and name constructor should only be used to construct
* output and in fluid */
EagerTensor() = default;
EagerVariable() = default;
explicit EagerTensor(const std::string& name) : name_(name) {}
explicit EagerVariable(const std::string& name) : name_(name) {}
explicit EagerTensor(const paddle::experimental::Tensor& tensor)
explicit EagerVariable(const paddle::experimental::Tensor& tensor)
: name_(tensor.name()) {
if (tensor.defined()) {
if (tensor.is_dense_tensor()) {
auto* framework_tensor =
var_.GetMutable<paddle::framework::LoDTensor>();
// Contruct framework::Tensor from egr::EagerTensor
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
PADDLE_ENFORCE_EQ((tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Failed to Trans Tensor to EagerVariable since "
"we got Tensor with type DenseTensor, and we got "
"EagerVariable with another type."));
*framework_tensor = *tensor_dense;
ConstructVariableFromTensor(tensor);
} else if (tensor.is_selected_rows()) {
ConstructVariableFromSelectedRows(tensor);
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unrecognized egr::EagerVariable type, only "
"DenseTensor and SelectedRows is supported for now."));
"DenseTensor and SelectedRows are supported for now."));
}
} else {
VLOG(6) << "Build Empty EagerTensor with name " << name_;
VLOG(6) << "Build Empty EagerVariable with name " << name_;
}
}
......@@ -77,21 +69,20 @@ class EagerTensor final {
std::shared_ptr<pten::TensorBase> GetTensorBase() {
// Construct allocation only once.
if (var_.IsInitialized()) {
if (var_.IsType<paddle::framework::LoDTensor>()) {
return SetImplWithLegacyTensor<pten::DenseTensor>();
} else if (var_.IsType<paddle::framework::Tensor>()) {
return SetImplWithLegacyTensor<pten::DenseTensor>();
if (var_.IsType<paddle::framework::LoDTensor>() ||
var_.IsType<paddle::framework::Tensor>()) {
return SetImplWithLegacyTensor();
} else if (var_.IsType<pten::SelectedRows>()) {
return SetImplWithSelectedRows();
return SetImplWithLegacySelectedRows();
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unable to fetch underlying tensor "
"from EagerTensor, only LoDTensor and "
"from EagerVariable, only LoDTensor and "
"Tensor are supported for now"));
}
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Can not Sync EagerTensor %s whose paddle::framework::Variable is "
"Can not Sync EagerVariable %s whose paddle::framework::Variable is "
"not initialized!",
name()));
}
......@@ -107,23 +98,52 @@ class EagerTensor final {
void set_name(const std::string& name) { name_ = name; }
private:
template <typename LEGACY_TYPE>
std::shared_ptr<pten::TensorBase> SetImplWithLegacyTensor() {
const auto& framework_tensor = var_.Get<LEGACY_TYPE>();
const auto& framework_tensor = var_.Get<pten::DenseTensor>();
VLOG(8) << "Sync Var to tensor for: " << name();
return std::make_shared<LEGACY_TYPE>(std::move(framework_tensor));
return std::make_shared<pten::DenseTensor>(framework_tensor);
}
std::shared_ptr<pten::TensorBase> SetImplWithSelectedRows() {
auto* selected_rows = var_.GetMutable<pten::SelectedRows>();
auto res = std::make_shared<pten::SelectedRows>(selected_rows->rows_,
selected_rows->height_);
res->value_.reset(selected_rows->value_.release());
res->id_to_index_ = std::move(selected_rows->id_to_index_);
res->rwlock_.reset(selected_rows->rwlock_.release());
std::shared_ptr<pten::TensorBase> SetImplWithLegacySelectedRows() {
auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
VLOG(8) << "Sync SelectedRows to tensor for: " << name();
auto res =
std::make_shared<pten::SelectedRows>(std::move(*framework_tensor));
var_.Clear();
return res;
}
void ConstructVariableFromTensor(const paddle::experimental::Tensor& tensor) {
auto* framework_tensor = var_.GetMutable<pten::DenseTensor>();
// Construct framework::Tensor from egr::EagerVariable
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor.impl());
PADDLE_ENFORCE_EQ(
(tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
"Or it holds empty impl, this should not happend since we should "
"treat all kinds of tensor as what they are.",
tensor.name()));
*framework_tensor = *tensor_dense;
}
void ConstructVariableFromSelectedRows(
const paddle::experimental::Tensor& tensor) {
auto* framework_tensor = var_.GetMutable<pten::SelectedRows>();
// Construct pten::SelectedRows from egr::EagerVariable
auto tensor_dense =
std::dynamic_pointer_cast<pten::SelectedRows>(tensor.impl());
PADDLE_ENFORCE_EQ(
(tensor_dense.get() && tensor_dense), true,
paddle::platform::errors::Fatal(
"Tensor %s does not hold pten::SelectedRows or pten::DenseTensor. "
"Or it holds empty impl, this should not happend since we should "
"treat all kinds of tensor as what they are.",
tensor.name()));
*framework_tensor = std::move(*tensor_dense);
}
private:
std::string name_{""};
paddle::framework::Variable var_;
......
......@@ -115,7 +115,7 @@ TEST(Tensor, MemberFunction) {
CHECK_EQ(tmp_autograd_meta_test->val_, 2);
}
TEST(EagerTensor, Constructor) {
TEST(EagerVariable, Constructor) {
paddle::experimental::Tensor t3;
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
......@@ -134,7 +134,7 @@ TEST(EagerTensor, Constructor) {
CHECK_EQ(t3.defined(), false);
t3.set_impl(dt);
egr::EagerTensor et3 = egr::EagerTensor(t3);
egr::EagerVariable et3 = egr::EagerVariable(t3);
VLOG(6) << "SyncToVar";
CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
5.0f);
......
......@@ -167,7 +167,7 @@ TEST(EagerUtils, PassStopGradient) {
TEST(EagerUtils, TrySyncToVar) {
paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
auto tensor = CreateTestCPUTensor(5.0f, ddim);
std::vector<std::shared_ptr<egr::EagerTensor>> var_bases = {
std::vector<std::shared_ptr<egr::EagerVariable>> var_bases = {
egr::EagerUtils::TrySyncToVar(tensor)};
paddle::framework::Variable* var = var_bases[0]->MutableVar();
......@@ -187,7 +187,7 @@ TEST(EagerUtils, TrySyncToVars) {
std::vector<paddle::experimental::Tensor> tensors = {
CreateTestCPUTensor(1.0f, ddim), CreateTestCPUTensor(2.0f, ddim)};
std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
std::vector<std::shared_ptr<egr::EagerVariable>> var_bases =
egr::EagerUtils::TrySyncToVars(tensors);
{
......@@ -218,7 +218,7 @@ TEST(EagerUtils, TrySyncToVars) {
TEST(EagerUtils, CreateVars) {
VLOG(6) << "Check CreateVars";
std::vector<std::shared_ptr<egr::EagerTensor>> outs =
std::vector<std::shared_ptr<egr::EagerVariable>> outs =
egr::EagerUtils::CreateVars(2);
CHECK_EQ(outs.size(), size_t(2));
CHECK(outs[0]->Var().IsInitialized() == false);
......
......@@ -131,17 +131,17 @@ void EagerUtils::SetOutRankWithSlot(AutogradMeta* target, size_t slot_id) {
target->SetSingleOutRankWithSlot(slot_id, 0);
}
std::shared_ptr<egr::EagerTensor> EagerUtils::TrySyncToVar(
std::shared_ptr<egr::EagerVariable> EagerUtils::TrySyncToVar(
const paddle::experimental::Tensor& tensor) {
return std::make_shared<egr::EagerTensor>(tensor);
return std::make_shared<egr::EagerVariable>(tensor);
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const paddle::experimental::Tensor& tensor) {
return {TrySyncToVar(tensor)};
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
paddle::experimental::Tensor* tensor) {
PADDLE_ENFORCE_NOT_NULL(
tensor,
......@@ -151,9 +151,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return {TrySyncToVar(*tensor)};
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const std::vector<paddle::experimental::Tensor*>& tensors) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
size_t num = tensors.size();
res.reserve(num);
for (size_t i = 0; i < num; i++) {
......@@ -169,9 +169,9 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return res;
}
std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
std::vector<std::shared_ptr<egr::EagerVariable>> EagerUtils::TrySyncToVars(
const std::vector<paddle::experimental::Tensor>& tensors) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
size_t num = tensors.size();
res.reserve(num);
for (size_t i = 0; i < num; i++) {
......@@ -180,19 +180,19 @@ std::vector<std::shared_ptr<egr::EagerTensor>> EagerUtils::TrySyncToVars(
return res;
}
std::vector<std::shared_ptr<EagerTensor>> EagerUtils::CreateVars(
std::vector<std::shared_ptr<EagerVariable>> EagerUtils::CreateVars(
const size_t num) {
std::vector<std::shared_ptr<EagerTensor>> res;
std::vector<std::shared_ptr<EagerVariable>> res;
res.reserve(num);
for (size_t i = 0; i < num; i++) {
res.emplace_back(
new EagerTensor(egr::Controller::Instance().GenerateUniqueName()));
new EagerVariable(egr::Controller::Instance().GenerateUniqueName()));
}
return res;
}
std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs) {
const std::vector<std::shared_ptr<EagerVariable>>& outs) {
std::vector<paddle::experimental::Tensor> res;
res.reserve(outs.size());
for (const auto& out : outs) {
......@@ -209,7 +209,7 @@ std::vector<paddle::experimental::Tensor> EagerUtils::GetOutputs(
}
paddle::experimental::Tensor EagerUtils::GetOutput(
const std::shared_ptr<EagerTensor>& out) {
const std::shared_ptr<EagerVariable>& out) {
PADDLE_ENFORCE_NOT_NULL(
out.get(), paddle::platform::errors::Fatal(
"Eager Tensor %s is null and cannot be copied. We "
......@@ -219,7 +219,7 @@ paddle::experimental::Tensor EagerUtils::GetOutput(
return paddle::experimental::Tensor(out->GetTensorBase(), out->name());
}
void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
paddle::experimental::Tensor* tensor) {
PADDLE_ENFORCE_NOT_NULL(
tensor, paddle::platform::errors::Fatal(
......@@ -231,7 +231,7 @@ void EagerUtils::OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
}
void EagerUtils::OverwriteOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs,
const std::vector<std::shared_ptr<EagerVariable>>& outs,
const std::vector<paddle::experimental::Tensor*>& tensors) {
PADDLE_ENFORCE_EQ(
outs.size(), tensors.size(),
......
......@@ -88,7 +88,7 @@ class EagerUtils {
/**
* We have to use autograd_meta and multi_autograd_meta to initialize
* autograd_meta for tensor, since we can't init it in
* egr::EagerTensor's
* egr::EagerVariable's
* constructor (it's an abstract class there)
*
* **/
......@@ -151,34 +151,35 @@ class EagerUtils {
// Intermediate needed; remove this once we don't need legacy
// Inner Method
static std::shared_ptr<egr::EagerTensor> TrySyncToVar(
static std::shared_ptr<egr::EagerVariable> TrySyncToVar(
const paddle::experimental::Tensor& tensor);
// Basic Input
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const paddle::experimental::Tensor& tensor);
// Basic Output
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
paddle::experimental::Tensor* tensor);
// Multi Output
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const std::vector<paddle::experimental::Tensor*>& tensors);
// Multi Input
static std::vector<std::shared_ptr<egr::EagerTensor>> TrySyncToVars(
static std::vector<std::shared_ptr<egr::EagerVariable>> TrySyncToVars(
const std::vector<paddle::experimental::Tensor>& tensors);
// Construct empty output
static std::vector<std::shared_ptr<EagerTensor>> CreateVars(const size_t num);
static std::vector<std::shared_ptr<EagerVariable>> CreateVars(
const size_t num);
// Construct Tensor From var
static std::vector<paddle::experimental::Tensor> GetOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs);
const std::vector<std::shared_ptr<EagerVariable>>& outs);
static paddle::experimental::Tensor GetOutput(
const std::shared_ptr<EagerTensor>& out);
const std::shared_ptr<EagerVariable>& out);
// Sync Back to origin output Tensor
static void OverwriteOutputs(const std::shared_ptr<EagerTensor>& out,
static void OverwriteOutputs(const std::shared_ptr<EagerVariable>& out,
paddle::experimental::Tensor* tensor);
static void OverwriteOutputs(const paddle::experimental::Tensor& out,
paddle::experimental::Tensor* tensor);
static void OverwriteOutputs(
const std::vector<std::shared_ptr<EagerTensor>>& outs,
const std::vector<std::shared_ptr<EagerVariable>>& outs,
const std::vector<paddle::experimental::Tensor*>& tensors);
static void OverwriteOutputs(
const std::vector<paddle::experimental::Tensor>& outs,
......
......@@ -340,8 +340,8 @@ NameVarMap<VarType> AutoCastInputs(const std::string& op_type,
}
template NameVarMap<VarBase> AutoCastInputs<VarBase>(
const std::string& op_type, const NameVarMap<VarBase>& ins);
template NameVarMap<egr::EagerTensor> AutoCastInputs<egr::EagerTensor>(
const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
template NameVarMap<egr::EagerVariable> AutoCastInputs<egr::EagerVariable>(
const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
template <typename VarType>
NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
const NameVarMap<VarType>& ins) {
......@@ -384,7 +384,7 @@ NameVarMap<VarType> CastPureFp16Inputs(const std::string& op_type,
}
template NameVarMap<VarBase> CastPureFp16Inputs<VarBase>(
const std::string& op_type, const NameVarMap<VarBase>& ins);
template NameVarMap<egr::EagerTensor> CastPureFp16Inputs<egr::EagerTensor>(
const std::string& op_type, const NameVarMap<egr::EagerTensor>& ins);
template NameVarMap<egr::EagerVariable> CastPureFp16Inputs<egr::EagerVariable>(
const std::string& op_type, const NameVarMap<egr::EagerVariable>& ins);
} // namespace imperative
} // namespace paddle
......@@ -177,9 +177,9 @@ std::string LayerDebugString(const std::string& op_type,
}
std::string LayerDebugString(const std::string& op_type,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs) {
return LayerDebugStringImpl<egr::EagerTensor>(op_type, ins, outs);
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs) {
return LayerDebugStringImpl<egr::EagerVariable>(op_type, ins, outs);
}
template <typename VarType>
......@@ -194,11 +194,16 @@ static void SetForwardDataTypeOfGradVars(const NameVarMap<VarType>& outs) {
}
}
template <>
void SetForwardDataTypeOfGradVars<egr::EagerTensor>(
const NameVarMap<egr::EagerTensor>& outs) {
void SetForwardDataTypeOfGradVars<egr::EagerVariable>(
const NameVarMap<egr::EagerVariable>& outs) {
// In eager mode we don't need this.
}
void TestSetForwardDataTypeOfGradVarsEager(
const NameVarMap<egr::EagerVariable>& outs) {
SetForwardDataTypeOfGradVars<egr::EagerVariable>(outs);
}
VarBase::VarBase(const std::shared_ptr<VariableWrapper>& var)
: var_(var), grad_node_(var->GetGradNode()) {
if (auto grad_var = var_->GetGradVar()) {
......@@ -528,12 +533,12 @@ void OpBase::Run(const framework::OperatorBase& op,
}
void OpBase::Run(const framework::OperatorBase& op,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs,
const platform::Place& place) {
OpBaseRunImpl<egr::EagerTensor>(op, ins, outs, attrs, default_attrs, place);
OpBaseRunImpl<egr::EagerVariable>(op, ins, outs, attrs, default_attrs, place);
}
void ClearNoNeedBufferInputs(OpBase* op) {
......
......@@ -185,8 +185,8 @@ class OpBase {
const framework::AttributeMap& default_attrs,
const platform::Place& place);
static void Run(const framework::OperatorBase& op,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs,
const platform::Place& place);
......
......@@ -89,11 +89,16 @@ void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
}
template <>
void HandleComplexGradToRealGrad<egr::EagerTensor>(
const NameVarMap<egr::EagerTensor>& outs) {
void HandleComplexGradToRealGrad<egr::EagerVariable>(
const NameVarMap<egr::EagerVariable>& outs) {
// TODO(jiabin): Support Complex here.
}
void TestHandleComplexGradToRealGradEager(
const NameVarMap<egr::EagerVariable>& outs) {
HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}
PreparedOp::PreparedOp(const framework::OperatorBase& op,
const framework::RuntimeContext& ctx,
const framework::OpKernelType& kernel_type,
......@@ -322,14 +327,14 @@ PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
default_attrs);
}
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::OperatorWithKernel& op,
const platform::Place& place,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs) {
return PrepareImpl<egr::EagerTensor>(ins, outs, op, place, attrs,
default_attrs);
return PrepareImpl<egr::EagerVariable>(ins, outs, op, place, attrs,
default_attrs);
}
template <typename VarType>
static void PreparedOpRunImpl(
......@@ -461,18 +466,18 @@ void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
}
}
void PreparedOp::Run(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs) {
if (run_pten_kernel_) {
PreparedOpRunPtImpl<egr::EagerTensor>(
PreparedOpRunPtImpl<egr::EagerVariable>(
op_, kernel_type_, pt_kernel_signature_, pt_kernel_, dev_ctx_, ins,
outs, attrs, default_attrs);
} else {
PreparedOpRunImpl<egr::EagerTensor>(op_, ctx_, kernel_type_, func_,
dev_ctx_, ins, outs, attrs,
default_attrs);
PreparedOpRunImpl<egr::EagerVariable>(op_, ctx_, kernel_type_, func_,
dev_ctx_, ins, outs, attrs,
default_attrs);
}
}
......
......@@ -63,8 +63,8 @@ void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
}
template <>
void SetForwardDataTypeOfGradVar<egr::EagerTensor>(
const std::shared_ptr<egr::EagerTensor>& var) {
void SetForwardDataTypeOfGradVar<egr::EagerVariable>(
const std::shared_ptr<egr::EagerVariable>& var) {
VLOG(10) << "Var in Eager dose not support SetForwardDataTypeOfGradVar: "
<< var->name();
// TODO(jiabin): SetForwardDataType of Grad var is not supported yet in
......@@ -171,8 +171,8 @@ class PreparedOp {
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
static PreparedOp Prepare(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
static PreparedOp Prepare(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::OperatorWithKernel& op,
const platform::Place& place,
const framework::AttributeMap& attrs,
......@@ -187,8 +187,8 @@ class PreparedOp {
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
void Run(const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs,
void Run(const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs,
const framework::AttributeMap& attrs,
const framework::AttributeMap& default_attrs);
......
......@@ -31,8 +31,8 @@
namespace paddle {
namespace imperative {
extern std::string LayerDebugString(const std::string& op_type,
const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs);
const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs);
extern std::shared_ptr<GradOpNode> CreateGradOpNode(
const framework::OperatorBase& op, const NameTensorMap& ins,
......@@ -41,20 +41,21 @@ extern std::shared_ptr<GradOpNode> CreateGradOpNode(
const std::map<std::string, std::string>& inplace_map);
TEST(test_eager, eager_debug) {
std::shared_ptr<egr::EagerTensor> x_in(new egr::EagerTensor("x_in"));
std::shared_ptr<egr::EagerTensor> y_in(new egr::EagerTensor("y_in"));
std::shared_ptr<egr::EagerTensor> vout(new egr::EagerTensor("vout"));
imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {x_in}}, {"Y", {y_in}}};
imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {vout}}};
std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {x_in}},
{"Y", {y_in}}};
imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {vout}}};
LayerDebugString("mul", ins, outs);
}
TEST(test_create_node, eager_node) {
auto op = framework::OpRegistry::CreateOp("mul", {}, {}, {}, false);
framework::Scope scope;
auto ctx = framework::RuntimeContext({}, {});
imperative::NameVarMap<egr::EagerTensor> ins = {{"X", {nullptr}},
{"Y", {nullptr}}};
imperative::NameVarMap<egr::EagerTensor> outs = {{"Out", {nullptr}}};
imperative::NameVarMap<egr::EagerVariable> ins = {{"X", {nullptr}},
{"Y", {nullptr}}};
imperative::NameVarMap<egr::EagerVariable> outs = {{"Out", {nullptr}}};
CreateGradOpNode((*op.get()), ins, outs, framework::AttributeMap{},
framework::AttributeMap{}, platform::CPUPlace(), {});
}
......@@ -72,26 +73,26 @@ TEST(test_var_helper, eager_var_helper) {
ASSERT_ANY_THROW(
InitializeVariable(&var8, paddle::framework::proto::VarType::FP64));
auto egr_tensor = std::make_shared<egr::EagerTensor>();
auto egr_tensor2 = std::make_shared<egr::EagerTensor>();
auto egr_tensor = std::make_shared<egr::EagerVariable>();
auto egr_tensor2 = std::make_shared<egr::EagerVariable>();
egr_tensor->MutableVar()
->GetMutable<pten::SelectedRows>()
->mutable_value()
->mutable_data<float>(platform::CPUPlace());
egr_tensor2->MutableVar()->GetMutable<framework::LoDRankTable>();
VLOG(6) << "egr_tensor create with ";
ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerTensor>(egr_tensor)));
ASSERT_TRUE(GetDataType<egr::EagerTensor>(egr_tensor) ==
ASSERT_TRUE(platform::is_cpu_place(GetPlace<egr::EagerVariable>(egr_tensor)));
ASSERT_TRUE(GetDataType<egr::EagerVariable>(egr_tensor) ==
framework::proto::VarType::FP32);
GetCachedValue<egr::EagerTensor>(
GetCachedValue<egr::EagerVariable>(
egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
platform::CPUPlace()));
SetCachedValue<egr::EagerTensor>(
SetCachedValue<egr::EagerVariable>(
egr_tensor, framework::OpKernelType(framework::proto::VarType::FP32,
platform::CPUPlace()),
egr_tensor2);
ASSERT_ANY_THROW(GetPlace<egr::EagerTensor>(egr_tensor2));
ASSERT_ANY_THROW(SetType<egr::EagerTensor>(
ASSERT_ANY_THROW(GetPlace<egr::EagerVariable>(egr_tensor2));
ASSERT_ANY_THROW(SetType<egr::EagerVariable>(
egr_tensor, paddle::framework::proto::VarType::LOD_TENSOR_ARRAY));
}
} // namespace imperative
......
......@@ -39,6 +39,8 @@ using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
using var_pair = std::pair<std::string, vb_vector>;
extern void TestSetForwardDataTypeOfGradVarsEager(
const NameVarMap<egr::EagerVariable>& outs);
template <typename VarType>
class TestRuntimeInferVarTypeContext
: public RuntimeInferVarTypeContext<VarType> {
......@@ -406,6 +408,11 @@ TEST(test_layer, test_inner_op_not_inited) {
ASSERT_THROW(op.CheckAttrs(), platform::EnforceNotMet);
}
TEST(test_layer, test_eager) {
imperative::NameTensorMap ins = {};
TestSetForwardDataTypeOfGradVarsEager(ins);
}
} // namespace imperative
} // namespace paddle
......
......@@ -32,6 +32,9 @@ namespace framework = paddle::framework;
namespace paddle {
namespace imperative {
extern void TestHandleComplexGradToRealGradEager(
const NameVarMap<egr::EagerVariable>& outs);
static framework::VariableNameMap CreateVarNameMap(
const framework::OpInfo& op_info, const std::string& op_type,
const NameVarBaseMap& varbase_map, bool is_input) {
......@@ -209,6 +212,11 @@ TEST(test_prepare_op, test_prepare_data_same_place) {
TestPrepareDataSamePlace({});
}
TEST(test_prepare_op, test_complex_eager) {
NameVarMap<egr::EagerVariable> outs = {};
TestHandleComplexGradToRealGradEager(outs);
}
#ifdef PADDLE_WITH_MKLDNN
TEST(test_prepare_op, test_prepare_data_cpu_mkldnn) {
TestPrepareDataSamePlace({{"use_mkldnn", true}});
......
......@@ -37,9 +37,10 @@ namespace paddle {
namespace imperative {
using vb_vector = std::vector<std::shared_ptr<imperative::VarBase>>;
using var_pair = std::pair<std::string, vb_vector>;
using ev_vector = std::vector<std::shared_ptr<egr::EagerVariable>>;
using ev_pair = std::pair<std::string, ev_vector>;
TEST(test_tracer, test_trace_op) {
// Doing a mul
imperative::Tracer tracer;
......@@ -546,6 +547,44 @@ TEST(test_tracer, test_execution_context) {
ASSERT_EQ(dy_ctx.OutputName("Out"), framework::kEmptyVarName);
}
TEST(test_tracer, eager_tracer) {
// Doing a mul
imperative::Tracer tracer;
std::shared_ptr<egr::EagerVariable> x_in(new egr::EagerVariable("x_in"));
std::shared_ptr<egr::EagerVariable> y_in(new egr::EagerVariable("y_in"));
std::shared_ptr<egr::EagerVariable> vout(new egr::EagerVariable("vout"));
platform::CPUPlace place;
std::vector<float> src_data(10, 2.0);
std::vector<int64_t> dims1 = {2, 5};
std::vector<int64_t> dims2 = {5, 2};
auto* x_in_tensor = x_in->MutableVar()->GetMutable<framework::LoDTensor>();
auto* y_in_tensor = y_in->MutableVar()->GetMutable<framework::LoDTensor>();
x_in_tensor->Resize(framework::make_ddim(dims1));
auto* mutable_x = x_in_tensor->mutable_data<float>(place);
paddle::memory::Copy(place, mutable_x, place, src_data.data(),
sizeof(float) * src_data.size());
y_in_tensor->Resize(framework::make_ddim(dims2));
auto* mutable_y = y_in_tensor->mutable_data<float>(place);
paddle::memory::Copy(place, mutable_y, place, src_data.data(),
sizeof(float) * src_data.size());
ev_pair x_pair = ev_pair("X", ev_vector(1, x_in));
ev_pair y_pair = ev_pair("Y", ev_vector(1, y_in));
ev_pair out_pair = ev_pair("Out", ev_vector(1, vout));
imperative::NameTensorMap ins = {x_pair, y_pair};
imperative::NameTensorMap outs = {out_pair};
framework::AttributeMap mul_attr_map;
mul_attr_map["use_mkldnn"] = false;
tracer.TraceOp<egr::EagerVariable>("mul", ins, outs, mul_attr_map, place,
true);
const auto& out_tensor = vout->Var().Get<framework::LoDTensor>();
for (int i = 0; i < vout->Var().Get<framework::LoDTensor>().numel(); i++) {
ASSERT_EQ(out_tensor.data<float>()[i], 20.0);
}
}
} // namespace imperative
} // namespace paddle
......
......@@ -168,7 +168,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* passed_default_attrs_,
bool override_default_attr_map) {
bool use_default_attr_map) {
platform::RecordEvent op_type_record_event(type);
platform::ScopedFlushDenormal flush;
VLOG(1) << "Trace Op: " << type;
......@@ -244,7 +244,7 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
"CustomPlace."));
#endif
}
if (!override_default_attr_map) {
if (!use_default_attr_map) {
PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
paddle::platform::errors::PermissionDenied(
"Detected default_attrs = nullptr."));
......@@ -280,16 +280,14 @@ void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
}
if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
if (!override_default_attr_map) {
PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
paddle::platform::errors::PermissionDenied(
"Detected default_attrs = nullptr."));
CreateGradOpNode(*op, new_ins, outs, attrs, *passed_default_attrs_, place,
inplace_map);
} else {
CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
inplace_map);
}
PADDLE_ENFORCE_EQ(
passed_default_attrs_, nullptr,
paddle::platform::errors::PermissionDenied(
"We expect passed_default_attrs_ is nullptr while "
"use_default_attr_map is true, however we got not null "
"passed_default_attrs_. Please check your usage of trace_op. "));
CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
inplace_map);
} else {
VLOG(3) << "No Grad to track for Op: " << type;
}
......@@ -301,16 +299,14 @@ template void Tracer::TraceOp<VarBase>(
const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map);
paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
template void Tracer::TraceOp<egr::EagerTensor>(
const std::string& type, const NameVarMap<egr::EagerTensor>& ins,
const NameVarMap<egr::EagerTensor>& outs, framework::AttributeMap attrs,
template void Tracer::TraceOp<egr::EagerVariable>(
const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map_,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map);
paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);
void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
const NameVarBaseMap& outs, framework::AttributeMap attrs,
......@@ -324,13 +320,12 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
paddle::framework::AttributeMap attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
bool use_default_attr_map,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp with override_default_attr_map: "
<< override_default_attr_map;
TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), place, false,
inplace_map, default_attrs,
override_default_attr_map);
VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
<< use_default_attr_map;
TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
inplace_map, default_attrs, use_default_attr_map);
}
void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
......@@ -338,8 +333,9 @@ void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
paddle::framework::AttributeMap attrs,
const std::map<std::string, std::string>& inplace_map) {
VLOG(6) << "Running On Eager TraceOp(less): ";
TraceOp<egr::EagerTensor>(type, ins, outs, std::move(attrs), expected_place_,
false, inplace_map, nullptr, true);
TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
expected_place_, false, inplace_map, nullptr,
true);
}
void Tracer::SetExpectedPlace(platform::Place place) {
......
......@@ -69,7 +69,7 @@ class Tracer {
const platform::Place& place, bool trace_backward,
const std::map<std::string, std::string>& inplace_map = {},
paddle::framework::AttributeMap* passed_default_attrs_ = nullptr,
bool override_default_attr_map = true);
bool use_default_attr_map = true);
void TraceOp(const std::string& type, const NameVarBaseMap& ins,
const NameVarBaseMap& outs, framework::AttributeMap attrs,
......@@ -83,7 +83,7 @@ class Tracer {
const NameTensorMap& outs, paddle::framework::AttributeMap attrs,
const paddle::platform::Place& place,
paddle::framework::AttributeMap* default_attrs,
bool override_default_attr_map,
bool use_default_attr_map,
const std::map<std::string, std::string>& inplace_map = {});
bool ComputeRequiredGrad(const NameVarBaseMap& ins,
......
......@@ -95,8 +95,8 @@ template const paddle::platform::Place &GetPlace<VarBase>(
const std::shared_ptr<VarBase> &var);
template const paddle::platform::Place &GetPlace<VariableWrapper>(
const std::shared_ptr<VariableWrapper> &var);
template const paddle::platform::Place &GetPlace<egr::EagerTensor>(
const std::shared_ptr<egr::EagerTensor> &var);
template const paddle::platform::Place &GetPlace<egr::EagerVariable>(
const std::shared_ptr<egr::EagerVariable> &var);
/* GetNameFromVar */
template <typename VarType>
......@@ -104,8 +104,8 @@ const std::string &GetNameFromVar(std::shared_ptr<VarType> var) {
return var->Name();
}
template <>
const std::string &GetNameFromVar<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor) {
const std::string &GetNameFromVar<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor) {
return tensor->name();
}
template const std::string &GetNameFromVar<VariableWrapper>(
......@@ -120,8 +120,8 @@ void SetType(std::shared_ptr<VarType> var,
var->SetType(type);
}
template <>
void SetType<egr::EagerTensor>(std::shared_ptr<egr::EagerTensor> var,
framework::proto::VarType::Type type) {
void SetType<egr::EagerVariable>(std::shared_ptr<egr::EagerVariable> var,
framework::proto::VarType::Type type) {
switch (type) {
case paddle::framework::proto::VarType::LOD_TENSOR: {
var->MutableVar()->GetMutable<paddle::framework::LoDTensor>();
......@@ -149,8 +149,8 @@ framework::proto::VarType::Type GetType(std::shared_ptr<VarType> var) {
return var->Type();
}
template <>
framework::proto::VarType::Type GetType<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> var) {
framework::proto::VarType::Type GetType<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsInitialized()) {
return paddle::framework::ToVarType(var->Var().Type());
} else {
......@@ -168,8 +168,8 @@ framework::proto::VarType::Type GetDataType(std::shared_ptr<VarType> var) {
return var->DataType();
}
template <>
framework::proto::VarType::Type GetDataType<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> var) {
framework::proto::VarType::Type GetDataType<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> var) {
if (var->Var().IsType<pten::SelectedRows>()) {
return framework::TransToProtoVarType(
var->Var().Get<pten::SelectedRows>().value().type());
......@@ -197,8 +197,8 @@ bool CheckCachedKey(std::shared_ptr<VarType> var,
return GetVariableWrapper(var)->hasCacheKey(key);
}
template <>
bool CheckCachedKey<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor,
bool CheckCachedKey<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later
// VLOG(10) << "CheckCachedKey with tensor: " << tensor->name() << "and key is
......@@ -219,7 +219,7 @@ std::shared_ptr<VariableWrapper> GetCachedValue(
}
template <>
std::shared_ptr<VariableWrapper> GetCachedValue(
std::shared_ptr<egr::EagerTensor> var,
std::shared_ptr<egr::EagerVariable> var,
const paddle::framework::OpKernelType &key) {
// TODO(jiabin): Support this later
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
......@@ -243,10 +243,10 @@ void SetCachedValue(std::shared_ptr<VarType> var,
GetVariableWrapper(var)->setCacheValue(key, GetVariableWrapper(res));
}
template <>
void SetCachedValue<egr::EagerTensor>(
std::shared_ptr<egr::EagerTensor> tensor,
void SetCachedValue<egr::EagerVariable>(
std::shared_ptr<egr::EagerVariable> tensor,
const paddle::framework::OpKernelType &key,
std::shared_ptr<egr::EagerTensor> res) {
std::shared_ptr<egr::EagerVariable> res) {
// PADDLE_THROW(platform::errors::Fatal("In eager mode program should not
// reach this, support cache and remove this error check later, or this
// should not be supported."));
......
......@@ -18,7 +18,7 @@
#include "paddle/fluid/framework/variable.h"
namespace egr {
class EagerTensor;
class EagerVariable;
} // namespace egr
namespace pten {
class DenseTensor;
......
......@@ -45,7 +45,7 @@ PyTypeObject* p_tensor_type;
extern PyTypeObject* g_vartype_pytype;
extern PyTypeObject* g_framework_tensor_pytype;
PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
PyObject* TensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
PyObject* obj = type->tp_alloc(type, 0);
if (obj) {
auto v = reinterpret_cast<TensorObject*>(obj);
......@@ -56,14 +56,14 @@ PyObject* EagerTensorNew(PyTypeObject* type, PyObject* args, PyObject* kwargs) {
}
// TODO(jiabin): Overload this once we need more constructors in Python
void EmptyEagerTensorInitializer(
TensorObject* self, const std::string& name,
const paddle::platform::Place& place, bool persistable = false,
bool stop_gradient = true, framework::proto::VarType::Type dtype =
paddle::framework::proto::VarType::FP32,
const std::vector<int>& dims = {},
framework::proto::VarType::Type var_type =
paddle::framework::proto::VarType::LOD_TENSOR) {
void EmptyTensorInitializer(TensorObject* self, const std::string& name,
const paddle::platform::Place& place,
bool persistable = false, bool stop_gradient = true,
framework::proto::VarType::Type dtype =
paddle::framework::proto::VarType::FP32,
const std::vector<int>& dims = {},
framework::proto::VarType::Type var_type =
paddle::framework::proto::VarType::LOD_TENSOR) {
auto ddims = paddle::framework::make_ddim(dims);
PADDLE_ENFORCE_GE(
paddle::framework::product(ddims), 0,
......@@ -98,46 +98,41 @@ void EmptyEagerTensorInitializer(
}
}
void InitEagerTensorWithNumpyValue(TensorObject* self, const py::object& array,
bool zero_copy = false) {
void InitTensorWithNumpyValue(TensorObject* self, const py::object& array,
bool zero_copy = false) {
PADDLE_ENFORCE_EQ(
self->tensor.defined(), true,
paddle::platform::errors::Fatal(
"Calling InitEagerTensorWithNumpyValue of Eager Tensor without "
"EmptyEagerTensorInitializer is "
"Calling InitTensorWithNumpyValue of Eager Tensor without "
"EmptyTensorInitializer is "
"forbidden. Please check your code and make sure you new a "
"eager tensor before init it with NumPy."));
pten::DenseTensor* impl_ptr =
static_cast<pten::DenseTensor*>(self->tensor.impl().get());
paddle::platform::Place place = impl_ptr->place();
paddle::framework::LoDTensor temp_tensor = paddle::framework::LoDTensor();
if (platform::is_cpu_place(place)) {
SetTensorFromPyArray<platform::CPUPlace>(&temp_tensor, array, place,
zero_copy);
SetTensorFromPyArray<platform::CPUPlace>(impl_ptr, array, place, zero_copy);
} else if (platform::is_xpu_place(place)) {
SetTensorFromPyArray<platform::XPUPlace>(&temp_tensor, array, place,
zero_copy);
SetTensorFromPyArray<platform::XPUPlace>(impl_ptr, array, place, zero_copy);
} else if (platform::is_gpu_place(place)) {
SetTensorFromPyArray<platform::CUDAPlace>(&temp_tensor, array, place,
SetTensorFromPyArray<platform::CUDAPlace>(impl_ptr, array, place,
zero_copy);
} else if (platform::is_cuda_pinned_place(place)) {
SetTensorFromPyArray<platform::CUDAPinnedPlace>(&temp_tensor, array, place,
SetTensorFromPyArray<platform::CUDAPinnedPlace>(impl_ptr, array, place,
zero_copy);
} else if (platform::is_npu_place(place)) {
SetTensorFromPyArray<platform::NPUPlace>(&temp_tensor, array, place,
zero_copy);
SetTensorFromPyArray<platform::NPUPlace>(impl_ptr, array, place, zero_copy);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Place should be one of "
"CPUPlace/XPUPlace/CUDAPlace/CUDAPinnedPlace/NPUPlace"));
}
*impl_ptr = temp_tensor;
}
void InitEagerTensorWithEagerTensor(TensorObject* self,
const paddle::experimental::Tensor& src,
const paddle::platform::Place& place,
const std::string& name) {
void InitTensorWithTensor(TensorObject* self,
const paddle::experimental::Tensor& src,
const paddle::platform::Place& place,
const std::string& name) {
self->tensor.set_name(name);
if (place == src.inner_place()) {
auto impl = std::static_pointer_cast<pten::DenseTensor>(src.impl());
......@@ -158,10 +153,10 @@ void InitEagerTensorWithEagerTensor(TensorObject* self,
}
}
void InitEagerTensorWithFrameworkTensor(TensorObject* self,
const framework::Tensor& src,
const paddle::platform::Place& place,
const std::string& name) {
void InitTensorWithFrameworkTensor(TensorObject* self,
const framework::Tensor& src,
const paddle::platform::Place& place,
const std::string& name) {
self->tensor.set_name(name);
if (place == src.place()) {
self->tensor.set_impl(std::make_shared<pten::DenseTensor>(src));
......@@ -271,14 +266,14 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
return act_name;
}
// initialize EagerTensor by PyArray(first argument is PyArray,
// initialize Tensor by PyArray(first argument is PyArray,
// mix args and kwargs) automatically.
void AutoInitEagerTensorByPyArray(
TensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args,
bool flag_kwargs, Py_ssize_t args_num) {
// The first argument of the EagerTensor constructor is PyArray,
// there are 6 arguments to construct the new EagerTensor,
void AutoInitTensorByPyArray(TensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map,
PyObject* args, bool flag_kwargs,
Py_ssize_t args_num) {
// The first argument of the Tensor constructor is PyArray,
// there are 6 arguments to construct the new Tensor,
// kw_order_map's key is each argument of the constructor,
// kw_order_map's value is the position of the corresponding argument.
// If you want to update this constructor with new arguments,
......@@ -306,20 +301,21 @@ void AutoInitEagerTensorByPyArray(
stop_gradient = ParseBooleanArgs("stop_gradient", kws_map, kw_order_map, args,
flag_kwargs, args_num);
EmptyEagerTensorInitializer(py_tensor_ptr, act_name, place, persistable,
stop_gradient);
InitEagerTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
EmptyTensorInitializer(py_tensor_ptr, act_name, place, persistable,
stop_gradient);
InitTensorWithNumpyValue(py_tensor_ptr, numpy_value, zero_copy);
}
// initialize EagerTensor by EagerTensor or framework::Tensor (mix args and
// initialize Tensor by Tensor or framework::Tensor (mix args and
// kwargs) automatically.
void AutoInitEagerTensorByTensor(
TensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map, PyObject* args,
bool flag_kwargs, Py_ssize_t args_num, bool init_by_egr_tensor = true) {
// The first argument of the EagerTensor constructor is EagerTensor or
void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
std::unordered_map<std::string, PyObject*> kws_map,
PyObject* args, bool flag_kwargs,
Py_ssize_t args_num,
bool init_by_egr_tensor = true) {
// The first argument of the Tensor constructor is Tensor or
// framework Tensor,
// there are 3 arguments to construct the new EagerTensor,
// there are 3 arguments to construct the new Tensor,
// kw_order_map's key is each argument of the constructor,
// kw_order_map's value is the position of the corresponding argument.
// If you want to update this constructor with new arguments,
......@@ -345,14 +341,14 @@ void AutoInitEagerTensorByTensor(
src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"The first expected kwargs is {value: EagerTensor}, "
"but could not parse the first argument {value: EagerTensor} "
"The first expected kwargs is {value: Tensor}, "
"but could not parse the first argument {value: Tensor} "
"successfully. "
"Please check your input first and make sure you are on the right "
"way."));
}
}
InitEagerTensorWithEagerTensor(py_tensor_ptr, src_tensor, place, act_name);
InitTensorWithTensor(py_tensor_ptr, src_tensor, place, act_name);
} else {
// init by framework tensor
framework::Tensor src_tensor;
......@@ -372,8 +368,7 @@ void AutoInitEagerTensorByTensor(
"way."));
}
}
InitEagerTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place,
act_name);
InitTensorWithFrameworkTensor(py_tensor_ptr, src_tensor, place, act_name);
}
}
......@@ -402,12 +397,12 @@ void AutoInitEagerTensorByTensor(
* ** value: ndarray)
* 5.
* def __init__ (
* ** tensor: EagerTensor)
* ** tensor: Tensor)
* 6. (multi-place)
* (should have at least one parameter; one parameter is equivalent to case 5,
* zero parameters to case 1.)
* def __init__ (
* ** tensor: EagerTensor,
* ** tensor: Tensor,
* ** place: paddle::platform::Place,
* ** name: std::string)
* 7. (multi-place) (should have at least one parameter, one parameter similar
......@@ -417,7 +412,7 @@ void AutoInitEagerTensorByTensor(
* ** place: paddle::platform::Place,
* ** name: std::string)
* **/
int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
// set a flag to record use kwargs or not
bool flag_kwargs = false;
if (kwargs) flag_kwargs = true;
......@@ -427,7 +422,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* kw_persistable = NULL;
PyObject* kw_stop_gradient = NULL;
PyObject* kw_value = NULL; // receive PyArray or EagerTensor
PyObject* kw_value = NULL; // receive PyArray or Tensor
PyObject* kw_place = NULL;
PyObject* kw_name = NULL;
PyObject* kw_dims = NULL;
......@@ -490,7 +485,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (!flag_kwargs) {
// case 1
VLOG(6) << "Calling case1's initializer.";
EmptyEagerTensorInitializer(
EmptyTensorInitializer(
py_tensor_ptr,
egr::Controller::Instance().GenerateUniqueName("generated_tensor"),
egr::Controller::Instance().GetExpectedPlace());
......@@ -499,28 +494,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (kw_value != NULL) {
if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args,
flag_kwargs, args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(
kw_value, reinterpret_cast<PyObject*>(p_tensor_type))) {
VLOG(6) << "Calling case5's or case6's initializer";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(kw_value,
reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
AutoInitEagerTensorByTensor(
py_tensor_ptr, kws_map, args, flag_kwargs, args_num,
/* false means not init by egr tensor*/ false);
AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num,
/* false means not init by egr tensor*/ false);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Could not parse the first keyword argument successfully, "
"the first keyword argument is value, but it should be PyArray "
"or EagerTensor or framework::Tensor. "
"or Tensor or framework::Tensor. "
"Please check your input first and make sure you are on the "
"right way."));
}
......@@ -573,18 +568,18 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
CastPyArg2ProtoType(kw_type, 0);
bool persistable = CastPyArg2AttrBoolean(kw_persistable, 0);
EmptyEagerTensorInitializer(
py_tensor_ptr, act_name,
egr::Controller::Instance().GetExpectedPlace(), persistable,
/* stop_gradient */ true, dtype, dims, var_type);
EmptyTensorInitializer(py_tensor_ptr, act_name,
egr::Controller::Instance().GetExpectedPlace(),
persistable,
/* stop_gradient */ true, dtype, dims, var_type);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We not only support construct EagerTensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) "
"We not only support construct Tensor from numpy value "
"or tensor(Tensor or framework::Tensor) "
"with python kwargs by this initializer, "
"but also even support dtype to init a empty EagerTensor. "
"but also even support dtype to init a empty Tensor. "
"Please check your input first and make sure you call the existed "
"constructor."));
}
......@@ -595,28 +590,28 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(
arg0_ptr, reinterpret_cast<PyObject*>(p_tensor_type))) {
VLOG(6) << "Calling case5's or case6's initializer.";
AutoInitEagerTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else if (PyObject_IsInstance(arg0_ptr, reinterpret_cast<PyObject*>(
g_framework_tensor_pytype))) {
VLOG(6) << "Calling case7's initializer.";
AutoInitEagerTensorByTensor(
py_tensor_ptr, kws_map, args, flag_kwargs, args_num,
/* false means not init by egr tensor*/ false);
AutoInitTensorByTensor(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num,
/* false means not init by egr tensor*/ false);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"We support construct EagerTensor from numpy value "
"or tensor(EagerTensor or framework::Tensor) "
"We support construct Tensor from numpy value "
"or tensor(Tensor or framework::Tensor) "
"with python args and kwargs by this initializer, "
"but the first argument should be PyArray or EagerTensor or "
"but the first argument should be PyArray or Tensor or "
"framework::Tensor. "
"Please check your input first and make sure you call the existed "
"constructor."));
......@@ -626,8 +621,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......@@ -658,15 +653,14 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
paddle::framework::proto::VarType::Type var_type =
CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 3), 3);
bool persistable = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 4), 4);
EmptyEagerTensorInitializer(
py_tensor_ptr, act_name,
egr::Controller::Instance().GetExpectedPlace(), persistable, true,
dtype, dims, var_type);
EmptyTensorInitializer(py_tensor_ptr, act_name,
egr::Controller::Instance().GetExpectedPlace(),
persistable, true, dtype, dims, var_type);
return 0;
} else if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......@@ -680,8 +674,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
PyObject* arg0_ptr = PyTuple_GET_ITEM(args, 0);
if (pybind11::detail::npy_api::get().PyArray_Check_(arg0_ptr)) {
VLOG(6) << "Calling case3's or case4's initializer";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
......@@ -696,8 +690,8 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (!flag_kwargs) {
// case 3
VLOG(6) << "Calling case3's initializer.";
AutoInitEagerTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
AutoInitTensorByPyArray(py_tensor_ptr, kws_map, args, flag_kwargs,
args_num);
return 0;
} else {  // six positional args, remaining arguments are kwargs, but this
// is not the right way
......@@ -716,7 +710,7 @@ int EagerTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
return 1;
}
static void EagerTensorDealloc(TensorObject* self) {
static void TensorDealloc(TensorObject* self) {
self->tensor.~Tensor();
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
}
......@@ -735,19 +729,19 @@ void BindEager(pybind11::module* module) {
auto& internals = pybind11::detail::get_internals();
auto heap_type = reinterpret_cast<PyHeapTypeObject*>(
internals.default_metaclass->tp_alloc(internals.default_metaclass, 0));
heap_type->ht_name = ToPyObject("EagerTensor");
heap_type->ht_qualname = ToPyObject("EagerTensor");
heap_type->ht_name = ToPyObject("Tensor");
heap_type->ht_qualname = ToPyObject("Tensor");
auto type = &heap_type->ht_type;
type->tp_name = "EagerTensor";
type->tp_name = "Tensor";
type->tp_basicsize = sizeof(TensorObject);
type->tp_dealloc = (destructor)EagerTensorDealloc;
type->tp_dealloc = (destructor)TensorDealloc;
type->tp_as_number = &number_methods;
type->tp_as_sequence = &sequence_methods;
type->tp_as_mapping = &mapping_methods;
type->tp_methods = variable_methods;
type->tp_getset = variable_properties;
type->tp_init = EagerTensorInit;
type->tp_new = EagerTensorNew;
type->tp_init = TensorInit;
type->tp_new = TensorNew;
Py_INCREF(internals.instance_base);
type->tp_base = reinterpret_cast<PyTypeObject*>(internals.instance_base);
type->tp_flags |=
......@@ -764,8 +758,8 @@ void BindEager(pybind11::module* module) {
}
Py_INCREF(type);
if (PyModule_AddObject(m.ptr(), "EagerTensor",
reinterpret_cast<PyObject*>(type)) < 0) {
if (PyModule_AddObject(m.ptr(), "Tensor", reinterpret_cast<PyObject*>(type)) <
0) {
Py_DECREF(type);
Py_DECREF(m.ptr());
PADDLE_THROW(platform::errors::Fatal(
......
......@@ -145,9 +145,8 @@ static PyObject* eager_api_tensor_copy(PyObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_api_read_next_eager_tensor_list(PyObject* self,
PyObject* args,
PyObject* kwargs) {
static PyObject* eager_api_read_next_tensor_list(PyObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
auto tensor_base_list =
CastPyArg2VectorOfTensorBase(PyTuple_GET_ITEM(args, 0), 0);
......@@ -182,8 +181,8 @@ PyMethodDef variable_functions[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"tensor_copy", (PyCFunction)(void (*)(void))eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS, NULL},
{"read_next_eager_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_eager_tensor_list,
{"read_next_tensor_list",
(PyCFunction)(void (*)(void))eager_api_read_next_tensor_list,
METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}};
......
......@@ -35,15 +35,15 @@ limitations under the License. */
namespace paddle {
namespace pybind {
extern void InitEagerTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array,
bool zero_copy);
extern void InitTensorWithNumpyValue(TensorObject* self,
const pybind11::object& array,
bool zero_copy);
extern PyTypeObject* p_tensor_type;
static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_numpy(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true,
platform::errors::InvalidArgument(
......@@ -99,18 +99,17 @@ static PyObject* eager_tensor_method_numpy(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method__is_initialized(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method__is_initialized(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
return ToPyObject(self->tensor.initialized());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method__copy_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method__copy_to(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 0), 0);
auto place = CastPyArg2Place(PyTuple_GET_ITEM(args, 1), 1);
auto cp_tensor =
......@@ -123,10 +122,10 @@ static PyObject* eager_tensor_method__copy_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_reconstruct_from_(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
std::string orig_name = self->tensor.name();
......@@ -144,9 +143,9 @@ static PyObject* eager_tensor_method_reconstruct_from_(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_copy_(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
......@@ -170,8 +169,8 @@ static PyObject* eager_tensor_method_copy_(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
static PyObject* tensor_retain_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
if (egr::Controller::Instance().HasGrad()) {
auto meta = egr::EagerUtils::autograd_meta(&(self->tensor));
......@@ -187,10 +186,9 @@ static PyObject* eager_tensor_retain_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__clear_gradient(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__clear_gradient(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "ClearGradient " << self->tensor.name();
paddle::experimental::Tensor* grad;
......@@ -223,8 +221,8 @@ static PyObject* eager_tensor__clear_gradient(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
static PyObject* tensor__zero_grads(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "ZeroGrads " << self->tensor.name();
......@@ -257,10 +255,9 @@ static PyObject* eager_tensor__zero_grads(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__share_buffer_to(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -279,10 +276,10 @@ static PyObject* eager_tensor__share_buffer_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__is_shared_buffer_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* dst_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -303,10 +300,10 @@ static PyObject* eager_tensor__is_shared_buffer_with(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__share_underline_tensor_to(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor* src_ptr =
&(reinterpret_cast<TensorObject*>(PyTuple_GET_ITEM(args, 0))->tensor);
PADDLE_ENFORCE_EQ(self->tensor.initialized(), true,
......@@ -320,9 +317,10 @@ static PyObject* eager_tensor__share_underline_tensor_to(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor__is_shared_underline_tensor_with(
TensorObject* self, PyObject* args, PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
paddle::experimental::Tensor src_tensor =
CastPyArg2Tensor(PyTuple_GET_ITEM(args, 0), 0);
PADDLE_ENFORCE_EQ(src_tensor.initialized(), true,
......@@ -339,9 +337,9 @@ static PyObject* eager_tensor__is_shared_underline_tensor_with(
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_detach(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE_EQ(
self->tensor.initialized(), true,
platform::errors::InvalidArgument("Tensor %s has not been initialized!",
......@@ -365,10 +363,10 @@ static PyObject* eager_tensor_method_detach(TensorObject* self, PyObject* args,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
static PyObject* tensor_method_get_underline_tensor(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
if (self->tensor.is_dense_tensor()) {
auto* tensor =
static_cast<paddle::framework::LoDTensor*>(self->tensor.impl().get());
......@@ -382,57 +380,54 @@ static PyObject* eager_tensor_method_get_underline_tensor(TensorObject* self,
}
// NOTE(wuweilong): Set the value without changing self's original place
static PyObject* eager_tensor_method_set_value(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
static PyObject* tensor_method_set_value(TensorObject* self, PyObject* args,
PyObject* kwargs) {
EAGER_TRY
VLOG(4) << "Value " << self->tensor.name();
pybind11::object numpy_value =
pybind11::object(pybind11::handle(PyTuple_GET_ITEM(args, 0)), true);
InitEagerTensorWithNumpyValue(self, numpy_value, false);
InitTensorWithNumpyValue(self, numpy_value, false);
Py_INCREF(Py_None);
return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyMethodDef variable_methods[] = {
{"numpy", (PyCFunction)(void (*)(void))eager_tensor_method_numpy,
{"numpy", (PyCFunction)(void (*)(void))tensor_method_numpy,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_initialized",
(PyCFunction)(void (*)(void))eager_tensor_method__is_initialized,
(PyCFunction)(void (*)(void))tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_copy_to", (PyCFunction)(void (*)(void))eager_tensor_method__copy_to,
{"_copy_to", (PyCFunction)(void (*)(void))tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_,
{"copy_", (PyCFunction)(void (*)(void))tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"reconstruct_from_",
(PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_,
(PyCFunction)(void (*)(void))tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads,
{"retain_grads", (PyCFunction)(void (*)(void))tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_clear_gradient",
(PyCFunction)(void (*)(void))eager_tensor__clear_gradient,
{"_clear_gradient", (PyCFunction)(void (*)(void))tensor__clear_gradient,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_zero_grads", (PyCFunction)(void (*)(void))eager_tensor__zero_grads,
{"_zero_grads", (PyCFunction)(void (*)(void))tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_buffer_to",
(PyCFunction)(void (*)(void))eager_tensor__share_buffer_to,
{"_share_buffer_to", (PyCFunction)(void (*)(void))tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_buffer_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_buffer_with,
(PyCFunction)(void (*)(void))tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_share_underline_tensor_to",
(PyCFunction)(void (*)(void))eager_tensor__share_underline_tensor_to,
(PyCFunction)(void (*)(void))tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_is_shared_underline_tensor_with",
(PyCFunction)(void (*)(void))eager_tensor__is_shared_underline_tensor_with,
(PyCFunction)(void (*)(void))tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS, NULL},
{"detach", (PyCFunction)(void (*)(void))eager_tensor_method_detach,
{"detach", (PyCFunction)(void (*)(void))tensor_method_detach,
METH_VARARGS | METH_KEYWORDS, NULL},
{"get_tensor",
(PyCFunction)(void (*)(void))eager_tensor_method_get_underline_tensor,
(PyCFunction)(void (*)(void))tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_set_value", (PyCFunction)(void (*)(void))eager_tensor_method_set_value,
{"_set_value", (PyCFunction)(void (*)(void))tensor_method_set_value,
METH_VARARGS | METH_KEYWORDS, NULL},
{NULL, NULL, 0, NULL}};
......
......@@ -79,10 +79,10 @@ const char* CAST_VAR_LIST_TEMPLATE = R"(
auto %s = GetTensorListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_TEMPLATE = R"(
auto %s = GetEagerTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
auto %s = GetTensorPtrFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_VAR_PTR_LIST_TEMPLATE = R"(
auto %s = GetEagerTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
auto %s = GetTensorPtrListFromArgs("%s", "%s", args, %d, %s);)";
const char* CAST_SIZE_T_TEMPLATE = R"(
auto %s = GetUnsignedLongFromArgs("%s", "%s", args, %d, %s);)";
......
......@@ -35,14 +35,14 @@ namespace pybind {
extern PyTypeObject* p_tensor_type;
PyObject* eager_tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_name(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(self->tensor.name());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_TRY
if (self->tensor.is_dense_tensor()) {
return ToPyObject(paddle::framework::proto::VarType::LOD_TENSOR);
} else {
......@@ -52,24 +52,24 @@ PyObject* eager_tensor_properties_get_type(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
int tensor_properties_set_name(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
self->tensor.set_name(CastPyArg2AttrString(value, 0));
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_stop_gradient(TensorObject* self,
void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->StopGradient());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_TRY
if (egr::egr_utils_api::IsLeafTensor(self->tensor)) {
std::shared_ptr<egr::GradNodeBase> grad_node =
egr::EagerUtils::grad_node(self->tensor);
......@@ -94,9 +94,9 @@ PyObject* eager_tensor_properties_get_grad(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
int tensor_properties_set_grad(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
auto src = CastPyArg2Tensor(value, 0);
PADDLE_ENFORCE(
egr::egr_utils_api::IsLeafTensor(self->tensor),
......@@ -115,34 +115,33 @@ int eager_tensor_properties_set_grad(TensorObject* self, PyObject* value,
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
int eager_tensor_properties_set_stop_gradient(TensorObject* self,
PyObject* value, void* closure) {
EAGER_SYNC_TRY
int tensor_properties_set_stop_gradient(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetStopGradient(CastPyArg2AttrBoolean(value, 0));
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_persistable(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_persistable(TensorObject* self, void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
return ToPyObject(meta->Persistable());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
int eager_tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) {
EAGER_SYNC_TRY
int tensor_properties_set_persistable(TensorObject* self, PyObject* value,
void* closure) {
EAGER_TRY
auto meta = egr::EagerUtils::autograd_meta(&self->tensor);
meta->SetPersistable(CastPyArg2AttrBoolean(value, 0));
return 0;
EAGER_CATCH_AND_THROW_RETURN_ZERO
}
PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_TRY
auto ddim = self->tensor.shape();
std::vector<int64_t> value;
size_t rank = static_cast<size_t>(ddim.size());
......@@ -155,50 +154,45 @@ PyObject* eager_tensor_properties_get_shape(TensorObject* self, void* closure) {
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_place(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(self->tensor.inner_place());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_place_str(TensorObject* self,
void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_place_str(TensorObject* self, void* closure) {
EAGER_TRY
std::stringstream ostr;
ostr << self->tensor.inner_place();
return ToPyObject(ostr.str());
EAGER_CATCH_AND_THROW_RETURN_NULL
}
PyObject* eager_tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_SYNC_TRY
PyObject* tensor_properties_get_dtype(TensorObject* self, void* closure) {
EAGER_TRY
return ToPyObject(
paddle::framework::TransToProtoVarType(self->tensor.type()));
EAGER_CATCH_AND_THROW_RETURN_NULL
}
struct PyGetSetDef variable_properties[] = {
{"grad", (getter)eager_tensor_properties_get_grad,
(setter)eager_tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)eager_tensor_properties_get_name,
(setter)eager_tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)eager_tensor_properties_get_stop_gradient,
(setter)eager_tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)eager_tensor_properties_get_persistable,
(setter)eager_tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)eager_tensor_properties_get_shape, nullptr, nullptr,
nullptr},
// {"is_leaf", (getter)eager_tensor_properties_get_is_leaf, nullptr,
{"grad", (getter)tensor_properties_get_grad,
(setter)tensor_properties_set_grad, nullptr, nullptr},
{"name", (getter)tensor_properties_get_name,
(setter)tensor_properties_set_name, nullptr, nullptr},
{"stop_gradient", (getter)tensor_properties_get_stop_gradient,
(setter)tensor_properties_set_stop_gradient, nullptr, nullptr},
{"persistable", (getter)tensor_properties_get_persistable,
(setter)tensor_properties_set_persistable, nullptr, nullptr},
{"shape", (getter)tensor_properties_get_shape, nullptr, nullptr, nullptr},
// {"is_leaf", (getter)tensor_properties_get_is_leaf, nullptr,
// nullptr,
// nullptr},
{"place", (getter)eager_tensor_properties_get_place, nullptr, nullptr,
nullptr},
{"_place_str", (getter)eager_tensor_properties_get_place_str, nullptr,
nullptr, nullptr},
{"dtype", (getter)eager_tensor_properties_get_dtype, nullptr, nullptr,
nullptr},
{"type", (getter)eager_tensor_properties_get_type, nullptr, nullptr,
{"place", (getter)tensor_properties_get_place, nullptr, nullptr, nullptr},
{"_place_str", (getter)tensor_properties_get_place_str, nullptr, nullptr,
nullptr},
{"dtype", (getter)tensor_properties_get_dtype, nullptr, nullptr, nullptr},
{"type", (getter)tensor_properties_get_type, nullptr, nullptr, nullptr},
{nullptr, nullptr, nullptr, nullptr, nullptr}};
} // namespace pybind
......
......@@ -179,7 +179,7 @@ paddle::experimental::Tensor CastPyArg2Tensor(PyObject* obj, ssize_t arg_pos) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"EagerTensor, but got %s",
"EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
}
......@@ -309,7 +309,7 @@ framework::Tensor CastPyArg2FrameworkTensor(PyObject* obj, ssize_t arg_pos) {
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"argument (position %d) must be "
"EagerTensor, but got %s",
"EagerVariable, but got %s",
arg_pos + 1, reinterpret_cast<PyTypeObject*>(obj->ob_type)->tp_name));
}
}
......@@ -597,6 +597,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
if (PyList_Check(list)) {
Py_ssize_t len = PyList_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
......@@ -609,6 +610,7 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
}
} else if (PyTuple_Check(list)) {
Py_ssize_t len = PyTuple_Size(list);
result.reserve(static_cast<size_t>(len));
if (len == 0) {
PADDLE_THROW(platform::errors::InvalidArgument(
"%s(): argument '%s' (position %d) must be list of Tensors, but got "
......@@ -632,9 +634,11 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
return result;
}
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable) {
PyObject* obj = PyTuple_GET_ITEM(args, arg_idx);
if (PyTuple_Check(obj)) {
......@@ -654,7 +658,7 @@ paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
return &(reinterpret_cast<TensorObject*>(obj)->tensor);
}
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs(
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable) {
PyObject* list = PyTuple_GET_ITEM(args, arg_idx);
......
......@@ -65,15 +65,15 @@ PyObject* ToPyObject(
const std::unordered_map<std::string, std::vector<std::string>>& value);
template <typename Tuple, size_t N>
struct TupleEagerTensorResult {
struct TupleTensorResult {
static void Run(const Tuple& out, PyObject* result) {
TupleEagerTensorResult<Tuple, N - 1>::Run(out, result);
TupleTensorResult<Tuple, N - 1>::Run(out, result);
PyTuple_SET_ITEM(result, N - 1, ToPyObject(std::get<N - 1>(out)));
}
};
template <typename Tuple>
struct TupleEagerTensorResult<Tuple, 1> {
struct TupleTensorResult<Tuple, 1> {
static void Run(const Tuple& out, PyObject* result) {
PyTuple_SET_ITEM(result, 0, ToPyObject(std::get<0>(out)));
}
......@@ -84,7 +84,7 @@ PyObject* ToPyObject(const std::tuple<Args...>& out) {
auto len = sizeof...(Args);
PyObject* result = PyTuple_New(len);
TupleEagerTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
TupleTensorResult<decltype(out), sizeof...(Args)>::Run(out, result);
return result;
}
......@@ -97,10 +97,12 @@ std::vector<paddle::experimental::Tensor> GetTensorListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
paddle::experimental::Tensor* GetEagerTensorPtrFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetEagerTensorPtrListFromArgs(
paddle::experimental::Tensor* GetTensorPtrFromArgs(const std::string& op_type,
const std::string& arg_name,
PyObject* args,
ssize_t arg_idx,
bool dispensable = false);
std::vector<paddle::experimental::Tensor*> GetTensorPtrListFromArgs(
const std::string& op_type, const std::string& arg_name, PyObject* args,
ssize_t arg_idx, bool dispensable = false);
......
......@@ -19,7 +19,6 @@ limitations under the License. */
#include "pybind11/pybind11.h"
#define EAGER_TRY try {
#define EAGER_SYNC_TRY try {
#define EAGER_CATCH_AND_THROW_RETURN_NULL \
} \
catch (...) { \
......
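Editor's sketch (not part of the diff): with EAGER_SYNC_TRY removed here, every binding in the files above pairs the single EAGER_TRY macro with EAGER_CATCH_AND_THROW_RETURN_NULL. A minimal illustration of that pattern follows; tensor_method_example is a hypothetical name, not a symbol from this patch:
static PyObject* tensor_method_example(TensorObject* self, PyObject* args,
                                       PyObject* kwargs) {
  EAGER_TRY  // expands to `try {`
  return ToPyObject(self->tensor.initialized());
  // closes the try block, catches any C++ exception, reports it to Python,
  // and returns NULL from the binding
  EAGER_CATCH_AND_THROW_RETURN_NULL
}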
......@@ -222,6 +222,14 @@ class PADDLE_API Tensor final {
*/
bool is_dense_tensor() const;
/**
* @brief Determine whether tensor is SelectedRows
*
* @return true
* @return false
*/
bool is_selected_rows() const;
/* Part 3: Device and Backend methods */
/**
......
......@@ -29,7 +29,6 @@ limitations under the License. */
#include "paddle/pten/core/tensor_base.h"
#include "paddle/pten/core/tensor_meta.h"
#include "paddle/pten/core/tensor_utils.h"
/**
* [ Why still include the fluid headers? ]
*
......@@ -133,7 +132,9 @@ DataLayout Tensor::layout() const { return impl_->layout(); }
bool Tensor::is_dense_tensor() const {
return pten::DenseTensor::classof(impl_.get());
}
bool Tensor::is_selected_rows() const {
return pten::SelectedRows::classof(impl_.get());
}
/* Part 3: Device and Backend methods */
PlaceType Tensor::place() const {
......
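Editor's sketch (not part of the diff): the new is_selected_rows() predicate sits next to the existing is_dense_tensor(), so a caller can check the underlying impl type before casting, mirroring the pattern used in tensor_method_get_underline_tensor above. InspectImpl is a hypothetical helper:
void InspectImpl(paddle::experimental::Tensor& t) {
  if (t.is_dense_tensor()) {
    auto* dense = static_cast<pten::DenseTensor*>(t.impl().get());
    (void)dense;  // ordinary dense storage
  } else if (t.is_selected_rows()) {
    auto* rows = static_cast<pten::SelectedRows*>(t.impl().get());
    (void)rows;  // row-indexed (sparse) storage
  }
}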
......@@ -24,7 +24,7 @@ limitations under the License. */
#include <boost/variant.hpp>
namespace egr {
class EagerTensor;
class EagerVariable;
}
namespace paddle {
namespace framework {
......@@ -76,9 +76,9 @@ struct NameVarMapTrait<VariableWrapper> {
};
template <>
struct NameVarMapTrait<egr::EagerTensor> {
struct NameVarMapTrait<egr::EagerVariable> {
using Type =
std::map<std::string, std::vector<std::shared_ptr<egr::EagerTensor>>>;
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>>;
};
} // namespace details
......@@ -88,7 +88,7 @@ using NameVarMap = typename details::NameVarMapTrait<T>::Type;
using NameVarBaseMap = NameVarMap<VarBase>;
using NameVariableWrapperMap = NameVarMap<VariableWrapper>;
using NameTensorMap = NameVarMap<egr::EagerTensor>;
using NameTensorMap = NameVarMap<egr::EagerVariable>;
using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>;
......
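Editor's sketch (not part of the diff): after this change NameTensorMap resolves to a map from op slot name to the egr::EagerVariable instances bound to that slot. The container shape implied by the trait above is simply:
// equivalent to the NameTensorMap alias after this patch
std::map<std::string, std::vector<std::shared_ptr<egr::EagerVariable>>> ins;
ins["X"] = {};  // one input slot; the tracer fills in the variables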
......@@ -29,10 +29,6 @@ limitations under the License. */
// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/framework/mixed_vector.h"
namespace egr {
class EagerTensor;
} // namespace egr
namespace pten {
class SelectedRows : public TensorBase,
public TypeInfoTraits<TensorBase, SelectedRows> {
......@@ -199,39 +195,6 @@ class SelectedRows : public TensorBase,
std::unique_ptr<DenseTensor> value_{nullptr};
int64_t height_; // height indicates the underline tensor's height
std::unique_ptr<RWLock> rwlock_{nullptr};
// TODO(jiabin): Remove this when we don't need EagerTensor support
// SelectedRows which is expected in next version.
/** Why we need this weird friend class?
* In eager mode, since some of ops doesn't support C++ API for now we need to
*use 'imperative::TraceOp' to run it.
* So, we need to support get a SelectedRows from egr::EagerTensor's
*framework::Variable obj and used it to reconstruct
* a new paddle::experimental::Tensor to support framework usage. However, we
*got 2 problems here.
* First, we got 2 unique_ptr in SelectedRows so that we can't support
*std::make_shared in EagerTensor's SetImplWithSelectedRows method,
* since we have to construct a shared_ptr for paddle::experimental::Tensor's
*impl.
* Second, when we are trying to support move constructor for SelectedRows we
*found that we can't get its rvalue from
* framework::Variable because it holds an obj of target type.
*
*
* The only three way to solve this problem is:
* 1. Just like what we have done, using friend class and just copy/move each
*member. In this way, we can avoid additional API
* and symbols.
* 2. Make pten::SelectedRows's member from unique_ptr to shared_ptr. However,
*this may cause some cost of performance.
* 3. Add some api to return or move member of framework::SelectedRows.
*However, it's not as safe as first solution.
* 4. Support all framework::SelectedRows related ops and make sure
*EagerTensor never holds framework::SelectedRows.
*
* If anyone got better ideas, welcome to contact JiabinYang, we are open for
*your help.
**/
friend class egr::EagerTensor;
};
} // namespace pten
......@@ -104,14 +104,14 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
expected_type += (core.VarBase, )
# TODO(jiabin): uncomment it when we support declarative mode in eager
# if _in_eager_mode():
# expected_type += (core.eager.EagerTensor, )
# expected_type += (core.eager.Tensor, )
elif isinstance(input, core.VarBase):
raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode first. "
"Because received '{}' in {} is an imperative Variable.".format(
input_name, op_name))
elif hasattr(core, "eager"):
if isinstance(input, core.eager.EagerTensor):
if isinstance(input, core.eager.Tensor):
raise TypeError(
"Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode first. "
"Because received '{}' in {} is an imperative Variable.".format(
......
......@@ -253,7 +253,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
try:
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
......@@ -449,7 +449,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
while self._blocking_queue.size() >= len(self._places):
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
self._reader.read_next_var_list()
......@@ -705,7 +705,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
if in_dygraph_mode():
if _in_eager_mode():
data = core.eager.read_next_eager_tensor_list(
data = core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
data = self._reader.read_next_var_list()
......
......@@ -721,10 +721,9 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
value = value.astype(dtype)
if _in_eager_mode():
return core.eager.EagerTensor(value,
framework._current_expected_place(),
False, zero_copy, name
if name else None, True)
return core.eager.Tensor(value,
framework._current_expected_place(), False,
zero_copy, name if name else None, True)
else:
py_var = core.VarBase(
value=value,
......
......@@ -222,7 +222,7 @@ def monkey_patch_math_varbase():
# 2. create varbase for scalar
lhs_dtype = self.dtype
if _in_eager_mode():
other_var_should_be = core.eager.EagerTensor
other_var_should_be = core.eager.Tensor
else:
other_var_should_be = core.VarBase
if not isinstance(other_var, other_var_should_be):
......@@ -343,7 +343,7 @@ def monkey_patch_math_varbase():
if core._in_eager_mode():
local_already_patch = _already_patch_eager_tensor
_already_patch_eager_tensor = True
local_tensor = core.eager.EagerTensor
local_tensor = core.eager.Tensor
else:
local_already_patch = _already_patch_varbase
_already_patch_varbase = True
......
......@@ -150,7 +150,7 @@ def monkey_patch_varbase():
"""
if core._in_eager_mode():
base_tensor = core.eager.EagerTensor
base_tensor = core.eager.Tensor
else:
base_tensor = core.VarBase
assert isinstance(value, (np.ndarray, base_tensor, dict, str)), \
......@@ -180,9 +180,9 @@ def monkey_patch_varbase():
"Variable dtype not match, Variable [ {} ] need tensor with dtype {} but load tensor with dtype {}".format(
self.name, self_tensor_np.dtype, value_np.dtype)
# NOTE(wuweilong): self could be VarBase or EagerTensor; the subsequent behavior is defined in different files
# NOTE(wuweilong): self could be VarBase or Tensor; the subsequent behavior is defined in different files
# if self is VarBase, method value() returns a Variable bound in imperative.cc, and get_tensor() is bound in pybind.cc
# if self is EagerTensor, method value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc
# if self is Tensor, method value() returns self as defined in this file, and get_tensor() is defined in eager_method.cc
# this interface behavior will be unified in the future.
self.value().get_tensor().set(value_np,
framework._current_expected_place())
......@@ -244,8 +244,8 @@ def monkey_patch_varbase():
if grad_tensor is not None:
if core._in_eager_mode():
assert isinstance(
grad_tensor, core.eager.EagerTensor
), "The type of grad_tensor must be paddle.Tensor"
grad_tensor, core.eager.
Tensor), "The type of grad_tensor must be paddle.Tensor"
else:
assert isinstance(
grad_tensor, paddle.
......@@ -592,8 +592,8 @@ def monkey_patch_varbase():
# [0.79010487, 0.53972793, 0.09495186, 0.44267157, 0.72112119]])
"""
if core._in_eager_mode():
from paddle.tensor.to_string import eager_tensor_to_string
return eager_tensor_to_string(self)
from paddle.tensor.to_string import tensor_to_string
return tensor_to_string(self)
else:
from paddle.tensor.to_string import to_string
return to_string(self)
......@@ -624,7 +624,7 @@ def monkey_patch_varbase():
"Only Leaf Tensor supports the deepcopy at the moment, non-Leaf Tensors contain graph information that doesn't support deepcopy"
)
if core._in_eager_mode():
new_varbase = core.eager.EagerTensor()
new_varbase = core.eager.Tensor()
else:
new_varbase = core.VarBase()
new_varbase.name = self.name + unique_name.generate("_deepcopy")
......@@ -808,16 +808,16 @@ def monkey_patch_varbase():
("__getitem__", __getitem__), ("item", item),
("__setitem__", __setitem__), ("_to", _to)):
if core._in_eager_mode():
setattr(core.eager.EagerTensor, method_name, method)
setattr(core.eager.Tensor, method_name, method)
else:
setattr(core.VarBase, method_name, method)
if core._in_eager_mode():
setattr(core.eager.EagerTensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.EagerTensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.EagerTensor, "clear_gradient", clear_gradient)
setattr(core.eager.EagerTensor, "clone", clone)
setattr(core.eager.EagerTensor, "value", value)
setattr(core.eager.Tensor, "_grad_ivar", _grad_ivar)
setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar)
setattr(core.eager.Tensor, "clear_gradient", clear_gradient)
setattr(core.eager.Tensor, "clone", clone)
setattr(core.eager.Tensor, "value", value)
else:
setattr(core.VarBase, "__name__", "Tensor")
setattr(core.VarBase, "grad", grad)
......
......@@ -1057,7 +1057,7 @@ def _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,
dtype = convert_np_dtype_to_dtype_(dtype)
if _in_eager_mode():
eager_tensor = core.eager.EagerTensor(
eager_tensor = core.eager.Tensor(
dtype if dtype else core.VarDesc.VarType.FP32,
list(shape) if shape else [], name, type
if type else core.VarDesc.VarType.LOD_TENSOR, True
......@@ -1076,7 +1076,7 @@ class VariableMetaClass(type):
t = type(instance)
if in_dygraph_mode():
if _in_eager_mode():
return issubclass(t, core.eager.EagerTensor)
return issubclass(t, core.eager.Tensor)
return issubclass(t, core.VarBase)
else:
return issubclass(t, Variable)
......@@ -6412,7 +6412,7 @@ class ParamBase(core.VarBase):
if hasattr(core, "eager"):
_core_eager_eagertensor = core.eager.EagerTensor
_core_eager_eagertensor = core.eager.Tensor
else:
_core_eager_eagertensor = object
......
......@@ -85,10 +85,9 @@ class LayerHelperBase(object):
assert in_dygraph_mode(
), "to_variable could only be called in dygraph mode"
if _in_eager_mode():
return core.eager.EagerTensor(value,
_current_expected_place(), False,
False, name
if name else None, True)
return core.eager.Tensor(value,
_current_expected_place(), False,
False, name if name else None, True)
else:
py_var = core.VarBase(
value=value,
......
......@@ -972,7 +972,7 @@ class DygraphGeneratorLoader(DataLoaderBase):
def __next__(self):
try:
if _in_eager_mode():
return core.eager.read_next_eager_tensor_list(
return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
return self._reader.read_next_var_list()
......
......@@ -109,26 +109,26 @@ class EagerDtypeTestCase(unittest.TestCase):
core.VarDesc.VarType.COMPLEX128)
class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
def constructor(self, place):
egr_tensor = core.eager.EagerTensor()
egr_tensor = core.eager.Tensor()
self.assertEqual(egr_tensor.persistable, False)
self.assertTrue("generated" in egr_tensor.name)
self.assertEqual(egr_tensor.shape, [])
self.assertEqual(egr_tensor.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor.stop_gradient, True)
egr_tensor0 = core.eager.EagerTensor(
core.VarDesc.VarType.FP32, [4, 16, 16, 32], "test_eager_tensor",
core.VarDesc.VarType.LOD_TENSOR, True)
egr_tensor0 = core.eager.Tensor(core.VarDesc.VarType.FP32,
[4, 16, 16, 32], "test_eager_tensor",
core.VarDesc.VarType.LOD_TENSOR, True)
self.assertEqual(egr_tensor0.persistable, True)
self.assertEqual(egr_tensor0.name, "test_eager_tensor")
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
arr0 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor1 = core.eager.EagerTensor(arr0, place, True, False,
"numpy_tensor1", False)
egr_tensor1 = core.eager.Tensor(arr0, place, True, False,
"numpy_tensor1", False)
self.assertEqual(egr_tensor1.persistable, True)
self.assertEqual(egr_tensor1.name, "numpy_tensor1")
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
......@@ -138,8 +138,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor1.numpy(), arr0))
arr1 = np.random.randint(100, size=(4, 16, 16, 32), dtype=np.int64)
egr_tensor2 = core.eager.EagerTensor(arr1, place, False, True,
"numpy_tensor2", True)
egr_tensor2 = core.eager.Tensor(arr1, place, False, True,
"numpy_tensor2", True)
self.assertEqual(egr_tensor2.persistable, False)
self.assertEqual(egr_tensor2.name, "numpy_tensor2")
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
......@@ -149,7 +149,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor2.numpy(), arr1))
arr2 = np.random.rand(4, 16, 16, 32, 64).astype('float32')
egr_tensor3 = core.eager.EagerTensor(arr2)
egr_tensor3 = core.eager.Tensor(arr2)
self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor3.name)
self.assertEqual(egr_tensor3.shape, [4, 16, 16, 32, 64])
......@@ -161,7 +161,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(np.array_equal(egr_tensor3.numpy(), arr2))
egr_tensor3.stop_gradient = False
egr_tensor4 = core.eager.EagerTensor(egr_tensor3)
egr_tensor4 = core.eager.Tensor(egr_tensor3)
self.assertEqual(egr_tensor4.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor4.name)
self.assertEqual(egr_tensor4.shape, egr_tensor3.shape)
......@@ -174,7 +174,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
np.array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()))
arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor5 = core.eager.EagerTensor(arr4, place)
egr_tensor5 = core.eager.Tensor(arr4, place)
self.assertEqual(egr_tensor5.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor5.name)
self.assertEqual(egr_tensor5.shape, [4, 16, 16, 32])
......@@ -183,7 +183,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor5.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor5.numpy(), arr4))
egr_tensor6 = core.eager.EagerTensor(egr_tensor5, core.CPUPlace())
egr_tensor6 = core.eager.Tensor(egr_tensor5, core.CPUPlace())
self.assertEqual(egr_tensor6.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor6.name)
self.assertEqual(egr_tensor6.shape, [4, 16, 16, 32])
......@@ -193,7 +193,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor6.numpy(), egr_tensor5.numpy()))
egr_tensor7 = core.eager.EagerTensor(arr4, place, True)
egr_tensor7 = core.eager.Tensor(arr4, place, True)
self.assertEqual(egr_tensor7.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor7.name)
self.assertEqual(egr_tensor7.shape, [4, 16, 16, 32])
......@@ -202,7 +202,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor7.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor7.numpy(), arr4))
egr_tensor8 = core.eager.EagerTensor(egr_tensor6, place, "egr_tensor8")
egr_tensor8 = core.eager.Tensor(egr_tensor6, place, "egr_tensor8")
self.assertEqual(egr_tensor8.persistable, False)
self.assertEqual(egr_tensor8.name, "egr_tensor8")
self.assertEqual(egr_tensor8.shape, [4, 16, 16, 32])
......@@ -212,7 +212,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor8.numpy(), egr_tensor5.numpy()))
egr_tensor9 = core.eager.EagerTensor(arr4, place, True, True)
egr_tensor9 = core.eager.Tensor(arr4, place, True, True)
self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("generated_tensor" in egr_tensor9.name)
self.assertEqual(egr_tensor9.shape, [4, 16, 16, 32])
......@@ -224,7 +224,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace())
egr_tensor10 = core.eager.EagerTensor(t, place)
egr_tensor10 = core.eager.Tensor(t, place)
self.assertEqual(egr_tensor10.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor10.name)
self.assertEqual(egr_tensor10.shape, [3, 3])
......@@ -233,7 +233,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor10.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor10.numpy(), x))
egr_tensor11 = core.eager.EagerTensor(t, place, "framework_constructed")
egr_tensor11 = core.eager.Tensor(t, place, "framework_constructed")
self.assertEqual(egr_tensor11.persistable, False)
self.assertTrue("framework_constructed" in egr_tensor11.name)
self.assertEqual(egr_tensor11.shape, [3, 3])
......@@ -242,7 +242,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor11.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor11.numpy(), x))
egr_tensor12 = core.eager.EagerTensor(t)
egr_tensor12 = core.eager.Tensor(t)
self.assertEqual(egr_tensor12.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor12.name)
self.assertEqual(egr_tensor12.shape, [3, 3])
......@@ -290,10 +290,10 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.constructor(p)
def constructor_with_kwargs(self, place):
# init EagerTensor by Python array
# init Tensor by Python array
arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor0 = core.eager.EagerTensor(value=arr)
egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
......@@ -303,7 +303,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor0.stop_gradient, True)
egr_tensor1 = core.eager.EagerTensor(value=arr, place=place)
egr_tensor1 = core.eager.Tensor(value=arr, place=place)
self.assertEqual(egr_tensor1.persistable, False)
self.assertTrue("generated" in egr_tensor1.name)
self.assertEqual(egr_tensor1.shape, [4, 16, 16, 32])
......@@ -311,7 +311,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor1.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor1.stop_gradient, True)
egr_tensor2 = core.eager.EagerTensor(arr, place=place)
egr_tensor2 = core.eager.Tensor(arr, place=place)
self.assertEqual(egr_tensor2.persistable, False)
self.assertTrue("generated" in egr_tensor2.name)
self.assertEqual(egr_tensor2.shape, [4, 16, 16, 32])
......@@ -319,7 +319,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor2.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor2.stop_gradient, True)
egr_tensor3 = core.eager.EagerTensor(
egr_tensor3 = core.eager.Tensor(
arr, place=place, name="new_eager_tensor")
self.assertEqual(egr_tensor3.persistable, False)
self.assertTrue("new_eager_tensor" in egr_tensor3.name)
......@@ -328,7 +328,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor3.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor3.stop_gradient, True)
egr_tensor4 = core.eager.EagerTensor(
egr_tensor4 = core.eager.Tensor(
arr, place=place, persistable=True, name="new_eager_tensor")
self.assertEqual(egr_tensor4.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor4.name)
......@@ -337,7 +337,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor4.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor4.stop_gradient, True)
egr_tensor5 = core.eager.EagerTensor(
egr_tensor5 = core.eager.Tensor(
arr,
core.CPUPlace(),
persistable=True,
......@@ -350,7 +350,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor5.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor5.stop_gradient, True)
egr_tensor6 = core.eager.EagerTensor(
egr_tensor6 = core.eager.Tensor(
arr,
place=core.CPUPlace(),
persistable=True,
......@@ -363,7 +363,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor6.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor6.stop_gradient, True)
egr_tensor7 = core.eager.EagerTensor(
egr_tensor7 = core.eager.Tensor(
arr,
place=place,
persistable=True,
......@@ -376,7 +376,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor7.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor7.stop_gradient, True)
egr_tensor8 = core.eager.EagerTensor(
egr_tensor8 = core.eager.Tensor(
arr,
place=place,
persistable=True,
......@@ -390,7 +390,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor8.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor8.stop_gradient, False)
egr_tensor9 = core.eager.EagerTensor(
egr_tensor9 = core.eager.Tensor(
arr, place, True, True, "new_eager_tensor", stop_gradient=False)
self.assertEqual(egr_tensor9.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor9.name)
......@@ -399,7 +399,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor9.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor9.stop_gradient, False)
egr_tensor10 = core.eager.EagerTensor(
egr_tensor10 = core.eager.Tensor(
arr,
place,
True,
......@@ -413,7 +413,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor10.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor10.stop_gradient, False)
egr_tensor11 = core.eager.EagerTensor(
egr_tensor11 = core.eager.Tensor(
arr,
place,
True,
......@@ -427,7 +427,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor11.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor11.stop_gradient, False)
egr_tensor12 = core.eager.EagerTensor(
egr_tensor12 = core.eager.Tensor(
arr,
place,
persistable=True,
......@@ -441,7 +441,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32)
self.assertEqual(egr_tensor12.stop_gradient, False)
egr_tensor13 = core.eager.EagerTensor(
egr_tensor13 = core.eager.Tensor(
value=arr,
place=place,
persistable=True,
......@@ -456,7 +456,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor13.stop_gradient, False)
# special case
egr_tensor14 = core.eager.EagerTensor(
egr_tensor14 = core.eager.Tensor(
dtype=core.VarDesc.VarType.FP32,
dims=[4, 16, 16, 32],
name="special_eager_tensor",
......@@ -467,8 +467,8 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertEqual(egr_tensor14.shape, [4, 16, 16, 32])
self.assertEqual(egr_tensor14.dtype, core.VarDesc.VarType.FP32)
# init EagerTensor by EagerTensor
egr_tensor15 = core.eager.EagerTensor(value=egr_tensor4)
# init Tensor by Tensor
egr_tensor15 = core.eager.Tensor(value=egr_tensor4)
self.assertEqual(egr_tensor15.persistable, True)
self.assertTrue("generated" in egr_tensor15.name)
self.assertEqual(egr_tensor15.shape, egr_tensor4.shape)
......@@ -480,7 +480,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()))
egr_tensor16 = core.eager.EagerTensor(
egr_tensor16 = core.eager.Tensor(
value=egr_tensor4, name="new_eager_tensor")
self.assertEqual(egr_tensor16.persistable, True)
self.assertTrue("new_eager_tensor" in egr_tensor16.name)
......@@ -493,7 +493,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()))
egr_tensor17 = core.eager.EagerTensor(
egr_tensor17 = core.eager.Tensor(
value=egr_tensor4,
place=place,
name="new_eager_tensor", )
......@@ -506,7 +506,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor17.numpy(), egr_tensor4.numpy()))
egr_tensor18 = core.eager.EagerTensor(
egr_tensor18 = core.eager.Tensor(
egr_tensor4,
place=place,
name="new_eager_tensor", )
......@@ -519,7 +519,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(
np.array_equal(egr_tensor18.numpy(), egr_tensor4.numpy()))
egr_tensor19 = core.eager.EagerTensor(
egr_tensor19 = core.eager.Tensor(
egr_tensor4,
place,
name="new_eager_tensor", )
......@@ -536,7 +536,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
x = np.random.rand(3, 3).astype('float32')
t = paddle.fluid.Tensor()
t.set(x, paddle.fluid.CPUPlace())
egr_tensor20 = core.eager.EagerTensor(value=t)
egr_tensor20 = core.eager.Tensor(value=t)
self.assertEqual(egr_tensor20.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor20.name)
self.assertEqual(egr_tensor20.shape, [3, 3])
......@@ -547,7 +547,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
paddle.fluid.framework._current_expected_place()))
self.assertTrue(np.array_equal(egr_tensor20.numpy(), x))
egr_tensor21 = core.eager.EagerTensor(value=t, place=place)
egr_tensor21 = core.eager.Tensor(value=t, place=place)
self.assertEqual(egr_tensor21.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor21.name)
self.assertEqual(egr_tensor21.shape, [3, 3])
......@@ -556,7 +556,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor21.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor21.numpy(), x))
egr_tensor22 = core.eager.EagerTensor(t, place=place)
egr_tensor22 = core.eager.Tensor(t, place=place)
self.assertEqual(egr_tensor22.persistable, False)
self.assertTrue("generated_tensor" in egr_tensor22.name)
self.assertEqual(egr_tensor22.shape, [3, 3])
......@@ -565,8 +565,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor22.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor22.numpy(), x))
egr_tensor23 = core.eager.EagerTensor(
t, place, name="from_framework_tensor")
egr_tensor23 = core.eager.Tensor(t, place, name="from_framework_tensor")
self.assertEqual(egr_tensor23.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor23.name)
self.assertEqual(egr_tensor23.shape, [3, 3])
......@@ -575,7 +574,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
self.assertTrue(egr_tensor23.place._equals(place))
self.assertTrue(np.array_equal(egr_tensor23.numpy(), x))
egr_tensor24 = core.eager.EagerTensor(
egr_tensor24 = core.eager.Tensor(
value=t, place=place, name="from_framework_tensor")
self.assertEqual(egr_tensor24.persistable, False)
self.assertTrue("from_framework_tensor" in egr_tensor24.name)
......@@ -587,7 +586,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
# Bad usage
# SyntaxError: positional argument follows keyword argument
# egr_tensor25 = core.eager.EagerTensor(value=t, place)
# egr_tensor25 = core.eager.Tensor(value=t, place)
def test_constructor_with_kwargs(self):
print("Test_constructor_with_kwargs")
......@@ -655,7 +654,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
tensor3 = core.eager.EagerTensor()
tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0))
......@@ -683,7 +682,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
tensor2 = None
tensor = paddle.to_tensor(arr, core.VarDesc.VarType.FP32,
core.CPUPlace())
tensor3 = core.eager.EagerTensor()
tensor3 = core.eager.Tensor()
if core.is_compiled_with_cuda():
tensor2 = paddle.to_tensor(arr2, core.VarDesc.VarType.FP32,
core.CUDAPlace(0))
......@@ -748,7 +747,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
with _test_eager_guard():
arr = np.random.rand(4, 16, 16, 32).astype('float64')
egr_tensor0 = core.eager.EagerTensor(value=arr)
egr_tensor0 = core.eager.Tensor(value=arr)
self.assertEqual(egr_tensor0.persistable, False)
self.assertTrue("generated" in egr_tensor0.name)
self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32])
......@@ -766,7 +765,7 @@ class EagerTensorPropertiesAndMethodsTestCase(unittest.TestCase):
def test_set_value(self):
with _test_eager_guard():
ori_arr = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor = core.eager.EagerTensor(value=ori_arr)
egr_tensor = core.eager.Tensor(value=ori_arr)
self.assertEqual(egr_tensor.stop_gradient, True)
self.assertEqual(egr_tensor.shape, [4, 16, 16, 32])
self.assertTrue(np.array_equal(egr_tensor.numpy(), ori_arr))
......@@ -859,7 +858,7 @@ class EagerParamBaseUsageTestCase(unittest.TestCase):
def test_backward_with_single_tensor(self):
with _test_eager_guard():
arr4 = np.random.rand(4, 16, 16, 32).astype('float32')
egr_tensor12 = core.eager.EagerTensor(arr4, core.CPUPlace())
egr_tensor12 = core.eager.Tensor(arr4, core.CPUPlace())
egr_tensor12.retain_grads()
arr = np.ones([4, 16, 16, 32]).astype('float32')
self.assertEqual(egr_tensor12.persistable, False)
......
......@@ -203,7 +203,7 @@ class TestImperative(unittest.TestCase):
with fluid.dygraph.guard():
if fluid.framework._in_eager_mode():
var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.eager.EagerTensor))
self.assertTrue(isinstance(var_base, core.eager.Tensor))
else:
var_base = paddle.to_tensor(np.array([3, 4, 5]))
self.assertTrue(isinstance(var_base, core.VarBase))
......@@ -221,13 +221,13 @@ class TestImperative(unittest.TestCase):
t.set(x, fluid.CPUPlace())
if _in_eager_mode():
# TODO(jiabin): Support Kwargs and uncomment these tests
# egr_tmp = fluid.core.eager.EagerTensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.EagerTensor(y, fluid.core.CPUPlace())
# egr_tmp = fluid.core.eager.Tensor(value=x, place=fluid.core.CPUPlace())
egr_tmp2 = fluid.core.eager.Tensor(y, fluid.core.CPUPlace())
egr_tmp3 = paddle.to_tensor(x)
egr_tmp4 = fluid.core.eager.EagerTensor(y)
# egr_tmp5 = fluid.core.eager.EagerTensor(value=x)
egr_tmp4 = fluid.core.eager.Tensor(y)
# egr_tmp5 = fluid.core.eager.Tensor(value=x)
# TODO(jiabin): Support it when we merge LoDTensor with DenseTensor
egr_tmp6 = fluid.core.eager.EagerTensor(t)
egr_tmp6 = fluid.core.eager.Tensor(t)
# self.assertTrue(np.array_equal(x, egr_tmp.numpy()))
self.assertTrue(np.array_equal(y, egr_tmp2.numpy()))
......@@ -953,8 +953,7 @@ class TestMetaclass(unittest.TestCase):
self.assertNotEqual(type(MyLayer).__name__, 'pybind11_type')
if core._in_eager_mode():
self.assertEqual(
type(paddle.fluid.core.eager.EagerTensor).__name__,
'pybind11_type')
type(paddle.fluid.core.eager.Tensor).__name__, 'pybind11_type')
else:
self.assertEqual(
type(paddle.fluid.core.VarBase).__name__, 'pybind11_type')
......
......@@ -41,7 +41,7 @@ class TestImperativeNumpyBridge(unittest.TestCase):
data_np[0][0] = -1
self.assertEqual(data_np[0][0], -1)
if _in_eager_mode():
# eager mode, var2 is an EagerTensor, which is not subscriptable
# eager mode, var2 is a Tensor, which is not subscriptable
# TODO(wuweilong): to support slice in eager mode later
self.assertNotEqual(var2.numpy()[0][0], -1)
else:
......
......@@ -1358,7 +1358,7 @@ class ReduceOnPlateau(LRScheduler):
self.last_epoch = epoch
if _in_eager_mode():
tmp = core.eager.EagerTensor
tmp = core.eager.Tensor
else:
tmp = Tensor
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
......
......@@ -169,8 +169,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
# TODO(jiabin): Support kwargs in eager tensor constructor
if _in_eager_mode() and isinstance(data, np.ndarray):
return core.eager.EagerTensor(data, place, False, False, None,
stop_gradient)
return core.eager.Tensor(data, place, False, False, None, stop_gradient)
else:
return paddle.Tensor(
value=data,
......
......@@ -263,7 +263,7 @@ def to_string(var, prefix='Tensor'):
data=data)
def eager_tensor_to_string(tensor, prefix='Tensor'):
def tensor_to_string(tensor, prefix='Tensor'):
indent = len(prefix) + 1
_template = "{prefix}(shape={shape}, dtype={dtype}, place={place}, stop_gradient={stop_gradient},\n{indent}{data})"
......