Commit 8f3be73f authored by: J Jiabin Yang, committed by: GitHub

Revert "Refactor dygraph to eager (#37318)"

This reverts commit b962f5fe.
Parent edc3496f
@@ -11,6 +11,6 @@ add_subdirectory(imperative)
 add_subdirectory(operators)
 add_subdirectory(string)
 add_subdirectory(pybind)
-add_subdirectory(eager)
 # NOTE: please add subdirectory inference at last.
 add_subdirectory(inference)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <Python.h>
// framework deps
#include "paddle/fluid/framework/data_layout_transform.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/variable.h"
// pten deps
#include "paddle/pten/all.h"
#include "paddle/pten/api/all.h"
#include "paddle/pten/api/lib/utils/tensor_utils.h"
/**
 * This class is used by Eager mode for now. Doing this in Eager mode is
 * painful, and the better choice would be to use
 * paddle::experimental::Tensor directly. However, we have a bunch of nested
 * kernel code whose inner logic relies on paddle::framework::Variable, so we
 * have to provide a variable in paddle::framework::ExecutionContext to
 * support it. We should remove this as soon as we finish the latest Pten
 * lib, and use paddle::experimental::Tensor instead.
 *
 * Note: Keep this class as clean as possible.
 * This class should only support the methods declared in
 * paddle::experimental::Tensor, plus access methods for
 * paddle::framework::Variable; no more members are acceptable.
 * **/
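// An illustrative round-trip sketch: RunLegacyKernel below is a hypothetical
// stand-in for any nested kernel code that still consumes
// paddle::framework::Variable.
//
//   egr::EagerTensor et(dense_impl);   // pten-backed tensor
//   et.SyncToVar();                    // materialize a framework::Variable
//   RunLegacyKernel(et.MutableVar());  // legacy code that needs a Variable*
//   et.SyncToTensor();                 // pull the storage back into pten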
namespace egr {
class EagerTensor final {
public:
/* Part 1: Constructors */
EagerTensor()
: tensor_(std::make_shared<paddle::experimental::Tensor>()),
var_(paddle::framework::Variable()) {}
explicit EagerTensor(const std::string& name)
: tensor_(std::make_shared<paddle::experimental::Tensor>(name)),
var_(paddle::framework::Variable()) {}
/**
* @description: Use a TensorImpl pointer to construct a Tensor
* @param {shared_ptr<TensorBase>} tensor_impl
* @return {Tensor}
*/
explicit EagerTensor(const std::shared_ptr<pten::TensorBase>& tensor_impl)
: tensor_(std::make_shared<paddle::experimental::Tensor>(tensor_impl)),
var_(paddle::framework::Variable()) {}
EagerTensor(const EagerTensor&) = default;
EagerTensor(EagerTensor&&) = default;
/* Part 2: Name access methods */
/**
* @description: Return the name of current Tensor.
* @param None
* @return {const std::string&}
*/
const std::string& name() const { return tensor_->name(); }
/**
* @description: Set the name of current Tensor.
* @param {const std::string& name}
* @return None
*/
void set_name(const std::string& name) { tensor_->set_name(name); }
/* Part 3: Dimension, DataType and DataLayout methods */
/**
* @description: Return the number of elements of current Tensor.
* @param None
* @return {int64_t}
*/
int64_t numel() const { return tensor_->numel(); }
/**
* @description: Return the shape (dimensions) of current Tensor.
* @param None
* @return {DDim}
*/
paddle::framework::DDim shape() const { return tensor_->dims(); }
/**
* @description: Return the data type of current Tensor.
* @param None
* @return {DataType}
*/
paddle::experimental::DataType type() const { return tensor_->type(); }
/**
* @description: Return the layout of current Tensor.
* @param None
* @return {DataLayout}
*/
paddle::experimental::DataLayout layout() const { return tensor_->layout(); }
/* Part 4: Device and Backend methods */
/**
* @description: Return the place (device) of current Tensor.
* @param None
* @return {Place}
*/
paddle::platform::Place place() const { return tensor_->inner_place(); }
/**
* Backend judgment APIs that shield the concept of Backend.
*/
bool is_cpu() const { return paddle::platform::is_cpu_place(place()); }
bool is_cuda() const { return paddle::platform::is_gpu_place(place()); }
/* Part 5: Data Access methods */
/**
* @description: Return the implementation of current Tensor.
* @param None
* @return {std::shared_ptr<TensorBase>}
*/
std::shared_ptr<pten::TensorBase> impl() const { return tensor_->impl(); }
/**
* @description: Set the implementation of current Tensor.
* @param {std::shared_ptr<TensorBase>}
* @return None
*/
void set_impl(const std::shared_ptr<pten::TensorBase>& impl) {
tensor_->set_impl(impl);
}
// TODO(chenweihang): Whether API Tensor need `data` and `mutable_data`?
// TODO(chenweihang): slice and split methods use kernels?
/* Part 6: Status utility methods */
/**
* @description: Determine whether it is a meaningful Tensor
* @param None
* @return {bool}
*/
bool defined() const { return tensor_->defined(); }
/**
* @description: Determine whether Tensor is initialized
* @param None
* @return {bool}
*/
bool initialized() const { return tensor_->initialized(); }
/**
* @description: Reset the Tensor implementation
* @param None
* @return {void}
*/
void reset() { tensor_->reset(); }
/* Part 7: Operator overloading */
EagerTensor& operator=(const EagerTensor& x) & {
tensor_ = x.tensor_;
var_ = x.var_;
return *this;
}
EagerTensor& operator=(EagerTensor&& x) & {
tensor_ = std::move(x.tensor_);
var_ = std::move(x.var_);
return *this;
}
/* Part 8: Autograd methods */
paddle::experimental::AbstractAutogradMeta* get_autograd_meta() const {
return tensor_->get_autograd_meta();
}
void set_autograd_meta(
std::shared_ptr<paddle::experimental::AbstractAutogradMeta>
autograd_meta) {
tensor_->set_autograd_meta(autograd_meta);
}
/** Part 9: Get framework::Variable from EagerTensor **/
const paddle::framework::Variable& Var() const { return var_; }
paddle::framework::Variable* MutableVar() { return &var_; }
/** Part 10: Sync paddle::framework::Variable with pten::Tensor **/
void SyncToVar(paddle::framework::proto::VarType_Type type =
paddle::framework::proto::VarType::LOD_TENSOR) {
// Synchronize allocation only once.
if (!var_.IsInitialized()) {
// TODO(jiabin): Support selected rows later.
if (this->initialized()) {
if (type == paddle::framework::proto::VarType::LOD_TENSOR) {
auto* framework_tensor =
var_.GetMutable<paddle::framework::LoDTensor>();
framework_tensor->Resize(tensor_->dims());
framework_tensor->set_layout(
pten::TransToFluidDataLayout(tensor_->layout()));
// Construct framework::Tensor from egr::EagerTensor
auto tensor_dense =
std::dynamic_pointer_cast<pten::DenseTensor>(tensor_->impl());
if (tensor_dense) {
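// Note: MovesStorage transfers the underlying allocation into
// framework_tensor, so this EagerTensor reports initialized() == false
// until SyncToTensor() restores the storage.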
paddle::experimental::MovesStorage(tensor_dense.get(),
framework_tensor);
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unrecognized egr::EagerTensor type, only "
"DenseTensor is supported for now."));
}
}
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Can not Sync EagerTensor %s whose "
"pten::DenseTensor is not initialized!",
name()));
}
}
}
/** Part 11: Sync pten::Tensor with paddle::framework::Variable **/
void SyncToTensor() {
// Synchronize allocation only once.
if (!this->defined() || !this->initialized()) {
// TODO(jiabin): Support selected rows later.
if (var_.IsInitialized()) {
if (var_.IsType<paddle::framework::LoDTensor>()) {
SetImplWithLegacyTensor<paddle::framework::LoDTensor,
pten::DenseTensor>();
} else if (var_.IsType<paddle::framework::Tensor>()) {
SetImplWithLegacyTensor<paddle::framework::Tensor,
pten::DenseTensor>();
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Unable to fetch underlying tensor "
"from VarBase, only LoDTensor and "
"Tensor are supported for now"));
}
} else {
PADDLE_THROW(paddle::platform::errors::Fatal(
"Can not Sync EagerTensor %s whose paddle::framework::Variable is "
"not initialized!",
name()));
}
}
}
void ResetVar(const paddle::framework::Variable& src) { var_ = src; }
private:
template <typename LEGACY_TYPE, typename TYPE>
void SetImplWithLegacyTensor() {
const auto& framework_tensor = var_.Get<LEGACY_TYPE>();
this->set_impl(
std::move(paddle::experimental::MakePtenDenseTensor(framework_tensor)));
var_.Clear();
}
private:
std::shared_ptr<paddle::experimental::Tensor> tensor_ = nullptr;
paddle::framework::Variable var_;
};
} // namespace egr
set(eager_deps pten pten_api python)
add_subdirectory(data_structure_tests)
cc_test(test_egr_ds_eager_tensor SRCS eager_tensor_test.cc DEPS ${eager_deps})
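# Assuming a standard cmake build tree, the test registered above can then be
# run with, for example:
#   ctest -R test_egr_ds_eager_tensor -V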
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/pten/api/lib/utils/allocator.h"
// TODO(jiabin): remove nolint here!!!
using namespace egr; // NOLINT
namespace eager_test {
using AbstractAutogradMeta = paddle::experimental::AbstractAutogradMeta;
class AutogradMetaTest : public AbstractAutogradMeta {
public:
explicit AutogradMetaTest(int val) : val_(val) {}
int val_ = 0;
};
}  // namespace eager_test
TEST(EagerTensor, Constructor) {
EagerTensor et1 = EagerTensor();
EagerTensor et2 = EagerTensor("et2");
CHECK_EQ(et1.defined(), false);
CHECK_EQ(et2.name(), "et2");
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
dt_ptr[1] = 10.0f;
EagerTensor et3 = EagerTensor(dt);
auto* et3_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
CHECK_EQ(et3_ptr[0], 5.0f);
CHECK_EQ(et3_ptr[1], 10.0f);
// copy constructor
EagerTensor et4(et3);
auto* et4_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et4.impl())->data<float>();
CHECK_EQ(et4_ptr[0], 5.0f);
CHECK_EQ(et4_ptr[1], 10.0f);
EagerTensor et5(std::move(et4));
auto* et5_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et5.impl())->data<float>();
CHECK_EQ(et5_ptr[0], 5.0f);
CHECK_EQ(et5_ptr[1], 10.0f);
}
TEST(EagerTensor, MemberFunction) {
EagerTensor et3;
pten::DenseTensorMeta meta = pten::DenseTensorMeta(
pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
std::make_shared<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace()),
meta);
auto* dt_ptr = dt->mutable_data<float>();
dt_ptr[0] = 5.0f;
dt_ptr[1] = 10.0f;
VLOG(6) << "Make Dense Tensor";
et3.set_name("et3");
VLOG(6) << "Set Name";
CHECK_EQ(et3.name(), "et3");
CHECK_EQ(et3.defined(), false);
et3.set_impl(dt);
VLOG(6) << "Set impl";
CHECK_EQ(et3.initialized(), true);
CHECK_EQ(et3.is_cpu(), true);
CHECK_EQ(et3.is_cuda(), false);
CHECK_EQ(et3.numel(), 2);
auto expected_dim = paddle::framework::make_ddim({1, 2});
CHECK_EQ(et3.shape(), expected_dim);
CHECK_EQ(et3.type(), paddle::experimental::DataType::FLOAT32);
CHECK_EQ(et3.layout(), paddle::experimental::DataLayout::NCHW);
CHECK(paddle::platform::is_cpu_place(et3.place()));
VLOG(6) << "Get impl";
auto* dt3_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
CHECK_EQ(dt3_ptr[0], 5.0f);
CHECK_EQ(dt3_ptr[1], 10.0f);
EagerTensor et4 = et3;
VLOG(6) << "copy =";
CHECK(et4.initialized() == true);
auto* dt4_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et4.impl())->data<float>();
CHECK_EQ(dt4_ptr[0], 5.0f);
CHECK_EQ(dt4_ptr[1], 10.0f);
VLOG(6) << "move =";
EagerTensor et5 = std::move(et4);
auto* dt5_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et5.impl())->data<float>();
CHECK_EQ(dt5_ptr[0], 5.0f);
CHECK_EQ(dt5_ptr[1], 10.0f);
VLOG(6) << "AutogradMeta";
auto autograd_meta_test = std::make_shared<eager_test::AutogradMetaTest>(2);
et3.set_autograd_meta(autograd_meta_test);
auto* tmp_autograd_meta_test =
static_cast<eager_test::AutogradMetaTest*>(et3.get_autograd_meta());
CHECK_EQ(tmp_autograd_meta_test->val_, 2);
VLOG(6) << "SyncToVar";
et3.SyncToVar();
CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
5.0f);
CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[1],
10.0f);
VLOG(6) << "SyncToTensor";
CHECK(et3.initialized() == false);
et3.SyncToTensor();
CHECK(et3.initialized() == true);
VLOG(6) << "Check Tensor";
auto* dt3_tmp_ptr =
std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
CHECK_EQ(dt3_tmp_ptr[0], 5.0f);
CHECK_EQ(dt3_tmp_ptr[1], 10.0f);
et3.reset();
CHECK(et3.defined() == false);
VLOG(6) << "Finish";
}
@@ -86,16 +86,13 @@ class AbstractAutogradMeta {
 class PD_DLL_DECL Tensor final {
  public:
-  /* Part 1: Construction and destruction methods */
   /**
    * @brief Construct a new Tensor object
    */
   Tensor() = default;
-  /**
-   * @brief Construct a new Tensor object with name
-   * */
-  explicit Tensor(const std::string& name) { name_ = name; }
   /**
    * @brief Construct a new Tensor object by copy
    */
@@ -131,19 +128,7 @@ class PD_DLL_DECL Tensor final {
    */
   Tensor(const PlaceType& place, const std::vector<int64_t>& shape);
-  /**
-   * @brief Return the name of Tensor.
-   *
-   * @return const std::string&
-   */
-  const std::string& name() const { return name_; }
-  /**
-   * @brief Set name of Tensor.
-   *
-   * @param const std::string& name
-   */
-  void set_name(const std::string& name) { name_ = name; }
+  /* Part 2: Dimension, DataType and DataLayout methods */
   /**
    * @brief Return the number of elements of Tensor.
 ......