diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt
index 369aa64016951ac4429c963ff35e9403dfa1f3ce..f4b68ec65b9d23e247aedd27e758d186f2e9930a 100644
--- a/paddle/fluid/CMakeLists.txt
+++ b/paddle/fluid/CMakeLists.txt
@@ -11,6 +11,6 @@ add_subdirectory(imperative)
 add_subdirectory(operators)
 add_subdirectory(string)
 add_subdirectory(pybind)
-
+add_subdirectory(eager)
 # NOTE: please add subdirectory inference at last.
 add_subdirectory(inference)
diff --git a/paddle/fluid/eager/CMakeLists.txt b/paddle/fluid/eager/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88c05163602afabc81c663eb0fd0b3db882b2a3b
--- /dev/null
+++ b/paddle/fluid/eager/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(tests)
diff --git a/paddle/fluid/eager/eager_tensor.h b/paddle/fluid/eager/eager_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..1ae8d3eded70c0eebc5a08d1cb032586c392f561
--- /dev/null
+++ b/paddle/fluid/eager/eager_tensor.h
@@ -0,0 +1,265 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <memory>
+// framework deps
+#include "paddle/fluid/framework/data_layout_transform.h"
+#include "paddle/fluid/framework/pten_utils.h"
+#include "paddle/fluid/framework/tensor.h"
+#include "paddle/fluid/framework/variable.h"
+// pten deps
+#include "paddle/pten/all.h"
+#include "paddle/pten/api/all.h"
+#include "paddle/pten/api/lib/utils/tensor_utils.h"
+/**
+ * This class is used by Eager mode for now. It's painful to do this in
+ * Eager mode; the better choice is to use paddle::experimental::Tensor
+ * directly. However, we have a bunch of nested kernel code that uses
+ * paddle::framework::Variable in its inner logic, so we have to provide a
+ * variable in paddle::framework::ExecutionContext to support it. We should
+ * remove this as soon as we finish our latest Pten Lib, and use
+ * paddle::experimental::Tensor instead.
+ *
+ * Note: Keep this class as clean as possible.
+ * This class should only support methods declared in
+ * paddle::experimental::Tensor plus access methods for
+ * paddle::framework::Variable; no more members are acceptable.
+ **/
+
+namespace egr {
+class EagerTensor final {
+ public:
+  /* Part 1: Constructors */
+  EagerTensor()
+      : tensor_(std::make_shared<paddle::experimental::Tensor>()),
+        var_(paddle::framework::Variable()) {}
+  explicit EagerTensor(const std::string& name)
+      : tensor_(std::make_shared<paddle::experimental::Tensor>(name)),
+        var_(paddle::framework::Variable()) {}
+  /**
+   * @description: Use a TensorImpl pointer to construct a Tensor
+   * @param {shared_ptr<pten::TensorBase>} tensor_impl
+   * @return {Tensor}
+   */
+  explicit EagerTensor(const std::shared_ptr<pten::TensorBase>& tensor_impl)
+      : tensor_(std::make_shared<paddle::experimental::Tensor>(tensor_impl)),
+        var_(paddle::framework::Variable()) {}
+  EagerTensor(const EagerTensor&) = default;
+  EagerTensor(EagerTensor&&) = default;
+
+  /* Part 2: Name access methods */
+  /**
+   * @description: Return the name of current Tensor.
+   * @param None
+   * @return {const std::string&}
+   */
+  const std::string& name() const { return tensor_->name(); }
+  /**
+   * @description: Set the name of current Tensor.
+   * @param {const std::string& name}
+   * @return None
+   */
+  void set_name(const std::string& name) { tensor_->set_name(name); }
+
+  /* Part 3: Dimension, DataType and DataLayout methods */
+  /**
+   * @description: Return the number of elements of current Tensor.
+   * @param None
+   * @return {int64_t}
+   */
+  int64_t numel() const { return tensor_->numel(); }
+  /**
+   * @description: Return the shape (dimensions) of current Tensor.
+   * @param None
+   * @return {DDim}
+   */
+  paddle::framework::DDim shape() const { return tensor_->dims(); }
+
+  /**
+   * @description: Return the data type of current Tensor.
+   * @param None
+   * @return {DataType}
+   */
+  paddle::experimental::DataType type() const { return tensor_->type(); }
+
+  /**
+   * @description: Return the layout of current Tensor.
+   * @param None
+   * @return {DataLayout}
+   */
+  paddle::experimental::DataLayout layout() const { return tensor_->layout(); }
+
+  /* Part 4: Device and Backend methods */
+  /**
+   * @description: Return the place (device) of current Tensor.
+   * @param None
+   * @return {Place}
+   */
+  paddle::platform::Place place() const { return tensor_->inner_place(); }
+
+  /**
+   * Backend judgment APIs, shield the concept of Backend.
+   */
+  bool is_cpu() const { return paddle::platform::is_cpu_place(place()); }
+  bool is_cuda() const { return paddle::platform::is_gpu_place(place()); }
+
+  /* Part 5: Data Access methods */
+  /**
+   * @description: Return the implementation of current Tensor.
+   * @param None
+   * @return {std::shared_ptr<pten::TensorBase>}
+   */
+  std::shared_ptr<pten::TensorBase> impl() const { return tensor_->impl(); }
+
+  /**
+   * @description: Set the implementation of current Tensor.
+   * @param {std::shared_ptr<pten::TensorBase>}
+   * @return None
+   */
+  void set_impl(const std::shared_ptr<pten::TensorBase>& impl) {
+    tensor_->set_impl(impl);
+  }
+
+  // TODO(chenweihang): Does the API Tensor need `data` and `mutable_data`?
+
+  // TODO(chenweihang): Should slice and split methods use kernels?
+
+  /* Part 6: Status utils methods */
+  /**
+   * @description: Determine whether it is a meaningful Tensor
+   * @param None
+   * @return {bool}
+   */
+  bool defined() const { return tensor_->defined(); }
+
+  /**
+   * @description: Determine whether Tensor is initialized
+   * @param None
+   * @return {bool}
+   */
+  bool initialized() const { return tensor_->initialized(); }
+
+  /**
+   * @description: Reset the Tensor implementation
+   * @param None
+   * @return {void}
+   */
+  void reset() { tensor_->reset(); }
+
+  /* Part 7: Operator overloading */
+  EagerTensor& operator=(const EagerTensor& x) & {
+    tensor_ = x.tensor_;
+    var_ = x.var_;
+    return *this;
+  }
+  EagerTensor& operator=(EagerTensor&& x) & {
+    tensor_ = std::move(x.tensor_);
+    var_ = std::move(x.var_);
+    return *this;
+  }
+
+  /* Part 8: Autograd methods */
+  paddle::experimental::AbstractAutogradMeta* get_autograd_meta() const {
+    return tensor_->get_autograd_meta();
+  }
+  void set_autograd_meta(
+      std::shared_ptr<paddle::experimental::AbstractAutogradMeta>
+          autograd_meta) {
+    tensor_->set_autograd_meta(autograd_meta);
+  }
+
+  /** Part 9: Get framework::Variable from EagerTensor **/
+  const paddle::framework::Variable& Var() const { return var_; }
+
+  paddle::framework::Variable* MutableVar() { return &var_; }
+
+  /** Part 10: Sync paddle::framework::Variable with pten::Tensor **/
+  void SyncToVar(paddle::framework::proto::VarType_Type type =
+                     paddle::framework::proto::VarType::LOD_TENSOR) {
+    // Synchronize allocation only once.
+    if (!var_.IsInitialized()) {
+      // TODO(jiabin): Support selected rows later.
+      if (this->initialized()) {
+        if (type == paddle::framework::proto::VarType::LOD_TENSOR) {
+          auto* framework_tensor =
+              var_.GetMutable<paddle::framework::LoDTensor>();
+          framework_tensor->Resize(tensor_->dims());
+          framework_tensor->set_layout(
+              pten::TransToFluidDataLayout(tensor_->layout()));
+          // Construct framework::Tensor from egr::EagerTensor
+          auto tensor_dense =
+              std::dynamic_pointer_cast<pten::DenseTensor>(tensor_->impl());
+          if (tensor_dense) {
+            paddle::experimental::MovesStorage(tensor_dense.get(),
+                                               framework_tensor);
+          } else {
+            PADDLE_THROW(paddle::platform::errors::Fatal(
+                "Unrecognized egr::EagerTensor type, only "
+                "DenseTensor is supported for now."));
+          }
+        }
+      } else {
+        PADDLE_THROW(paddle::platform::errors::Fatal(
+            "Can not Sync EagerTensor %s whose "
+            "pten::DenseTensor is not initialized!",
+            name()));
+      }
+    }
+  }
+  /** Part 11: Sync pten::Tensor with paddle::framework::Variable **/
+  void SyncToTensor() {
+    // Synchronize allocation only once.
+    if (!this->defined() || !this->initialized()) {
+      // TODO(jiabin): Support selected rows later.
+      if (var_.IsInitialized()) {
+        if (var_.IsType<paddle::framework::LoDTensor>()) {
+          SetImplWithLegacyTensor<paddle::framework::LoDTensor>();
+        } else if (var_.IsType<paddle::framework::Tensor>()) {
+          SetImplWithLegacyTensor<paddle::framework::Tensor>();
+        } else {
+          PADDLE_THROW(paddle::platform::errors::Fatal(
+              "Unable to fetch underlying tensor "
+              "from VarBase, only LoDTensor and "
+              "Tensor are supported for now"));
+        }
+      } else {
+        PADDLE_THROW(paddle::platform::errors::Fatal(
+            "Can not Sync EagerTensor %s whose paddle::framework::Variable is "
+            "not initialized!",
+            name()));
+      }
+    }
+  }
+
+  void ResetVar(const paddle::framework::Variable& src) { var_ = src; }
+
+ private:
+  template <typename LEGACY_TENSOR>
+  void SetImplWithLegacyTensor() {
+    const auto& framework_tensor = var_.Get<LEGACY_TENSOR>();
+    this->set_impl(
+        std::move(paddle::experimental::MakePtenDenseTensor(framework_tensor)));
+    var_.Clear();
+  }
+
+ private:
+  std::shared_ptr<paddle::experimental::Tensor> tensor_ = nullptr;
+  paddle::framework::Variable var_;
+};
+}  // namespace egr
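For orientation (an annotation, not part of the patch): the `SyncToVar`/`SyncToTensor` pair above exists so eager code can hand a pten-backed tensor to legacy kernels that only understand `paddle::framework::Variable`, then pull the result back. A minimal sketch of that round trip follows; `RunLegacyKernel` is a hypothetical stand-in for such nested kernel code, and everything else uses only the APIs declared above.

```cpp
#include <memory>

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/pten/api/lib/utils/allocator.h"

// Hypothetical legacy entry point that reads/writes a framework::Variable.
void RunLegacyKernel(paddle::framework::Variable* var);

void SyncRoundTripSketch() {
  // Build a 1x2 float32 DenseTensor on CPU, as the unit test below does.
  pten::DenseTensorMeta meta(pten::DataType::FLOAT32,
                             paddle::framework::make_ddim({1, 2}));
  auto dt = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  dt->mutable_data<float>()[0] = 1.0f;

  egr::EagerTensor et(dt);
  et.set_name("x");

  // Move the pten allocation into the inner Variable so legacy code can
  // consume it as a paddle::framework::LoDTensor.
  et.SyncToVar();
  RunLegacyKernel(et.MutableVar());

  // SyncToVar moved the storage out, leaving the pten impl uninitialized,
  // which is exactly the state in which SyncToTensor is allowed to rebuild
  // it from the (possibly updated) Variable.
  et.SyncToTensor();
}
```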
diff --git a/paddle/fluid/eager/tests/CMakeLists.txt b/paddle/fluid/eager/tests/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..171ac1581882ab8cd9b04210c70509cf99b24e97
--- /dev/null
+++ b/paddle/fluid/eager/tests/CMakeLists.txt
@@ -0,0 +1,2 @@
+set(eager_deps pten pten_api python)
+add_subdirectory(data_structure_tests)
diff --git a/paddle/fluid/eager/tests/data_structure_tests/CMakeLists.txt b/paddle/fluid/eager/tests/data_structure_tests/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..907fcd101ba69678dd2c0144b2501df5187fc1d1
--- /dev/null
+++ b/paddle/fluid/eager/tests/data_structure_tests/CMakeLists.txt
@@ -0,0 +1 @@
+cc_test(test_egr_ds_eager_tensor SRCS eager_tensor_test.cc DEPS ${eager_deps})
diff --git a/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc b/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a528867d4420361e2d70f00d81816d83a41a6e35
--- /dev/null
+++ b/paddle/fluid/eager/tests/data_structure_tests/eager_tensor_test.cc
@@ -0,0 +1,135 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+
+#include "paddle/fluid/eager/eager_tensor.h"
+#include "paddle/pten/api/lib/utils/allocator.h"
+
+// TODO(jiabin): remove nolint here!!!
+using namespace egr;  // NOLINT
+
+namespace eager_test {
+using AbstractAutogradMeta = paddle::experimental::AbstractAutogradMeta;
+class AutogradMetaTest : public AbstractAutogradMeta {
+ public:
+  explicit AutogradMetaTest(int val) : val_(val) {}
+  int val_ = 0;
+};
+}  // namespace eager_test
+TEST(EagerTensor, Constructor) {
+  EagerTensor et1 = EagerTensor();
+  EagerTensor et2 = EagerTensor("et2");
+
+  CHECK_EQ(et1.defined(), false);
+  CHECK_EQ(et2.name(), "et2");
+
+  pten::DenseTensorMeta meta = pten::DenseTensorMeta(
+      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+  std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
+      std::make_shared<paddle::experimental::DefaultAllocator>(
+          paddle::platform::CPUPlace()),
+      meta);
+  auto* dt_ptr = dt->mutable_data<float>();
+  dt_ptr[0] = 5.0f;
+  dt_ptr[1] = 10.0f;
+  EagerTensor et3 = EagerTensor(dt);
+  auto* et3_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
+  CHECK_EQ(et3_ptr[0], 5.0f);
+  CHECK_EQ(et3_ptr[1], 10.0f);
+  // copy constructor
+  EagerTensor et4(et3);
+  auto* et4_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et4.impl())->data<float>();
+  CHECK_EQ(et4_ptr[0], 5.0f);
+  CHECK_EQ(et4_ptr[1], 10.0f);
+  EagerTensor et5(std::move(et4));
+  auto* et5_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et5.impl())->data<float>();
+  CHECK_EQ(et5_ptr[0], 5.0f);
+  CHECK_EQ(et5_ptr[1], 10.0f);
+}
+
+TEST(EagerTensor, MemberFunction) {
+  EagerTensor et3;
+  pten::DenseTensorMeta meta = pten::DenseTensorMeta(
+      pten::DataType::FLOAT32, paddle::framework::make_ddim({1, 2}));
+  std::shared_ptr<pten::DenseTensor> dt = std::make_shared<pten::DenseTensor>(
+      std::make_shared<paddle::experimental::DefaultAllocator>(
+          paddle::platform::CPUPlace()),
+      meta);
+  auto* dt_ptr = dt->mutable_data<float>();
+  dt_ptr[0] = 5.0f;
+  dt_ptr[1] = 10.0f;
+  VLOG(6) << "Make Dense Tensor";
+  et3.set_name("et3");
+  VLOG(6) << "Set Name";
+  CHECK_EQ(et3.name(), "et3");
+  CHECK_EQ(et3.defined(), false);
+  et3.set_impl(dt);
+  VLOG(6) << "Set impl";
+  CHECK_EQ(et3.initialized(), true);
+  CHECK_EQ(et3.is_cpu(), true);
+  CHECK_EQ(et3.is_cuda(), false);
+  CHECK_EQ(et3.numel(), 2);
+  auto expected_dim = paddle::framework::make_ddim({1, 2});
+  CHECK_EQ(et3.shape(), expected_dim);
+  CHECK_EQ(et3.type(), paddle::experimental::DataType::FLOAT32);
+  CHECK_EQ(et3.layout(), paddle::experimental::DataLayout::NCHW);
+  CHECK(paddle::platform::is_cpu_place(et3.place()));
+  VLOG(6) << "Get impl";
+  auto* dt3_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
+  CHECK_EQ(dt3_ptr[0], 5.0f);
+  CHECK_EQ(dt3_ptr[1], 10.0f);
+  EagerTensor et4 = et3;
+  VLOG(6) << "copy =";
+  CHECK(et4.initialized() == true);
+  auto* dt4_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et4.impl())->data<float>();
+  CHECK_EQ(dt4_ptr[0], 5.0f);
+  CHECK_EQ(dt4_ptr[1], 10.0f);
+  VLOG(6) << "move =";
+  EagerTensor et5 = std::move(et4);
+  auto* dt5_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et5.impl())->data<float>();
+  CHECK_EQ(dt5_ptr[0], 5.0f);
+  CHECK_EQ(dt5_ptr[1], 10.0f);
+  VLOG(6) << "AutogradMeta";
+  auto autograd_meta_test = std::make_shared<eager_test::AutogradMetaTest>(2);
+  et3.set_autograd_meta(autograd_meta_test);
+  auto* tmp_autograd_meta_test =
+      static_cast<eager_test::AutogradMetaTest*>(et3.get_autograd_meta());
+  CHECK_EQ(tmp_autograd_meta_test->val_, 2);
+  VLOG(6) << "SyncToVar";
+  et3.SyncToVar();
+  CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[0],
+           5.0f);
+  CHECK_EQ(et3.Var().Get<paddle::framework::LoDTensor>().data<float>()[1],
+           10.0f);
+  VLOG(6) << "SyncToTensor";
+  CHECK(et3.initialized() == false);
+  et3.SyncToTensor();
+  CHECK(et3.initialized() == true);
+  VLOG(6) << "Check Tensor";
+  auto* dt3_tmp_ptr =
+      std::dynamic_pointer_cast<pten::DenseTensor>(et3.impl())->data<float>();
+  CHECK_EQ(dt3_tmp_ptr[0], 5.0f);
+  CHECK_EQ(dt3_tmp_ptr[1], 10.0f);
+  et3.reset();
+  CHECK(et3.defined() == false);
+  VLOG(6) << "Finish";
+}
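One behavior worth noting alongside the test (an observation following from the defaulted copy constructor and `operator=` above, not something the test asserts): copying an `EagerTensor` copies the inner `shared_ptr`, so copies alias the same `paddle::experimental::Tensor` and storage rather than cloning the data. A sketch:

```cpp
#include <memory>

#include "glog/logging.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/pten/api/lib/utils/allocator.h"

void ShallowCopySketch() {
  pten::DenseTensorMeta meta(pten::DataType::FLOAT32,
                             paddle::framework::make_ddim({1, 2}));
  auto dt = std::make_shared<pten::DenseTensor>(
      std::make_shared<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace()),
      meta);
  dt->mutable_data<float>()[0] = 5.0f;

  egr::EagerTensor src(dt);
  egr::EagerTensor dst = src;  // copies the shared_ptr, not the buffer

  // Both handles point at the same pten::TensorBase...
  CHECK_EQ(src.impl().get(), dst.impl().get());
  // ...so a write through one handle is visible through the other.
  std::dynamic_pointer_cast<pten::DenseTensor>(src.impl())
      ->mutable_data<float>()[0] = 7.0f;
  CHECK_EQ(std::dynamic_pointer_cast<pten::DenseTensor>(dst.impl())
               ->data<float>()[0],
           7.0f);
}
```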
diff --git a/paddle/pten/api/include/tensor.h b/paddle/pten/api/include/tensor.h
index 982ec29f2be64983c4f8241618f422c27453bf52..476dddb4e2cbd00a1ae9cff50dc97816f45a4e6c 100644
--- a/paddle/pten/api/include/tensor.h
+++ b/paddle/pten/api/include/tensor.h
@@ -86,13 +86,16 @@ class AbstractAutogradMeta {
 class PD_DLL_DECL Tensor final {
  public:
-  /* Part 1: Construction and destruction methods */
-
   /**
    * @brief Construct a new Tensor object
    */
   Tensor() = default;
 
+  /**
+   * @brief Construct a new Tensor object with name
+   **/
+  explicit Tensor(const std::string& name) { name_ = name; }
+
   /**
    * @brief Construct a new Tensor object by copy
    */
   Tensor(const Tensor&) = default;
@@ -128,7 +131,19 @@ class PD_DLL_DECL Tensor final {
    */
   Tensor(const PlaceType& place, const std::vector<int64_t>& shape);
 
-  /* Part 2: Dimension, DataType and DataLayout methods */
+  /**
+   * @brief Return the name of Tensor.
+   *
+   * @return const std::string&
+   */
+  const std::string& name() const { return name_; }
+
+  /**
+   * @brief Set name of Tensor.
+   *
+   * @param const std::string& name
+   */
+  void set_name(const std::string& name) { name_ = name; }
 
   /**
    * @brief Return the number of elements of Tensor.
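Finally, a quick usage sketch for the naming API this hunk adds to `paddle::experimental::Tensor` (assuming only the members shown here); this is what `egr::EagerTensor::name()`/`set_name()` forward to:

```cpp
#include "glog/logging.h"
#include "paddle/pten/api/include/tensor.h"

void TensorNameSketch() {
  paddle::experimental::Tensor t("weight");  // named at construction
  CHECK_EQ(t.name(), "weight");
  t.set_name("weight@GRAD");  // renamed in place
  CHECK_EQ(t.name(), "weight@GRAD");
}
```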