Unverified commit 78ef1071, authored by Chen Weihang, committed by GitHub

[Phi&CustomOp] Remove deprecated enum PlaceType for custom op & add warning (#41647)

* remove old custom op placetype

* replace dist placetype usage

* add WITH_GPU macro

* fix mutable_data error

* fix set value error

* add comment
Parent: ce5e1196
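In practice, the change looks like this from a custom-operator author's point of view. A minimal before/after sketch (hypothetical operator code, not taken from this diff; it assumes the `paddle::empty` overload taking shape, dtype, and place):

#include "paddle/extension.h"

std::vector<paddle::Tensor> MyOpForward(const paddle::Tensor& x) {
  // Old style (kept compiling by the compatibility struct added below):
  //   PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
  // Preferred style after this commit:
  PD_CHECK(x.is_cpu(), "x must be a CPU Tensor.");
  auto out = paddle::empty(x.shape(), x.dtype(), x.place());
  return {out};
}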
......@@ -41,13 +41,14 @@ std::string GetKeyFromPlaces(const std::vector<Place>& places) {
}
static bool CheckTensorsInPlace(const std::vector<Tensor>& tensors,
const PlaceType type) {
return std::all_of(tensors.cbegin(), tensors.cend(),
[&](const Tensor& t) { return t.place() == type; });
phi::AllocationType type) {
return std::all_of(tensors.cbegin(), tensors.cend(), [&](const Tensor& t) {
return t.place().GetType() == type;
});
}
bool CheckTensorsInCudaPlace(const std::vector<Tensor>& tensors) {
return CheckTensorsInPlace(tensors, PlaceType::kGPU);
return CheckTensorsInPlace(tensors, phi::AllocationType::GPU);
}
} // namespace distributed
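// Usage sketch (not part of this diff): the same AllocationType-based check
// applied to a single tensor `t`:
//   bool on_gpu = t.place().GetType() == phi::AllocationType::GPU;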
......
......@@ -414,20 +414,13 @@ void EagerReducer::InitializeDenseGroups(
p_group->dense_tensors_.push_back(phi::DenseTensor());
const auto &dtype = tensor.dtype();
const auto &place = tensor.place();
const auto &inner_place = tensor.impl()->place();
if (index > 0) {
PADDLE_ENFORCE_EQ(dtype, p_group->dtype_,
platform::errors::PreconditionNotMet(
"Tensor %s has unexpected dtype.", tensor_name));
PADDLE_ENFORCE_EQ(place, place_,
platform::errors::PreconditionNotMet(
"Tensor %s has different place. Expected place is "
"%s, but actual place is %s",
tensor_name, inner_place_, inner_place));
} else {
p_group->dtype_ = dtype;
place_ = place;
inner_place_ = inner_place;
}
}
......
......@@ -26,7 +26,6 @@
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/utils/string/string_helper.h"
......@@ -121,7 +120,6 @@ class EagerReducer {
std::vector<EagerGroup> groups_;
std::vector<TensorLocator> variable_locators_;
PlaceType place_;
platform::Place inner_place_;
size_t next_group_ = 0;
int64_t nranks_ = -1;
......
......@@ -36,7 +36,6 @@ limitations under the License. */
#include "paddle/fluid/platform/dynload/dynamic_loader.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/api/all.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/utils/any.h"
......@@ -627,8 +626,8 @@ class CustomGradOpMaker<imperative::OpBase>
static void RegisterOperatorKernelWithPlace(
const std::string& name,
const OperatorWithKernel::OpKernelFunc& op_kernel_func,
const proto::VarType::Type type, const PlaceType& place) {
OpKernelType key(type, experimental::ConvertExtPlaceToInnerPlace(place));
const proto::VarType::Type type, const platform::Place& place) {
OpKernelType key(type, place);
VLOG(3) << "Custom Operator: op kernel key: " << key;
OperatorWithKernel::AllOpKernels()[name][key] = op_kernel_func;
}
......@@ -666,10 +665,10 @@ static void RegisterOperatorKernel(const std::string& name,
op_kernel_func = func;
}
RegisterOperatorKernelWithPlace(name, op_kernel_func, proto::VarType::RAW,
PlaceType::kCPU);
platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
RegisterOperatorKernelWithPlace(name, op_kernel_func, proto::VarType::RAW,
PlaceType::kGPU);
platform::CUDAPlace());
#endif
}
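// Sketch (hypothetical, not in this diff): with platform::Place as the
// parameter, registering a kernel for another device needs no enum extension;
// for example, an XPU registration could look like:
//   RegisterOperatorKernelWithPlace(name, op_kernel_func, proto::VarType::RAW,
//                                   platform::XPUPlace());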
......
......@@ -921,7 +921,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
"please check the type of tensor."));
}
if (value_tensor_tmp.place() == paddle::PlaceType::kUNK) {
if (!value_tensor_tmp.initialized()) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(
static_cast<phi::DenseTensor*>(value_tensor_tmp.impl().get()),
......@@ -1009,7 +1009,7 @@ static PyObject* tensor_method__setitem_eager_tensor(TensorObject* self,
VLOG(4) << "index is not tensor";
self_numpy[_index] = py::object(py::handle(value_obj), true);
}
if (self->tensor.place() == paddle::PlaceType::kUNK) {
if (!self->tensor.initialized()) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
SetTensorFromPyArray(self_tensor, self_numpy,
platform::Place(platform::CUDAPlace(0)), false);
......
......@@ -41,5 +41,4 @@ limitations under the License. */
#include "paddle/phi/api/ext/dispatch.h"
#include "paddle/phi/api/ext/exception.h"
#include "paddle/phi/api/ext/op_meta_info.h"
#include "paddle/phi/api/ext/place.h"
#include "paddle/phi/api/ext/tensor_compat.h"
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
namespace paddle {
// TODO(yangjiabin): Add other place support in next PR
enum class PlaceType { kUNK = -1, kCPU, kGPU };
} // namespace paddle
......@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
#include "paddle/phi/api/include/api.h"
#include "paddle/phi/api/include/tensor.h"
// Note(chenweihang): In order to be compatible with the original custom
......@@ -21,5 +22,8 @@ limitations under the License. */
// cannot be included in paddle
namespace paddle {
using Tensor = paddle::experimental::Tensor;
using Tensor = experimental::Tensor;
// bring several Tensor initialization functions into the paddle namespace
using experimental::empty;
using experimental::full;
} // namespace paddle
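// With these using-declarations, custom operators can create tensors without
// the deprecated Tensor(place, shape) constructor (sketch, assuming the `full`
// overload with explicit dtype and place):
//   auto t = paddle::full({2, 2}, 1.0f, phi::DataType::FLOAT32, phi::CPUPlace());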
......@@ -29,7 +29,6 @@ using gpuStream_t = cudaStream_t;
using gpuStream_t = hipStream_t;
#endif
#include "paddle/phi/api/ext/place.h"
#include "paddle/phi/api/include/dll_decl.h"
#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h"
......@@ -109,21 +108,23 @@ class PADDLE_API Tensor final {
/**
* @brief Construct a new Tensor object on the target place.
* This is a deprecated method and may be removed in the future!
*
* This is a deprecated method and may be removed in the future!!!
*
* @param place
*/
explicit Tensor(const PlaceType& place);
explicit Tensor(const Place& place);
/**
* @brief Construct a new Tensor object on the target place
* with specified shape.
* This is a deprecated method and may be removed in the future!
*
* This is a deprecated method and may be removed in the future!!!
*
* @param place
* @param shape
*/
Tensor(const PlaceType& place, const std::vector<int64_t>& shape);
Tensor(const Place& place, const std::vector<int64_t>& shape);
/**
* @brief Construct a new Tensor object by a TensorBase pointer and name
......@@ -135,8 +136,9 @@ class PADDLE_API Tensor final {
/**
* @brief Construct a new Tensor object with name
*
* @note Used to adapt original execution mechanism and debug analysis
* in the development of new dygraph. It may be removed in the future.
* @note Internal method, used to adapt original execution mechanism and
* debug analysis in the development of new dygraph. It may be removed in
* the future.
* */
explicit Tensor(const std::string& name) : name_(name) {}
......@@ -151,6 +153,7 @@ class PADDLE_API Tensor final {
/**
* @brief Get the size of current tensor.
*
* Compatibility alias of `Tensor::numel()`.
* This is a deprecated method and may be removed in the future!
*
......@@ -167,6 +170,7 @@ class PADDLE_API Tensor final {
/**
* @brief Return the shape (dimensions) of Tensor.
*
* Compatibility alias of `Tensor::dims()`.
* This is a deprecated method and may be removed in the future!
*
......@@ -178,7 +182,7 @@ class PADDLE_API Tensor final {
* @brief Reset the shape of the tensor.
* @note: This method resets the shape of the tensor,
* and must be called before calling mutable_data() or
* copy_to(const PlaceType& place), this is not a standard definition of
* copy_to(const Place& place), this is not a standard definition of
* reshape behavior, so we will deprecate this feature in the future.
*
* @param shape
......@@ -194,6 +198,7 @@ class PADDLE_API Tensor final {
/**
* @brief Return the data type of Tensor.
*
* Compatibility alias of `Tensor::dtype()`.
* This is a deprecated method and may be removed in the future!
*
......@@ -246,18 +251,18 @@ class PADDLE_API Tensor final {
* @brief Return the place (device) of Tensor.
* This is a deprecated method and may be removed in the future!
*
* @return PlaceType
* @return Place
*/
PlaceType place() const;
Place place() const;
/**
* @brief Return the place (device) of Tensor.
* Because the `place` method already exists, we need to use a new name;
* here we temporarily use `inner_place`.
*
* @return paddle::platform::Place
* This is a deprecated method and may be removed in the future!!!
*
* @return Place
*/
phi::Place inner_place() const;
Place inner_place() const;
/**
* @brief Determine whether the tensor device is CPU
......@@ -287,7 +292,7 @@ class PADDLE_API Tensor final {
/**
* @brief Get the memory pointer in CPU or GPU with specific data type.
* It's usually used to get the output data pointer.
* It's usually used to get the output data pointer, same as the T* data().
*
* @tparam T
* @return T*
......@@ -297,6 +302,7 @@ class PADDLE_API Tensor final {
/**
* @brief Get the memory pointer in CPU or GPU with specific data type.
*
* It's usually used to get the output data pointer.
* This is a deprecated method and may be removed in the future!
*
......@@ -305,7 +311,7 @@ class PADDLE_API Tensor final {
* @return T*
*/
template <typename T>
T* mutable_data(const PlaceType& place);
T* mutable_data(const Place& place);
/**
* @brief Get the const memory pointer directly.
......@@ -319,8 +325,7 @@ class PADDLE_API Tensor final {
/**
* @brief Get the memory pointer directly.
* It's usually used to get the output data pointer.
* This is a deprecated method and may be removed in the future!
* It's usually used to get the mutable output data pointer.
*
* @tparam T
* @return T*
......@@ -409,7 +414,7 @@ class PADDLE_API Tensor final {
* @return Tensor
*/
template <typename T>
Tensor copy_to(const PlaceType& target_place) const;
Tensor copy_to(const Place& target_place) const;
/**
* @brief Transfer the current Tensor to the specified device and return.
......@@ -427,7 +432,8 @@ class PADDLE_API Tensor final {
* @param blocking, Should we copy this in sync way.
* @return void
*/
void copy_(const Tensor& src, const phi::Place& target_place, bool blocking);
void copy_(const Tensor& src, const Place& target_place, bool blocking);
/**
* @brief Cast datatype from one to another
*
......@@ -489,11 +495,17 @@ class PADDLE_API Tensor final {
/* Part 8: Autograd methods */
/**
* @brief Get the autograd meta object
* @brief Get the autograd meta object pointer
*
* @return AbstractAutogradMeta*
*/
AbstractAutogradMeta* get_autograd_meta() const;
/**
* @brief Get the shared pointer of autograd meta object
*
* @return std::shared_ptr<AbstractAutogradMeta>&
*/
const std::shared_ptr<AbstractAutogradMeta>& mutable_autograd_meta() const;
/**
......@@ -524,7 +536,7 @@ class PADDLE_API Tensor final {
/* Part 10: Auto generated Tensor methods */
/* Part 11: Methods of converting SparseTensor and DenseTensor to each other
/* Part 11: Methods of converting underlying TensorType to each other
*/
/**
* @brief Convert DenseTensor or SparseCsrTensor to SparseCooTensor
......@@ -587,12 +599,6 @@ class PADDLE_API Tensor final {
* in the development of new dygraph. It may be removed in the future.
*/
std::string name_{""};
/**
* Place type: Return the expected memory location if the Tensor is
* uninitialized.
*/
PlaceType place_{PlaceType::kUNK};
};
} // namespace experimental
......
add_subdirectory(utils)
cc_library(ext_compat_utils SRCS ext_compat_utils.cc DEPS place)
if (WITH_GPU)
nv_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
nv_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
elseif (WITH_ROCM)
hip_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
hip_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
else()
cc_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils ext_compat_utils phi_enforce)
cc_library(phi_tensor_raw SRCS tensor.cc DEPS tensor_base dense_tensor phi_api_utils phi_enforce)
endif()
set(api_gen_base ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api_base.py)
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
namespace paddle {
namespace experimental {
platform::Place ConvertExtPlaceToInnerPlace(PlaceType p) {
if (p == PlaceType::kCPU) {
return platform::Place(platform::CPUPlace());
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
} else if (p == PlaceType::kGPU) {
return platform::Place(platform::CUDAPlace(platform::GetCurrentDeviceId()));
#endif
} else {
PADDLE_THROW(
platform::errors::Unimplemented("Unsupported place type code(%d) when "
"casting enum place to paddle place.",
static_cast<int>(p)));
}
return platform::Place();
}
PlaceType ConvertInnerPlaceToExtPlace(const platform::Place& p) {
if (platform::is_cpu_place(p)) {
return PlaceType::kCPU;
} else if (platform::is_gpu_place(p)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
return PlaceType::kGPU;
#endif
} else {
PADDLE_THROW(
platform::errors::Unimplemented("Unsupported place type `%s` when "
"casting paddle place to enum place.",
p));
}
return PlaceType::kUNK;
}
Backend ConvertExtPlaceToBackend(PlaceType p) {
switch (p) {
case PlaceType::kCPU:
return Backend::CPU;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
case PlaceType::kGPU:
return Backend::GPU;
#endif
default:
PADDLE_THROW(
platform::errors::Unimplemented("Unsupported place type `%s` when "
"casting enum place to backend.",
static_cast<int>(p)));
}
}
} // namespace experimental
} // namespace paddle
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/platform/place.h"
#include "paddle/phi/api/ext/place.h"
#include "paddle/phi/common/backend.h"
namespace paddle {
namespace experimental {
platform::Place ConvertExtPlaceToInnerPlace(PlaceType p);
PlaceType ConvertInnerPlaceToExtPlace(const platform::Place& p);
Backend ConvertExtPlaceToBackend(PlaceType p);
} // namespace experimental
} // namespace paddle
......@@ -19,46 +19,41 @@ limitations under the License. */
#include <vector>
#include "glog/logging.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/api/lib/utils/storage.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/enforce.h"
#include "paddle/phi/core/selected_rows.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"
#include "paddle/phi/core/tensor_base.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/core/tensor_utils.h"
/**
* [ Why still include the fluid headers? ]
*
* We hope to organize the basic implementation of Tensor and the logic related
* to Tensor computation into an independent library, which we call
* [Tensor Operation Library, phi], so we extract or rewrite the original
* Kernels.
*
* In the future, the training library, inference library and custom operators
* will link to this Tensor Operation library.
*
* However, if we directly split the link relation, we need to make too many
* changes, which will affect the stability of the framework, so here we still
* rely on the implementation of the framework, which is an intermediate state.
*
* In the future, the necessary components will be moved into this library,
* or the corresponding components will be re-implemented.
*/
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/stream/cuda_stream.h"
#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/core/enforce.h"
namespace paddle {
namespace experimental {
namespace detail {
static Place GetCorrectPlaceByPlaceType(const Place &place_type) {
auto alloc_type = place_type.GetType();
switch (alloc_type) {
case AllocationType::CPU:
return place_type;
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
case AllocationType::GPU:
return phi::Place(AllocationType::GPU,
phi::backends::gpu::GetCurrentDeviceId());
#endif
default:
PADDLE_THROW(phi::errors::Unavailable(
"The PlaceType is a legacy design, only supports CPU and GPU, "
"and will not support other place types in the future."));
}
}
} // namespace detail
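// Behavior sketch: PlaceType::kGPU carries no usable device id (it defaults
// to 0), so a GPU place passed here is remapped to the current device:
//   detail::GetCorrectPlaceByPlaceType(phi::Place(phi::AllocationType::GPU));
//   // -> phi::Place(phi::AllocationType::GPU, current device id)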
/////// Tensor Methods ////////
......@@ -71,27 +66,41 @@ Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl)
phi::errors::InvalidArgument("TensorImpl with nullptr is not supported"));
}
Tensor::Tensor(const PlaceType &place)
: impl_(std::move(std::make_shared<phi::DenseTensor>(
std::move(phi::make_intrusive<SharedStorage>(
ConvertExtPlaceToInnerPlace(place))),
std::move(phi::DenseTensorMeta(phi::DataType::UNDEFINED,
phi::make_ddim({}),
phi::DataLayout::NCHW))))),
place_{place} {}
Tensor::Tensor(const PlaceType &place, const std::vector<int64_t> &shape)
: impl_(std::move(std::make_shared<phi::DenseTensor>(
std::move(phi::make_intrusive<SharedStorage>(
ConvertExtPlaceToInnerPlace(place))),
std::move(phi::DenseTensorMeta(phi::DataType::UNDEFINED,
phi::make_ddim(shape),
phi::DataLayout::NCHW))))),
place_{place} {}
Tensor::Tensor(const Place &place) {
LOG(WARNING) << "The Tensor(place) constructor is deprecated since version "
"2.3, and will be removed in version 2.4! Please use "
"`paddle::empty/full` method to create a new "
"Tensor instead. "
"Reason: A legal tensor cannot be constructed only based on "
"the `place`, and datatype, shape, layout, etc. is also "
"required.";
DefaultAllocator alloc(detail::GetCorrectPlaceByPlaceType(place));
impl_ = std::move(std::make_shared<phi::DenseTensor>(
&alloc,
std::move(phi::DenseTensorMeta(
phi::DataType::FLOAT32, phi::make_ddim({}), phi::DataLayout::NCHW))));
}
Tensor::Tensor(const Place &place, const std::vector<int64_t> &shape) {
LOG(WARNING) << "The Tensor(place, shape) constructor is deprecated since "
"version 2.3, and will be removed in version 2.4! Please use "
"`paddle::empty/full` method to create a new "
"Tensor instead. "
"Reason: A legal tensor cannot be constructed only based on "
"the `place` and `shape`, and datatype, layout, etc. is also "
"required.";
DefaultAllocator alloc(detail::GetCorrectPlaceByPlaceType(place));
impl_ = std::move(std::make_shared<phi::DenseTensor>(
&alloc,
std::move(phi::DenseTensorMeta(phi::DataType::FLOAT32,
phi::make_ddim({shape}),
phi::DataLayout::NCHW))));
}
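// Replacement suggested by the warnings above (sketch, assuming the
// paddle::full overload with explicit dtype and place):
//   auto t = paddle::full({3, 4}, 0.0f, phi::DataType::FLOAT32,
//                         phi::Place(phi::AllocationType::GPU, 0));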
Tensor::Tensor(std::shared_ptr<phi::TensorBase> tensor_impl,
const std::string &name)
: impl_(std::move(tensor_impl)), name_(std::move(name)) {}
/* Part 2: Dimension, DataType and DataLayout methods */
int64_t Tensor::numel() const { return impl_->numel(); }
......@@ -112,14 +121,13 @@ void Tensor::reshape(const std::vector<int64_t> &shape) {
LOG(WARNING) << "The function of resetting the shape of the uninitialized "
"Tensor of the `reshape` method is deprecated since version "
"2.3, and will be removed in version 2.4, please use "
"`paddle::experimental::full` method to create a new Tensor "
"`paddle::empty/full` method to create a new Tensor "
"instead. "
"reason: `reshape` means changing the tensor shape without "
"touching underlying data, this requires the total size of "
"the tensor to remain constant.";
if (is_dense_tensor()) {
std::dynamic_pointer_cast<phi::DenseTensor>(impl_)->Resize(
phi::make_ddim(shape));
static_cast<phi::DenseTensor *>(impl_.get())->Resize(phi::make_ddim(shape));
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Only support reshape operation on DenseTensor now."));
......@@ -146,15 +154,16 @@ bool Tensor::is_sparse_csr_tensor() const {
}
/* Part 3: Device and Backend methods */
PlaceType Tensor::place() const {
if (!impl_->initialized()) {
return place_;
} else {
return ConvertInnerPlaceToExtPlace(impl_->place());
}
Place Tensor::place() const {
PADDLE_ENFORCE_NOT_NULL(
impl_,
phi::errors::PermissionDenied(
"Null pointer error, the impl_ of Tensor should not be "
"Null when calling Tensor::place()."));
return impl_->place();
}
paddle::platform::Place Tensor::inner_place() const {
Place Tensor::inner_place() const {
PADDLE_ENFORCE_NOT_NULL(
impl_,
phi::errors::PermissionDenied(
......@@ -179,9 +188,18 @@ bool Tensor::is_gpu_pinned() const {
template <typename T>
T *Tensor::mutable_data() {
LOG(WARNING) << "Allocating memory through `mutable_data` method is "
"deprecated since version 2.3, and `mutable_data` method "
"will be removed in version 2.4! Please use "
"`paddle::empty/full` method to create a new "
"Tensor with allocated memory, and use data<T>() method "
"to get the memory pointer of tensor instead. "
"Reason: When calling `mutable_data` to allocate memory, "
"the place, datatype, and data layout of tensor may be in "
"an illegal state.";
if (is_dense_tensor()) {
return std::dynamic_pointer_cast<phi::DenseTensor>(impl_)->mutable_data<T>(
ConvertExtPlaceToInnerPlace(place()));
return static_cast<phi::DenseTensor *>(impl_.get())
->mutable_data<T>(place());
}
return nullptr;
}
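// Replacement suggested by the warning above (sketch): allocate via the API,
// then read the pointer with data<T>():
//   auto t = paddle::empty({8}, phi::DataType::FLOAT32, phi::CPUPlace());
//   float* p = t.data<float>();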
......@@ -202,51 +220,43 @@ template PADDLE_API phi::dtype::float16 *
Tensor::mutable_data<phi::dtype::float16>();
template <typename T>
T *Tensor::mutable_data(const PlaceType &place) {
auto inner_place = ConvertExtPlaceToInnerPlace(place);
if (impl_->initialized()) {
PADDLE_ENFORCE_EQ(
platform::is_same_place(inner_place, impl_->place()),
true,
phi::errors::Unimplemented("Modification of tensor place through "
"mutable_data is not supported now"));
}
T *Tensor::mutable_data(const Place &place) {
LOG(WARNING) << "Allocating memory through `mutable_data` method is "
"deprecated since version 2.3, and `mutable_data` method "
"will be removed in version 2.4! Please use "
"`paddle::empty/full` method to create a new "
"Tensor with allocated memory, and use data<T>() method "
"to get the memory pointer of tensor instead. "
"Reason: When calling `mutable_data` to allocate memory, "
"the datatype, and data layout of tensor may be in "
"an illegal state.";
if (is_dense_tensor()) {
return std::dynamic_pointer_cast<phi::DenseTensor>(impl_)->mutable_data<T>(
inner_place);
return static_cast<phi::DenseTensor *>(impl_.get())->mutable_data<T>(place);
}
return nullptr;
}
template PADDLE_API float *Tensor::mutable_data<float>(const PlaceType &place);
template PADDLE_API double *Tensor::mutable_data<double>(
const PlaceType &place);
template PADDLE_API int64_t *Tensor::mutable_data<int64_t>(
const PlaceType &place);
template PADDLE_API int32_t *Tensor::mutable_data<int32_t>(
const PlaceType &place);
template PADDLE_API uint8_t *Tensor::mutable_data<uint8_t>(
const PlaceType &place);
template PADDLE_API int8_t *Tensor::mutable_data<int8_t>(
const PlaceType &place);
template PADDLE_API int16_t *Tensor::mutable_data<int16_t>(
const PlaceType &place);
template PADDLE_API bool *Tensor::mutable_data<bool>(const PlaceType &place);
template PADDLE_API float *Tensor::mutable_data<float>(const Place &place);
template PADDLE_API double *Tensor::mutable_data<double>(const Place &place);
template PADDLE_API int64_t *Tensor::mutable_data<int64_t>(const Place &place);
template PADDLE_API int32_t *Tensor::mutable_data<int32_t>(const Place &place);
template PADDLE_API uint8_t *Tensor::mutable_data<uint8_t>(const Place &place);
template PADDLE_API int8_t *Tensor::mutable_data<int8_t>(const Place &place);
template PADDLE_API int16_t *Tensor::mutable_data<int16_t>(const Place &place);
template PADDLE_API bool *Tensor::mutable_data<bool>(const Place &place);
template PADDLE_API phi::dtype::complex<float>
*Tensor::mutable_data<phi::dtype::complex<float>>(const PlaceType &place);
*Tensor::mutable_data<phi::dtype::complex<float>>(const Place &place);
template PADDLE_API phi::dtype::complex<double>
*Tensor::mutable_data<phi::dtype::complex<double>>(const PlaceType &place);
*Tensor::mutable_data<phi::dtype::complex<double>>(const Place &place);
template PADDLE_API phi::dtype::float16 *
Tensor::mutable_data<phi::dtype::float16>(const PlaceType &place);
Tensor::mutable_data<phi::dtype::float16>(const Place &place);
template <typename T>
const T *Tensor::data() const {
if (is_dense_tensor()) {
return std::dynamic_pointer_cast<phi::DenseTensor>(impl_)->data<T>();
} else if (phi::SelectedRows::classof(impl_.get())) {
return std::dynamic_pointer_cast<phi::SelectedRows>(impl_)
->value()
.data<T>();
return static_cast<phi::DenseTensor *>(impl_.get())->data<T>();
} else if (is_selected_rows()) {
return static_cast<phi::SelectedRows *>(impl_.get())->value().data<T>();
}
return nullptr;
}
......@@ -271,9 +281,9 @@ Tensor::data<phi::dtype::bfloat16>() const;
template <typename T>
T *Tensor::data() {
if (is_dense_tensor()) {
return std::dynamic_pointer_cast<phi::DenseTensor>(impl_)->data<T>();
} else if (phi::SelectedRows::classof(impl_.get())) {
return std::dynamic_pointer_cast<phi::SelectedRows>(impl_)
return static_cast<phi::DenseTensor *>(impl_.get())->data<T>();
} else if (is_selected_rows()) {
return static_cast<phi::SelectedRows *>(impl_.get())
->mutable_value()
->data<T>();
}
......@@ -299,7 +309,7 @@ Tensor Tensor::slice(int64_t begin_idx, int64_t end_idx) const {
if (is_dense_tensor()) {
return Tensor(std::make_shared<phi::DenseTensor>(
std::move(phi::DenseTensorUtils::Slice(
*(std::dynamic_pointer_cast<phi::DenseTensor>(impl_).get()),
*(static_cast<phi::DenseTensor *>(impl_.get())),
begin_idx,
end_idx))));
} else {
......@@ -331,6 +341,9 @@ bool Tensor::defined() const { return impl_ != nullptr; }
bool Tensor::initialized() const { return defined() && impl_->initialized(); }
bool Tensor::is_initialized() const {
LOG(WARNING) << "The `is_initialized` method is deprecated since version "
"2.3, and will be removed in version 2.4! "
"Please use `initialized` method instead.";
return defined() && impl_->initialized();
}
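// Migration sketch: `initialized()` performs the same
// `defined() && impl_->initialized()` check without emitting the warning.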
......@@ -342,7 +355,6 @@ Tensor &Tensor::operator=(const Tensor &x) & {
impl_ = x.impl_;
autograd_meta_ = x.autograd_meta_;
name_ = x.name_;
place_ = x.place_;
return *this;
}
......@@ -350,7 +362,6 @@ Tensor &Tensor::operator=(Tensor &&x) & {
impl_ = std::move(x.impl_);
autograd_meta_ = std::move(x.autograd_meta_);
name_ = std::move(x.name_);
place_ = std::move(x.place_);
return *this;
}
......@@ -371,8 +382,7 @@ void Tensor::set_autograd_meta(
void Tensor::bump_inplace_version() {
if (is_dense_tensor()) {
auto &inplace_version_counter =
std::dynamic_pointer_cast<phi::DenseTensor>(impl_)
->InplaceVersionCounter();
static_cast<phi::DenseTensor *>(impl_.get())->InplaceVersionCounter();
inplace_version_counter.Bump();
} else {
PADDLE_THROW(phi::errors::Unimplemented(
......@@ -383,8 +393,7 @@ void Tensor::bump_inplace_version() {
uint32_t Tensor::current_inplace_version() {
if (is_dense_tensor()) {
auto &inplace_version_counter =
std::dynamic_pointer_cast<phi::DenseTensor>(impl_)
->InplaceVersionCounter();
static_cast<phi::DenseTensor *>(impl_.get())->InplaceVersionCounter();
return inplace_version_counter.CurrentVersion();
} else {
PADDLE_THROW(phi::errors::Unimplemented(
......@@ -397,8 +406,7 @@ void Tensor::reset_inplace_version(bool set_to_zero) {
if (set_to_zero) {
if (is_dense_tensor()) {
auto &inplace_version_counter =
std::dynamic_pointer_cast<phi::DenseTensor>(impl_)
->InplaceVersionCounter();
static_cast<phi::DenseTensor *>(impl_.get())->InplaceVersionCounter();
inplace_version_counter.SetInplaceVersionToZero();
}
}
......
......@@ -14,7 +14,6 @@ limitations under the License. */
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/tensor_base.h"
......@@ -39,37 +38,37 @@ Tensor Tensor::copy_to(Place place, bool blocking) const {
}
template <typename T>
Tensor Tensor::copy_to(const PlaceType &target_place) const {
Tensor Tensor::copy_to(const Place &target_place) const {
LOG(WARNING) << "The Tensor's `copy_to` method is deprecated since version "
"2.3, and will be removed in version 2.4, please use "
"`copy_to` method without template argument instead. "
"reason: copying a Tensor to another device does not need "
"to specify the data type template argument.";
return copy_to(ConvertExtPlaceToInnerPlace(target_place), /*blocking=*/false);
return copy_to(target_place, /*blocking=*/false);
}
template PADDLE_API Tensor
Tensor::copy_to<float>(const PlaceType &target_place) const;
Tensor::copy_to<float>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<double>(const PlaceType &target_place) const;
Tensor::copy_to<double>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<int64_t>(const PlaceType &target_place) const;
Tensor::copy_to<int64_t>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<int32_t>(const PlaceType &target_place) const;
Tensor::copy_to<int32_t>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<uint8_t>(const PlaceType &target_place) const;
Tensor::copy_to<uint8_t>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<int8_t>(const PlaceType &target_place) const;
Tensor::copy_to<int8_t>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<int16_t>(const PlaceType &target_place) const;
Tensor::copy_to<int16_t>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<bool>(const PlaceType &target_place) const;
template PADDLE_API Tensor Tensor::copy_to<phi::dtype::complex<float>>(
const PlaceType &target_place) const;
template PADDLE_API Tensor Tensor::copy_to<phi::dtype::complex<double>>(
const PlaceType &target_place) const;
Tensor::copy_to<bool>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<phi::dtype::float16>(const PlaceType &target_place) const;
Tensor::copy_to<phi::dtype::complex<float>>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<phi::dtype::complex<double>>(const Place &target_place) const;
template PADDLE_API Tensor
Tensor::copy_to<phi::dtype::float16>(const Place &target_place) const;
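// Preferred non-template form per the warning above (sketch):
//   paddle::Tensor dst = src.copy_to(phi::GPUPlace(), /*blocking=*/false);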
void Tensor::copy_(const Tensor &src,
const phi::Place &target_place,
......
......@@ -109,3 +109,16 @@ uint32_t Place::Hash::operator()(const Place &place) const {
}
} // namespace phi
namespace paddle {
phi::Place PlaceType::kUNK = phi::Place();
phi::Place PlaceType::kCPU = phi::Place(phi::AllocationType::CPU);
// A GPU Place contains a device id; here we use the default value 0, so it
// cannot be used for multi-card cases. Because it is a static variable, it is
// difficult to always get the exact device id.
// NOTE: Please DO NOT use this place in the framework!!!
// It is only for external compatibility.
phi::Place PlaceType::kGPU = phi::Place(phi::AllocationType::GPU);
} // namespace paddle
......@@ -213,4 +213,30 @@ using GPUPinnedPlace = phi::GPUPinnedPlace;
using XPUPlace = phi::XPUPlace;
using NPUPlace = phi::NPUPlace;
} // namespace experimental
/* NOTE: In order to remove the enumeration type `PlaceType` of the custom
operator while staying compatible with it, we define a temporary type.
No new member may be added to this type!!! It is only used for compatibility
with historical code, and we will remove this temporary type in the future.
This type cannot be used in the framework! It is only for custom operators!
The historical PlaceType definition:
- enum class PlaceType { kUNK = -1, kCPU, kGPU };
The historical PlaceType usage:
- PD_CHECK(x.place() == paddle::PlaceType::kCPU)
- auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
The new type cannot be used as an int value! If you use it as an int, please
modify the implementation.
*/
struct PADDLE_API PlaceType {
static phi::Place kUNK;
static phi::Place kCPU;
static phi::Place kGPU;
};
} // namespace paddle
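// Compatibility sketch: legacy checks such as
//   PD_CHECK(x.place() == paddle::PlaceType::kCPU);
// still compile, now comparing phi::Place values. Since kGPU is fixed to
// device id 0, such a comparison only matches tensors on GPU 0, which is why
// the note above forbids framework-internal use.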
......@@ -19,7 +19,24 @@ limitations under the License. */
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/compat/convert_utils.h"
// See Note [ Why still include the fluid headers? ]
/**
* [ Why still include the fluid headers? ]
*
* We hope to organize the basic implementation of Tensor and the logic related
* to Tensor computation into an independent library, which we call
* [Tensor Operation Library, phi], so we extract or rewrite the original
* Kernels.
*
* In the future, the training library, inference library and custom operators
* will link to this Tensor Operation library.
*
* However, if we directly split the link relation, we need to make too many
* changes, which will affect the stability of the framework, so here we still
* rely on the implementation of the framework, which is an intermediate state.
*
* In the future, the necessary components will be moved into this library,
* or the corresponding components will be re-implemented.
*/
#include "paddle/fluid/memory/malloc.h"
namespace phi {
......
......@@ -15,7 +15,6 @@
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/core/kernel_registry.h"
PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);
......@@ -201,7 +200,7 @@ void GroupTestDtype() {
void TestInitilized() {
experimental::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
CHECK(test_tensor.is_initialized() == false);
CHECK(test_tensor.is_initialized() == true);
test_tensor.mutable_data<float>(paddle::PlaceType::kCPU);
CHECK(test_tensor.is_initialized() == true);
float* tensor_data = test_tensor.mutable_data<float>();
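// Why the expectation flipped: the reworked Tensor(place, shape) constructor
// now allocates FLOAT32 storage through DefaultAllocator up front, so the
// tensor reports initialized immediately after construction.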
......