Unverified · Commit b5d9c31c authored by Chen Weihang, committed by GitHub

[CustomOp] Fix PlaceType related compat error (#41826)

* fix place type related compat error

* fix test failed

* remove dll decl

* revert place type change

* add dll decl
Parent 21aa3adc
@@ -39,11 +39,12 @@ Tensor Tensor::copy_to(Place place, bool blocking) const {
 template <typename T>
 Tensor Tensor::copy_to(const Place &target_place) const {
-  LOG(WARNING) << "The Tensor's `copy_to` method is deprecated since version "
-                  "2.3, and will be removed in version 2.4, please use "
-                  "`copy_to` method without template argument instead. "
-                  "reason: copying a Tensor to another device does not need "
-                  "to specify the data type template argument.";
+  LOG_FIRST_N(WARNING, 1)
+      << "The Tensor's `copy_to` method is deprecated since version "
+         "2.3, and will be removed in version 2.4, please use "
+         "`copy_to` method without template argument instead. "
+         "reason: copying a Tensor to another device does not need "
+         "to specify the data type template argument.";
   return copy_to(target_place, /*blocking=*/false);
 }
...
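The change above only swaps LOG for LOG_FIRST_N so the deprecation warning prints once instead of on every call. For context, a minimal migration sketch for user code (the tensor name cpu_t and the float type argument are illustrative, not from this commit):

  // Deprecated since 2.3, to be removed in 2.4: the data type template
  // argument is redundant because a Tensor already knows its dtype.
  paddle::Tensor gpu_t = cpu_t.copy_to<float>(paddle::GPUPlace());

  // Preferred spelling: the non-template overload from the hunk above;
  // `blocking=false` requests an asynchronous copy.
  paddle::Tensor gpu_t2 = cpu_t.copy_to(paddle::GPUPlace(), /*blocking=*/false);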
@@ -18,6 +18,8 @@ limitations under the License. */
 #include <string>
 #include <unordered_map>
 
+#include "glog/logging.h"
+
 #include "paddle/phi/api/ext/exception.h"
 
 namespace phi {
@@ -108,17 +110,34 @@ uint32_t Place::Hash::operator()(const Place &place) const {
   return hash_value;
 }
 
+Place::Place(paddle::PlaceType type)
+    : device(0),
+      alloc_type_(static_cast<AllocationType>(type)),
+      device_type_id_(GetOrRegisterGlobalDeviceTypeId("")) {
+  LOG_FIRST_N(WARNING, 1)
+      << "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
+         "2.3, and will be removed in version 2.4! Please use "
+         "`paddle::CPUPlace()/GPUPlace()` to represent the place type.";
+}
+
 }  // namespace phi
 
 namespace paddle {
 
-phi::Place PlaceType::kUNK = phi::Place();
-phi::Place PlaceType::kCPU = phi::Place(phi::AllocationType::CPU);
-// The GPU Place contains a device id, and we use the default value 0 here,
-// so it cannot be used for multi-card cases; but because it is a static
-// variable, it is difficult to get the exact device id at all times.
-// NOTE: Please DO NOT use this place in the framework!!!
-// It is only for external compatibility.
-phi::Place PlaceType::kGPU = phi::Place(phi::AllocationType::GPU);
+bool operator==(const Place &place, PlaceType place_type) {
+  LOG_FIRST_N(WARNING, 1)
+      << "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
+         "2.3, and will be removed in version 2.4! Please use "
+         "`Tensor::is_cpu()/is_gpu()` method to determine the type of place.";
+  return place.GetType() == static_cast<AllocationType>(place_type);
+}
+
+bool operator==(PlaceType place_type, const Place &place) {
+  LOG_FIRST_N(WARNING, 1)
+      << "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
+         "2.3, and will be removed in version 2.4! Please use "
+         "`Tensor::is_cpu()/is_gpu()` method to determine the type of place.";
+  return static_cast<AllocationType>(place_type) == place.GetType();
+}
 
 }  // namespace paddle
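These two operator== overloads are what keep legacy comparisons such as `x.place() == paddle::PlaceType::kCPU` compiling: `Tensor::place()` now returns a `phi::Place`, while `PlaceType` is a bare enum, so the mixed comparison resolves through these functions rather than through enum equality. A self-contained sketch of the mechanism, using simplified stand-in types rather than the real Paddle declarations:

  #include <cstdint>
  #include <iostream>

  // Simplified stand-ins for the types in this diff.
  enum class AllocationType : int8_t { UNDEFINED = 0, CPU = 1, GPU = 2 };

  struct Place {
    AllocationType alloc_type_ = AllocationType::UNDEFINED;
    AllocationType GetType() const { return alloc_type_; }
  };

  enum class PlaceType {
    kUNK = static_cast<int>(AllocationType::UNDEFINED),
    kCPU = static_cast<int>(AllocationType::CPU),
    kGPU = static_cast<int>(AllocationType::GPU),
  };

  // Mixed-type equality: Place on the left, legacy enum on the right.
  bool operator==(const Place& place, PlaceType place_type) {
    return place.GetType() == static_cast<AllocationType>(place_type);
  }

  int main() {
    Place p{AllocationType::CPU};
    // Old custom-operator style comparison still compiles and evaluates true:
    std::cout << std::boolalpha << (p == PlaceType::kCPU) << "\n";
  }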
@@ -18,6 +18,10 @@ limitations under the License. */
 #include "paddle/phi/api/include/dll_decl.h"
 
+namespace paddle {
+enum class PlaceType;
+}
+
 namespace phi {
 
 enum class AllocationType : int8_t {
@@ -57,6 +61,9 @@ class PADDLE_API Place {
         alloc_type_(type),
         device_type_id_(GetOrRegisterGlobalDeviceTypeId(dev_type)) {}
 
+  // See NOTE [ Why need to temporarily adapt to PlaceType? ]
+  Place(paddle::PlaceType type);  // NOLINT
+
   void Reset(AllocationType type,
              int8_t device_id = 0,
              const std::string& dev_type = "") noexcept {
@@ -214,14 +221,26 @@ using XPUPlace = phi::XPUPlace;
 using NPUPlace = phi::NPUPlace;
 }  // namespace experimental
 
-/* NOTE: In order to remove and stay compatible with the custom operators'
-enumeration type `PlaceType`, we define a temporary type.
-
-This type cannot add any new type!!! It is only used for compatibility with
-historical writing and we will remove this temporary type in the future.
-This type cannot be used in the framework! It is only for custom operators!
-
-The historical PlaceType definition:
-- enum class PlaceType { kUNK = -1, kCPU, kGPU };
+using AllocationType = phi::AllocationType;
+using Place = phi::Place;
+using CPUPlace = phi::CPUPlace;
+using GPUPlace = phi::GPUPlace;
+
+/* NOTE [ Why need to temporarily adapt to PlaceType? ]
+The `PlaceType` enum class is the place type custom operators have used since
+the release of 2.0. Since 2.3, we have refactored the operator library and
+designed a new external Place type. The original PlaceType is no longer
+suitable as an internal framework type, but deleting it immediately would
+break previously written custom operators, so it cannot be removed in the
+short term. We had better delete this abandoned data type in 2.4.
+
+Note: This type cannot add any new type!!! It is only used for compatibility
+with historical writing and we will remove this temporary type in the future.
+This type cannot be used in the framework! It is only for custom operators!
+
+The original PlaceType definition:
+- enum class PlaceType { kUNK = -1, kCPU, kGPU };
 
@@ -230,13 +249,14 @@ The historical PlaceType usage:
 - PD_CHECK(x.place() == paddle::PlaceType::kCPU)
 - auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
 
+The new type cannot be used as an int value! If you use it as an int, please
+modify the implementation.
 */
-struct PADDLE_API PlaceType {
-  static phi::Place kUNK;
-  static phi::Place kCPU;
-  static phi::Place kGPU;
-};
+enum class PlaceType {
+  kUNK = static_cast<int>(phi::AllocationType::UNDEFINED),
+  kCPU = static_cast<int>(phi::AllocationType::CPU),
+  kGPU = static_cast<int>(phi::AllocationType::GPU),
+};
+
+PADDLE_API bool operator==(const Place& place, PlaceType place_type);
+PADDLE_API bool operator==(PlaceType place_type, const Place& place);
 
 }  // namespace paddle
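The non-explicit `Place(paddle::PlaceType)` constructor declared earlier in this header is the other half of the compatibility story: any legacy expression that hands a `PlaceType` to an API expecting a `Place` now converts implicitly, emitting the one-time deprecation warning. A small usage sketch (assuming only the declarations in this diff):

  // Implicit PlaceType -> Place conversion keeps old call sites working;
  // glog's LOG_FIRST_N makes the deprecation warning print only once.
  paddle::Place legacy = paddle::PlaceType::kGPU;

  // The replacement spelling recommended by the warning text:
  paddle::Place modern = paddle::GPUPlace();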
-if(WITH_ROCM)
+if(WITH_GPU)
+  nv_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
+elseif(WITH_ROCM)
   hip_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
 else()
   cc_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
...
...@@ -108,7 +108,6 @@ std::vector<paddle::Tensor> relu_cuda_double_backward( ...@@ -108,7 +108,6 @@ std::vector<paddle::Tensor> relu_cuda_double_backward(
const paddle::Tensor& out, const paddle::Tensor& ddx); const paddle::Tensor& out, const paddle::Tensor& ddx);
std::vector<paddle::Tensor> ReluForward(const paddle::Tensor& x) { std::vector<paddle::Tensor> ReluForward(const paddle::Tensor& x) {
// TODO(chenweihang): Check Input
if (x.place() == paddle::PlaceType::kCPU) { if (x.place() == paddle::PlaceType::kCPU) {
return relu_cpu_forward(x); return relu_cpu_forward(x);
} else if (x.place() == paddle::PlaceType::kGPU) { } else if (x.place() == paddle::PlaceType::kGPU) {
......
@@ -53,6 +53,7 @@ __global__ void relu_cuda_double_backward_kernel(const data_t* out_data,
 }
 
 std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
+  CHECK_GPU_INPUT(x);
   auto out = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
 
   int numel = x.size();
@@ -70,6 +71,9 @@ std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
 std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
                                                const paddle::Tensor& out,
                                                const paddle::Tensor& grad_out) {
+  CHECK_GPU_INPUT(x);
+  CHECK_GPU_INPUT(out);
+  CHECK_GPU_INPUT(grad_out);
   auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
 
   int numel = out.size();
...
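`CHECK_GPU_INPUT` is not defined in the hunks shown here; in Paddle's custom-operator samples such checks are typically a `PD_CHECK` on the tensor's place, along these lines (a hedged sketch, not necessarily the exact macro in this file):

  // Hypothetical definition: fails with a readable message when the input
  // tensor does not live on the GPU. The place comparison goes through the
  // operator==(const Place&, PlaceType) overload added by this commit.
  #define CHECK_GPU_INPUT(x) \
    PD_CHECK(x.place() == paddle::PlaceType::kGPU, #x " must be a GPU Tensor.")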