Unverified commit b5d9c31c authored by Chen Weihang, committed by GitHub

[CustomOp] Fix PlaceType related compat error (#41826)

* fix place type related compat error

* fix test failed

* remove dll decl

* revert place type change

* add dll decl
Parent 21aa3adc
......@@ -39,11 +39,12 @@ Tensor Tensor::copy_to(Place place, bool blocking) const {
template <typename T>
Tensor Tensor::copy_to(const Place &target_place) const {
LOG(WARNING) << "The Tensor's `copy_to` method is deprecated since version "
"2.3, and will be removed in version 2.4, please use "
"`copy_to` method without template argument instead. "
"reason: copying a Tensor to another device does not need "
"to specify the data type template argument.";
LOG_FIRST_N(WARNING, 1)
<< "The Tensor's `copy_to` method is deprecated since version "
"2.3, and will be removed in version 2.4, please use "
"`copy_to` method without template argument instead. "
"reason: copying a Tensor to another device does not need "
"to specify the data type template argument.";
return copy_to(target_place, /*blocking=*/false);
}
......
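For reference, a minimal migration sketch for the deprecated overload (assuming an existing float32 paddle::Tensor `x`; the variable names are illustrative, not part of the diff):

// Deprecated since 2.3: the data type template argument is redundant;
// with this patch the warning above is logged only once per process.
paddle::Tensor a = x.copy_to<float>(paddle::GPUPlace());

// Preferred replacement: no template argument; the second parameter
// controls whether the copy blocks until it completes.
paddle::Tensor b = x.copy_to(paddle::GPUPlace(), /*blocking=*/false);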
......@@ -18,6 +18,8 @@ limitations under the License. */
#include <string>
#include <unordered_map>
#include "glog/logging.h"
#include "paddle/phi/api/ext/exception.h"
namespace phi {
......@@ -108,17 +110,34 @@ uint32_t Place::Hash::operator()(const Place &place) const {
return hash_value;
}
Place::Place(paddle::PlaceType type)
: device(0),
alloc_type_(static_cast<AllocationType>(type)),
device_type_id_(GetOrRegisterGlobalDeviceTypeId("")) {
LOG_FIRST_N(WARNING, 1)
<< "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
"2.3, and will be removed in version 2.4! Please use "
"`paddle::CPUPlace()/GPUPlace()` to represent the place type.";
}
} // namespace phi
namespace paddle {
phi::Place PlaceType::kUNK = phi::Place();
phi::Place PlaceType::kCPU = phi::Place(phi::AllocationType::CPU);
// GPU Place contains a device id, here we use the default value 0, so it
// cannot be used for multi-card cases, but because it is a static variable,
// it is difficult to get the exact device id at all times.
// NOTE: Please DO NOT use this place in the framework!!!
// It is only for external compatibility
phi::Place PlaceType::kGPU = phi::Place(phi::AllocationType::GPU);
bool operator==(const Place &place, PlaceType place_type) {
LOG_FIRST_N(WARNING, 1)
<< "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
"2.3, and will be removed in version 2.4! Please use "
"`Tensor::is_cpu()/is_gpu()` method to determine the type of place.";
return place.GetType() == static_cast<AllocationType>(place_type);
}
bool operator==(PlaceType place_type, const Place &place) {
LOG_FIRST_N(WARNING, 1)
<< "The `paddle::PlaceType::kCPU/kGPU` is deprecated since version "
"2.3, and will be removed in version 2.4! Please use "
"`Tensor::is_cpu()/is_gpu()` method to determine the type of place.";
return static_cast<AllocationType>(place_type) == place.GetType();
}
} // namespace paddle
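A hedged sketch of what this compatibility layer buys (the tensor `x` is hypothetical): legacy PlaceType code keeps compiling, and LOG_FIRST_N ensures each deprecated path warns only once per process.

// Implicit conversion through the new Place(paddle::PlaceType) constructor.
phi::Place p = paddle::PlaceType::kCPU;  // warns once, then acts as a CPU place

// Both comparison orders are covered by the two operator== overloads above.
if (x.place() == paddle::PlaceType::kGPU) { /* legacy check still works */ }

// Preferred, warning-free replacement suggested by the message itself:
if (x.is_gpu()) { /* ... */ }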
......@@ -18,6 +18,10 @@ limitations under the License. */
#include "paddle/phi/api/include/dll_decl.h"
namespace paddle {
enum class PlaceType;
}
namespace phi {
enum class AllocationType : int8_t {
......@@ -57,6 +61,9 @@ class PADDLE_API Place {
alloc_type_(type),
device_type_id_(GetOrRegisterGlobalDeviceTypeId(dev_type)) {}
// See NOTE [ Why need to temporarily adapt to PlaceType? ]
Place(paddle::PlaceType type); // NOLINT
void Reset(AllocationType type,
int8_t device_id = 0,
const std::string& dev_type = "") noexcept {
......@@ -214,14 +221,26 @@ using XPUPlace = phi::XPUPlace;
using NPUPlace = phi::NPUPlace;
} // namespace experimental
/* NOTE: In order to remove the custom operator's enumeration type
`PlaceType` while remaining compatible with it, we define a temporary type.
using AllocationType = phi::AllocationType;
using Place = phi::Place;
using CPUPlace = phi::CPUPlace;
using GPUPlace = phi::GPUPlace;
/* NOTE [ Why need to temporarily adapt to PlaceType? ]
This type cannot add any new type!!! It is only used for compatibility with
The `PlaceType` enum class is the place type used by custom operators since the
release of 2.0. Since 2.3, we have refactored the operator library and designed
a new external Place type. The original PlaceType is no longer suitable for use
as an internal type of the framework, but if we delete PlaceType immediately,
it will cause the previous custom operators to be incompatible, so it cannot be
deleted in the short term. We'd better delete this abandoned data type in 2.4.
Note: This type cannot add any new type!!! It is only used for compatibility with
historical writing and we will remove this temporary type in the future.
This type cannot be used in the framework! It is only used for custom operators!
The historical PlaceType definition:
The original PlaceType definition:
- enum class PlaceType { kUNK = -1, kCPU, kGPU };
......@@ -230,13 +249,14 @@ The historical PlaceType using:
- PD_CHECK(x.place() == paddle::PlaceType::kCPU)
- auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
The new type cannot be used as an int value! If you use it as an int, please
modify the implementation.
*/
struct PADDLE_API PlaceType {
static phi::Place kUNK;
static phi::Place kCPU;
static phi::Place kGPU;
enum class PlaceType {
kUNK = static_cast<int>(phi::AllocationType::UNDEFINED),
kCPU = static_cast<int>(phi::AllocationType::CPU),
kGPU = static_cast<int>(phi::AllocationType::GPU),
};
PADDLE_API bool operator==(const Place& place, PlaceType place_type);
PADDLE_API bool operator==(PlaceType place_type, const Place& place);
} // namespace paddle
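Following the note above, the two historical usages migrate as below (a sketch assuming an existing paddle::Tensor `x`):

// Old style: still compiles through the compat enum and warns once.
PD_CHECK(x.place() == paddle::PlaceType::kCPU);
auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());

// New style: no deprecation warning.
PD_CHECK(x.is_cpu());
auto out2 = paddle::Tensor(paddle::CPUPlace(), x.shape());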
if(WITH_ROCM)
if(WITH_GPU)
nv_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
elseif(WITH_ROCM)
hip_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
else()
cc_test(test_phi_tensor SRCS test_pten_tensor.cc DEPS phi_tensor glog)
......
......@@ -108,7 +108,6 @@ std::vector<paddle::Tensor> relu_cuda_double_backward(
const paddle::Tensor& out, const paddle::Tensor& ddx);
std::vector<paddle::Tensor> ReluForward(const paddle::Tensor& x) {
// TODO(chenweihang): Check Input
if (x.place() == paddle::PlaceType::kCPU) {
return relu_cpu_forward(x);
} else if (x.place() == paddle::PlaceType::kGPU) {
......
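The dispatch above is exactly the legacy pattern this commit keeps working; an equivalent warning-free version using the new predicates might look like this (a sketch, with the else branch assumed from the usual custom-op template):

std::vector<paddle::Tensor> ReluForward(const paddle::Tensor& x) {
  if (x.is_cpu()) {
    return relu_cpu_forward(x);
  } else if (x.is_gpu()) {
    return relu_cuda_forward(x);
  } else {
    PD_THROW("Not implemented.");
  }
}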
......@@ -53,6 +53,7 @@ __global__ void relu_cuda_double_backward_kernel(const data_t* out_data,
}
std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
CHECK_GPU_INPUT(x);
auto out = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = x.size();
......@@ -70,6 +71,9 @@ std::vector<paddle::Tensor> relu_cuda_forward(const paddle::Tensor& x) {
std::vector<paddle::Tensor> relu_cuda_backward(const paddle::Tensor& x,
const paddle::Tensor& out,
const paddle::Tensor& grad_out) {
CHECK_GPU_INPUT(x);
CHECK_GPU_INPUT(out);
CHECK_GPU_INPUT(grad_out);
auto grad_x = paddle::Tensor(paddle::PlaceType::kGPU, x.shape());
int numel = out.size();
......
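CHECK_GPU_INPUT is not shown in this hunk; a plausible definition (an assumption, built on the custom-op PD_CHECK macro) would be:

// Hypothetical reconstruction: reject tensors that are not on the GPU
// before launching the CUDA kernels above.
#define CHECK_GPU_INPUT(x) \
  PD_CHECK(x.is_gpu(), #x " must be a GPU Tensor.")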