Unverified commit f352c23e, authored by HongyuJia, committed by GitHub

[CustomOP Unittest] Polish unit test of custom operator, kCPU->CPU (#52725)

* [CustomOP Unittest] Polish unit test of custom operator, kCPU->CPU

* AllocationType::CPU -> is_cpu()
Parent commit: dee7d78d
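The whole commit applies one mechanical pattern: tensors previously constructed with the deprecated `paddle::PlaceType::kCPU` enum are now allocated with `paddle::empty_like`, and place comparisons become the `is_cpu()`/`is_gpu()` predicates. A minimal before/after sketch (the `AssignLike` op name is hypothetical):

```cpp
#include <vector>
#include "paddle/extension.h"

// New style, as used throughout this commit: allocate with empty_like and
// query the place with the is_cpu() predicate.
std::vector<paddle::Tensor> AssignLike(const paddle::Tensor& x) {
  // Old style being removed:
  //   auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
  //   PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
  PD_CHECK(x.is_cpu(), "x must be a CPU Tensor.");
  auto out = paddle::empty_like(x);  // same shape, dtype, and place as x
  return {out};
}
```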
@@ -132,7 +132,7 @@ std::vector<paddle::Tensor> AttrTestForward(
     std::vector<float> float_vec_attr,
     std::vector<int64_t> int64_vec_attr,
     std::vector<std::string> str_vec_attr) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -173,7 +173,7 @@ std::vector<paddle::Tensor> AttrTestBackward(
     int int_attr,
     const std::vector<float>& float_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
+  auto grad_x = paddle::empty_like(grad_out);
   PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
                                assign_cpu_kernel<data_t>(
@@ -198,7 +198,7 @@ std::vector<paddle::Tensor> ConstAttrTestForward(
     const std::vector<float>& float_vec_attr,
     const std::vector<int64_t>& int64_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -239,7 +239,7 @@ std::vector<paddle::Tensor> ConstAttrTestBackward(
     const int& int_attr,
     const std::vector<float>& float_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
+  auto grad_x = paddle::empty_like(grad_out);
   PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
                                assign_cpu_kernel<data_t>(
......
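For reference, the `assign_cpu_kernel` these hunks dispatch to is a plain element-wise copy; its exact body sits outside the visible hunks, so this is a sketch inferred from the call sites:

```cpp
// Sketch of the copy kernel the attribute tests dispatch to; the real body
// lives outside the visible hunks, but the call sites imply this shape.
template <typename data_t>
void assign_cpu_kernel(const data_t* x_data, data_t* out_data, int64_t numel) {
  for (int64_t i = 0; i < numel; ++i) {
    out_data[i] = x_data[i];
  }
}
```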
@@ -17,8 +17,7 @@
 #include "paddle/extension.h"
 #include "paddle/phi/backends/context_pool.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 std::vector<paddle::Tensor> ContextPoolTest(const paddle::Tensor& x) {
   // 1. test cpu context
......
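The collapsed one-line macro behaves exactly like the two-line version it replaces; `#x` stringizes the argument, so the failure message names the offending tensor:

```cpp
#include "paddle/extension.h"

#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")

void Demo(const paddle::Tensor& grad_out) {
  // #x stringizes the argument, so on failure the message reads:
  //   "grad_out must be a CPU Tensor."
  CHECK_INPUT(grad_out);
}
```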
@@ -17,8 +17,7 @@
 #include "concat_and_split.h"  // NOLINT
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 int64_t ComputeAxis(int64_t axis, int64_t rank) {
   PD_CHECK(axis >= -rank && axis < rank,
......
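`ComputeAxis` validates and normalizes a possibly negative concat axis into `[0, rank)`; only the check is visible in the hunk, so the return statement below is an assumption about the standard body:

```cpp
// Sketch: the visible range check plus the usual normalization of a negative
// axis; the return statement is assumed, not shown in the hunk.
int64_t ComputeAxis(int64_t axis, int64_t rank) {
  PD_CHECK(axis >= -rank && axis < rank,
           "The axis is expected to be in range of [-rank, rank).");
  return axis < 0 ? axis + rank : axis;
}
```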
@@ -18,8 +18,7 @@
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 template <typename data_t>
 using EnableComplex = typename std::enable_if<
......
@@ -18,6 +18,8 @@
 #include "paddle/extension.h"
 
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
+
 template <typename data_t>
 void add_data_pointer(const data_t* x_data, data_t* out_data, int64_t numel) {
   for (size_t i = 0; i < numel; ++i) {
@@ -52,7 +54,7 @@ void relu_backward_kernel(const data_t* out_data,
 }
 
 void AddForward(paddle::Tensor& x, const paddle::Tensor& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "AddForward", ([&] {
@@ -63,8 +65,8 @@ void AddForward(paddle::Tensor& x, const paddle::Tensor& y) {  // NOLINT
 std::vector<paddle::Tensor> AddBackward(const paddle::Tensor& x,
                                         const paddle::Tensor& y,
                                         paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(y);
 
   paddle::Tensor y_grad = paddle::empty(x.shape(), x.dtype(), x.place());
@@ -92,7 +94,7 @@ PD_BUILD_GRAD_OP(custom_add)
 // out[i] = x[i] + y
 void AddVectorForward(std::vector<paddle::Tensor>& x,  // NOLINT
                       const paddle::Tensor& y) {
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(y);
   PD_DISPATCH_FLOATING_TYPES(y.type(), "AddVectorForward", ([&] {
                                for (size_t i = 0; i < x.size(); ++i) {
@@ -109,9 +111,8 @@ std::vector<paddle::Tensor> AddVectorBackward(
     const std::vector<paddle::Tensor>& x,
     const paddle::Tensor& y,
     std::vector<paddle::Tensor>& out_grad) {  // NOLINT
-  PD_CHECK(x[0].place() == paddle::PlaceType::kCPU,
-           "x[0] must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(x[0]);
+  CHECK_INPUT(y);
   PD_CHECK(x.size() == out_grad.size(),
            "x must have the same size as out_grad.");
@@ -145,8 +146,8 @@ void MultiInplaceForward(paddle::Tensor& x,  // NOLINT
                          const paddle::Tensor& y,
                          paddle::Tensor& a,  // NOLINT
                          const paddle::Tensor& b) {
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(a.place() == paddle::PlaceType::kCPU, "a must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(a);
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "MultiInplaceForward", ([&] {
@@ -162,10 +163,10 @@ std::vector<paddle::Tensor> MultiInplaceBackward(
     const paddle::Tensor& a,
     const paddle::Tensor& b,
     paddle::Tensor& outab_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
-  PD_CHECK(a.place() == paddle::PlaceType::kCPU, "a must be a CPU Tensor.");
-  PD_CHECK(b.place() == paddle::PlaceType::kCPU, "b must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(y);
+  CHECK_INPUT(a);
+  CHECK_INPUT(b);
 
   paddle::Tensor y_grad = paddle::empty(x.shape(), x.dtype(), x.place());
   paddle::Tensor b_grad = paddle::empty(a.shape(), a.dtype(), a.place());
@@ -200,7 +201,7 @@ PD_BUILD_GRAD_OP(custom_multi_inplace)
     .SetKernelFn(PD_KERNEL(MultiInplaceBackward));
 
 void ReluForwardInplace(paddle::Tensor& x) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   PD_DISPATCH_FLOATING_TYPES(x.type(), "ReluForward", ([&] {
                                relu_forward_kernel<data_t>(x.data<data_t>(),
@@ -211,7 +212,7 @@ void ReluForwardInplace(paddle::Tensor& x) {  // NOLINT
 void ReluBackwardInplace(const paddle::Tensor& x,
                          const paddle::Tensor& out,
                          paddle::Tensor& grad_out) {  // NOLINT
-  PD_CHECK(out.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(out);
   PD_DISPATCH_FLOATING_TYPES(
       grad_out.type(), "ReluBackward", ([&] {
......
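Note that the macro also fixes a copy-paste slip: the old check on `out` in `ReluBackwardInplace` reported "x must be a CPU Tensor." For context, a hedged sketch of how an in-place kernel like `ReluForwardInplace` is registered; the registration lines sit outside the visible hunks, and `SetInplaceMap` is assumed from the op's in-place semantics:

```cpp
// Hedged sketch of the registration (not shown in the hunks above):
// Inputs/Outputs/SetKernelFn appear elsewhere in this commit; SetInplaceMap
// is an assumption based on the op mutating its input in place.
PD_BUILD_OP(custom_relu_inplace)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetInplaceMap({{"X", "Out"}})
    .SetKernelFn(PD_KERNEL(ReluForwardInplace));
```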
@@ -18,6 +18,8 @@
 #include "paddle/extension.h"
 
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
+
 template <typename data_t>
 void add_one_pointer(const data_t* x_data, data_t* out_data, int64_t numel) {
   for (size_t i = 0; i < numel; ++i) {
@@ -45,7 +47,7 @@ if (y) {
 std::vector<paddle::Tensor> AddForward(
     const paddle::Tensor& x,
     const paddle::optional<paddle::Tensor>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor out = paddle::empty(x.shape(), x.dtype(), x.place());
 
   if (y) {
@@ -85,7 +87,7 @@ std::vector<paddle::Tensor> AddBackward(
     const paddle::Tensor& x,
     const paddle::optional<paddle::Tensor>& y,
     const paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   if (y) {
@@ -118,7 +120,7 @@ if (y) {
 std::vector<paddle::Tensor> AddVectorForward(
     const paddle::Tensor& x,
     const paddle::optional<std::vector<paddle::Tensor>>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor out = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -167,7 +169,7 @@ std::vector<paddle::Tensor> AddVectorBackward(
     const paddle::Tensor& x,
     const paddle::optional<std::vector<paddle::Tensor>>& y,
     const paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
@@ -208,7 +210,7 @@ if (y) {
 std::vector<paddle::Tensor> AddOptionalInplaceForward(
     const paddle::Tensor& x,
     paddle::optional<paddle::Tensor>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -252,7 +254,7 @@ std::vector<paddle::Tensor> AddOptionalInplaceBackward(
     const paddle::optional<paddle::Tensor>& y,
     const paddle::Tensor& outx_grad,
     paddle::optional<paddle::Tensor>& outy_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
@@ -313,7 +315,7 @@ if (y) {
 std::vector<paddle::Tensor> AddOptionalInplaceVectorForward(
     const paddle::Tensor& x,
     paddle::optional<std::vector<paddle::Tensor>>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -359,7 +361,7 @@ std::vector<paddle::Tensor> AddOptionalInplaceVectorBackward(
     const paddle::optional<std::vector<paddle::Tensor>>& y,
     const paddle::Tensor& outx_grad,
     paddle::optional<std::vector<paddle::Tensor>>& outy_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
......
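These kernels take `paddle::optional` inputs, which are truthy only when the caller actually supplied the tensor; every branch on `if (y)` above guards the dereference. A hedged sketch of the pattern (the real kernels dispatch on dtype; this one fixes `float` for brevity, and `AddForwardSketch` is a made-up name):

```cpp
#include <vector>
#include "paddle/extension.h"

// Sketch of the optional-input pattern from the hunks above.
std::vector<paddle::Tensor> AddForwardSketch(
    const paddle::Tensor& x,
    const paddle::optional<paddle::Tensor>& y) {
  paddle::Tensor out = paddle::empty(x.shape(), x.dtype(), x.place());
  const float* x_data = x.data<float>();
  float* out_data = out.mutable_data<float>(x.place());
  for (int64_t i = 0; i < x.size(); ++i) {
    // paddle::optional is truthy only when the caller actually passed y.
    out_data[i] = y ? x_data[i] + y->data<float>()[i] : x_data[i];
  }
  return {out};
}
```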
@@ -128,9 +128,9 @@ std::vector<paddle::Tensor> ReluBackward(const paddle::Tensor& x,
 std::vector<paddle::Tensor> ReluDoubleBackward(const paddle::Tensor& out,
                                                const paddle::Tensor& ddx) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_double_backward(out, ddx);
-  } else if (out.place() == paddle::PlaceType::kGPU) {
+  } else if (out.is_gpu()) {
     return relu_cuda_double_backward(out, ddx);
   } else {
     PD_THROW("Not implemented.");
@@ -179,9 +179,9 @@ std::vector<paddle::Tensor> relu_cuda_backward_without_x(
 std::vector<paddle::Tensor> ReluBackwardWithoutX(
     const paddle::Tensor& out, const paddle::Tensor& grad_out) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_backward_without_x(out, grad_out);
-  } else if (out.place() == paddle::PlaceType::kGPU) {
+  } else if (out.is_gpu()) {
     return relu_cuda_backward_without_x(out, grad_out);
   } else {
     PD_THROW("Not implemented.");
@@ -235,9 +235,9 @@ void relu_cuda_backward_out(const paddle::Tensor& x,
                             paddle::Tensor* grad_x);
 
 void ReluForwardOut(const paddle::Tensor& x, paddle::Tensor* out) {
-  if (x.place() == paddle::PlaceType::kCPU) {
+  if (x.is_cpu()) {
     return relu_cpu_forward_out(x, out);
-  } else if (x.place() == paddle::PlaceType::kGPU) {
+  } else if (x.is_gpu()) {
     return relu_cuda_forward_out(x, out);
   } else {
     PD_THROW("Not implemented.");
@@ -248,9 +248,9 @@ void ReluBackwardOut(const paddle::Tensor& x,
                      const paddle::Tensor& out,
                      const paddle::Tensor& grad_out,
                      paddle::Tensor* grad_x) {
-  if (x.place() == paddle::PlaceType::kCPU) {
+  if (x.is_cpu()) {
     return relu_cpu_backward_out(x, out, grad_out, grad_x);
-  } else if (x.place() == paddle::PlaceType::kGPU) {
+  } else if (x.is_gpu()) {
     return relu_cuda_backward_out(x, out, grad_out, grad_x);
   } else {
     PD_THROW("Not implemented.");
......
@@ -161,7 +161,7 @@ std::vector<paddle::Tensor> ReluBackward(const paddle::Tensor& x,
 std::vector<paddle::Tensor> ReluDoubleBackward(const paddle::Tensor& out,
                                                const paddle::Tensor& ddx) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_double_backward(out, ddx);
   } else if (out.place().GetType() == phi::AllocationType::XPU) {
     return relu_xpu_double_backward(out, ddx);
......
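Only the CPU (and GPU) branches gain predicate shorthand in this commit; the XPU branch keeps the explicit enum comparison, which is exactly what the predicates abbreviate (per the commit message, `AllocationType::CPU -> is_cpu()`). A conceptual sketch, assuming the phi place API:

```cpp
// Conceptual equivalence: the predicate abbreviates the enum check.
bool on_cpu = out.place().GetType() == phi::AllocationType::CPU;  // same as out.is_cpu()
// The XPU branch has no shorthand applied in this commit, so it stays explicit:
bool on_xpu = out.place().GetType() == phi::AllocationType::XPU;
```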
@@ -17,8 +17,7 @@
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 std::vector<paddle::Tensor> SimpleSliceFunction(const paddle::Tensor& x,
                                                 int64_t begin_index,
......
@@ -18,8 +18,7 @@
 #include "paddle/extension.h"
 
-#define CHECK_CPU_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_CPU_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 template <typename data_t>
 void tanh_cpu_forward_kernel(const data_t* x_data,
......
@@ -27,7 +27,7 @@ void assign_cpu_kernel(const data_t* x_data,
 }
 
 std::vector<paddle::Tensor> DispatchTestInterger(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -45,7 +45,7 @@ PD_BUILD_OP(dispatch_test_integer)
 std::vector<paddle::Tensor> DispatchTestFloatAndInteger(
     const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -62,7 +62,7 @@ PD_BUILD_OP(dispatch_test_float_and_integer)
     .SetKernelFn(PD_KERNEL(DispatchTestFloatAndInteger));
 
 std::vector<paddle::Tensor> DispatchTestComplex(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -80,7 +80,7 @@ PD_BUILD_OP(dispatch_test_complex)
 std::vector<paddle::Tensor> DispatchTestFloatAndComplex(
     const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -98,7 +98,7 @@ PD_BUILD_OP(dispatch_test_float_and_complex)
 std::vector<paddle::Tensor> DispatchTestFloatAndIntegerAndComplex(
     const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -115,7 +115,7 @@ PD_BUILD_OP(dispatch_test_float_and_integer_and_complex)
     .SetKernelFn(PD_KERNEL(DispatchTestFloatAndIntegerAndComplex));
 
 std::vector<paddle::Tensor> DispatchTestFloatAndHalf(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_AND_HALF_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
......
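Each `PD_DISPATCH_*` macro switches on `x.type()` and instantiates the lambda with `data_t` bound to the matching C++ type. A conceptual sketch of what `PD_DISPATCH_INTEGRAL_TYPES` does (not the real macro body, which covers the full integral set and reports the op name on mismatch):

```cpp
// Conceptual expansion of PD_DISPATCH_INTEGRAL_TYPES for two of its cases.
switch (x.type()) {
  case paddle::DataType::INT32: {
    using data_t = int32_t;
    assign_cpu_kernel<data_t>(
        x.data<data_t>(), out.mutable_data<data_t>(x.place()), x.size());
    break;
  }
  case paddle::DataType::INT64: {
    using data_t = int64_t;
    assign_cpu_kernel<data_t>(
        x.data<data_t>(), out.mutable_data<data_t>(x.place()), x.size());
    break;
  }
  default:
    PD_THROW("Unsupported data type.");
}
```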
@@ -34,7 +34,7 @@ void fill_constant_cpu_kernel(data_t* out_data, int64_t x_numel, data_t value) {
 }
 
 std::vector<paddle::Tensor> MultiOutCPU(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -43,13 +43,13 @@ std::vector<paddle::Tensor> MultiOutCPU(const paddle::Tensor& x) {
       }));
 
   // fake multi output: Fake_float64 with float64 dtype
-  auto fake_float64 = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto fake_float64 = paddle::empty_like(x);
   fill_constant_cpu_kernel<double>(
       fake_float64.mutable_data<double>(x.place()), x.size(), 0.);
 
   // fake multi output: ZFake_int32 with int32 dtype
-  auto zfake_int32 = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto zfake_int32 = paddle::empty_like(x);
   fill_constant_cpu_kernel<int32_t>(
       zfake_int32.mutable_data<int32_t>(x.place()), x.size(), 1);
......
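One caveat in this last file: `paddle::empty_like(x)` matches x's shape, dtype, and place, but the fake outputs need float64/int32 storage, so the code immediately rebinds the buffer with a typed `mutable_data` call before filling. A hedged sketch of that allocate-then-rebind idiom:

```cpp
// Sketch: empty_like matches x's dtype; mutable_data<T> then (re)allocates
// the underlying buffer for T before the constant fill.
auto fake = paddle::empty_like(x);
double* buf = fake.mutable_data<double>(x.place());
fill_constant_cpu_kernel<double>(buf, x.size(), 0.);
```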