diff --git a/test/custom_op/attr_test_op.cc b/test/custom_op/attr_test_op.cc
index 14cb0aa7c716d8449c672231f5399027275f8c5d..819d5e0ea3a2d876d57f40578b41f8731a1bf212 100644
--- a/test/custom_op/attr_test_op.cc
+++ b/test/custom_op/attr_test_op.cc
@@ -132,7 +132,7 @@ std::vector<paddle::Tensor> AttrTestForward(
     std::vector<float> float_vec_attr,
     std::vector<int64_t> int64_vec_attr,
     std::vector<std::string> str_vec_attr) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -173,7 +173,7 @@ std::vector<paddle::Tensor> AttrTestBackward(
     int int_attr,
     const std::vector<float>& float_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
+  auto grad_x = paddle::empty_like(grad_out);
 
   PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
                                assign_cpu_kernel<data_t>(
@@ -198,7 +198,7 @@ std::vector<paddle::Tensor> ConstAttrTestForward(
     const std::vector<float>& float_vec_attr,
     const std::vector<int64_t>& int64_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -239,7 +239,7 @@ std::vector<paddle::Tensor> ConstAttrTestBackward(
     const int& int_attr,
     const std::vector<float>& float_vec_attr,
     const std::vector<std::string>& str_vec_attr) {
-  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU, grad_out.shape());
+  auto grad_x = paddle::empty_like(grad_out);
 
   PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
                                assign_cpu_kernel<data_t>(
diff --git a/test/custom_op/context_pool_test_op.cc b/test/custom_op/context_pool_test_op.cc
index 1687bdccc9227d6a8f1a08bb17124aac50d2df48..72b28064f0a3f81f38af8ff7360dfed08aaaa345 100644
--- a/test/custom_op/context_pool_test_op.cc
+++ b/test/custom_op/context_pool_test_op.cc
@@ -17,8 +17,7 @@
 #include "paddle/extension.h"
 #include "paddle/phi/backends/context_pool.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 std::vector<paddle::Tensor> ContextPoolTest(const paddle::Tensor& x) {
   // 1. test cpu context
diff --git a/test/custom_op/custom_concat_op.cc b/test/custom_op/custom_concat_op.cc
index 80f76e2df54fea69b63f3fc822c6fcafba882e91..e34fffff7b2bb408f11c398e8af6a22d726fc940 100644
--- a/test/custom_op/custom_concat_op.cc
+++ b/test/custom_op/custom_concat_op.cc
@@ -17,8 +17,7 @@
 #include "concat_and_split.h"  // NOLINT
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 int64_t ComputeAxis(int64_t axis, int64_t rank) {
   PD_CHECK(axis >= -rank && axis < rank,
diff --git a/test/custom_op/custom_conj_op.cc b/test/custom_op/custom_conj_op.cc
index 56938552420e7334294b80d65390230df46b4ac3..0f76f715c427fb431a7d6ff77180e7c8fabfee8b 100644
--- a/test/custom_op/custom_conj_op.cc
+++ b/test/custom_op/custom_conj_op.cc
@@ -18,8 +18,7 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 template <typename T>
 using EnableComplex = typename std::enable_if<
diff --git a/test/custom_op/custom_inplace.cc b/test/custom_op/custom_inplace.cc
index fbbe10b513ece5cd2f2430b936f8fadd15d599ed..f7db7922bf3f723e1cb16434d4965db3d53709dc 100644
--- a/test/custom_op/custom_inplace.cc
+++ b/test/custom_op/custom_inplace.cc
@@ -18,6 +18,8 @@
 
 #include "paddle/extension.h"
 
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
+
 template <typename data_t>
 void add_data_pointer(const data_t* x_data, data_t* out_data, int64_t numel) {
   for (size_t i = 0; i < numel; ++i) {
@@ -52,7 +54,7 @@ void relu_backward_kernel(const data_t* out_data,
 }
 
 void AddForward(paddle::Tensor& x, const paddle::Tensor& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "AddForward", ([&] {
@@ -63,8 +65,8 @@ void AddForward(paddle::Tensor& x, const paddle::Tensor& y) {  // NOLINT
 
 std::vector<paddle::Tensor> AddBackward(const paddle::Tensor& x,
                                         const paddle::Tensor& y,
                                         paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(y);
 
   paddle::Tensor y_grad = paddle::empty(x.shape(), x.dtype(), x.place());
@@ -92,7 +94,7 @@ PD_BUILD_GRAD_OP(custom_add)
 // out[i] = x[i] + y
 void AddVectorForward(std::vector<paddle::Tensor>& x,  // NOLINT
                       const paddle::Tensor& y) {
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(y);
 
   PD_DISPATCH_FLOATING_TYPES(y.type(), "AddVectorForward", ([&] {
                                for (size_t i = 0; i < x.size(); ++i) {
@@ -109,9 +111,8 @@ std::vector<paddle::Tensor> AddVectorBackward(
     const std::vector<paddle::Tensor>& x,
     const paddle::Tensor& y,
     std::vector<paddle::Tensor>& out_grad) {  // NOLINT
-  PD_CHECK(x[0].place() == paddle::PlaceType::kCPU,
-           "x[0] must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
+  CHECK_INPUT(x[0]);
+  CHECK_INPUT(y);
 
   PD_CHECK(x.size() == out_grad.size(),
            "x must have the same size as out_grad.");
@@ -145,8 +146,8 @@ void MultiInplaceForward(paddle::Tensor& x,  // NOLINT
                          const paddle::Tensor& y,
                          paddle::Tensor& a,  // NOLINT
                          const paddle::Tensor& b) {
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(a.place() == paddle::PlaceType::kCPU, "a must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(a);
 
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "MultiInplaceForward", ([&] {
@@ -162,10 +163,10 @@ std::vector<paddle::Tensor> MultiInplaceBackward(
     const paddle::Tensor& a,
     const paddle::Tensor& b,
     paddle::Tensor& outab_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
-  PD_CHECK(y.place() == paddle::PlaceType::kCPU, "y must be a CPU Tensor.");
-  PD_CHECK(a.place() == paddle::PlaceType::kCPU, "a must be a CPU Tensor.");
-  PD_CHECK(b.place() == paddle::PlaceType::kCPU, "b must be a CPU Tensor.");
+  CHECK_INPUT(x);
+  CHECK_INPUT(y);
+  CHECK_INPUT(a);
+  CHECK_INPUT(b);
 
   paddle::Tensor y_grad = paddle::empty(x.shape(), x.dtype(), x.place());
   paddle::Tensor b_grad = paddle::empty(a.shape(), a.dtype(), a.place());
@@ -200,7 +201,7 @@ PD_BUILD_GRAD_OP(custom_multi_inplace)
     .SetKernelFn(PD_KERNEL(MultiInplaceBackward));
 
 void ReluForwardInplace(paddle::Tensor& x) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   PD_DISPATCH_FLOATING_TYPES(x.type(), "ReluForward", ([&] {
                                relu_forward_kernel<data_t>(x.data<data_t>(),
@@ -211,7 +212,7 @@ void ReluForwardInplace(paddle::Tensor& x) {  // NOLINT
 void ReluBackwardInplace(const paddle::Tensor& x,
                          const paddle::Tensor& out,
                          paddle::Tensor& grad_out) {  // NOLINT
-  PD_CHECK(out.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(out);
 
   PD_DISPATCH_FLOATING_TYPES(
       grad_out.type(), "ReluBackward", ([&] {
diff --git a/test/custom_op/custom_optional.cc b/test/custom_op/custom_optional.cc
index 0e28ce84d5a35759489096b93f6fd29527b50380..9d247f4a27694dfc9eec3466919f104eec58ddc5 100644
--- a/test/custom_op/custom_optional.cc
+++ b/test/custom_op/custom_optional.cc
@@ -18,6 +18,8 @@
 
 #include "paddle/extension.h"
 
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
+
 template <typename data_t>
 void add_one_pointer(const data_t* x_data, data_t* out_data, int64_t numel) {
   for (size_t i = 0; i < numel; ++i) {
@@ -45,7 +47,7 @@ if (y) {
 std::vector<paddle::Tensor> AddForward(
     const paddle::Tensor& x,
     const paddle::optional<paddle::Tensor>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor out = paddle::empty(x.shape(), x.dtype(), x.place());
 
   if (y) {
@@ -85,7 +87,7 @@ std::vector<paddle::Tensor> AddBackward(
     const paddle::Tensor& x,
     const paddle::optional<paddle::Tensor>& y,
     const paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
   if (y) {
@@ -118,7 +120,7 @@ if (y) {
 std::vector<paddle::Tensor> AddVectorForward(
     const paddle::Tensor& x,
     const paddle::optional<std::vector<paddle::Tensor>>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor out = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -167,7 +169,7 @@ std::vector<paddle::Tensor> AddVectorBackward(
     const paddle::Tensor& x,
     const paddle::optional<std::vector<paddle::Tensor>>& y,
     const paddle::Tensor& out_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
 
@@ -208,7 +210,7 @@ if (y) {
 std::vector<paddle::Tensor> AddOptionalInplaceForward(
     const paddle::Tensor& x,
     paddle::optional<paddle::Tensor>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -252,7 +254,7 @@ std::vector<paddle::Tensor> AddOptionalInplaceBackward(
     const paddle::optional<paddle::Tensor>& y,
     const paddle::Tensor& outx_grad,
     paddle::optional<paddle::Tensor>& outy_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
 
@@ -313,7 +315,7 @@ if (y) {
 std::vector<paddle::Tensor> AddOptionalInplaceVectorForward(
     const paddle::Tensor& x,
     paddle::optional<std::vector<paddle::Tensor>>& y) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
   paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
 
   PD_DISPATCH_FLOATING_TYPES(
@@ -359,7 +361,7 @@ std::vector<paddle::Tensor> AddOptionalInplaceVectorBackward(
     const paddle::optional<std::vector<paddle::Tensor>>& y,
     const paddle::Tensor& outx_grad,
     paddle::optional<std::vector<paddle::Tensor>>& outy_grad) {  // NOLINT
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
+  CHECK_INPUT(x);
 
   paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
 
diff --git a/test/custom_op/custom_relu_op.cc b/test/custom_op/custom_relu_op.cc
index 7575887318ce35ffa39d33d594f1588902866bfc..5627bb28b921f4fa5266bce99e4f0ec86a6e8570 100644
--- a/test/custom_op/custom_relu_op.cc
+++ b/test/custom_op/custom_relu_op.cc
@@ -128,9 +128,9 @@ std::vector<paddle::Tensor> ReluBackward(const paddle::Tensor& x,
 
 std::vector<paddle::Tensor> ReluDoubleBackward(const paddle::Tensor& out,
                                                const paddle::Tensor& ddx) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_double_backward(out, ddx);
-  } else if (out.place() == paddle::PlaceType::kGPU) {
+  } else if (out.is_gpu()) {
     return relu_cuda_double_backward(out, ddx);
   } else {
     PD_THROW("Not implemented.");
@@ -179,9 +179,9 @@ std::vector<paddle::Tensor> relu_cuda_backward_without_x(
 
 std::vector<paddle::Tensor> ReluBackwardWithoutX(
     const paddle::Tensor& out, const paddle::Tensor& grad_out) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_backward_without_x(out, grad_out);
-  } else if (out.place() == paddle::PlaceType::kGPU) {
+  } else if (out.is_gpu()) {
     return relu_cuda_backward_without_x(out, grad_out);
   } else {
     PD_THROW("Not implemented.");
@@ -235,9 +235,9 @@ void relu_cuda_backward_out(const paddle::Tensor& x,
                             paddle::Tensor* grad_x);
 
 void ReluForwardOut(const paddle::Tensor& x, paddle::Tensor* out) {
-  if (x.place() == paddle::PlaceType::kCPU) {
+  if (x.is_cpu()) {
     return relu_cpu_forward_out(x, out);
-  } else if (x.place() == paddle::PlaceType::kGPU) {
+  } else if (x.is_gpu()) {
     return relu_cuda_forward_out(x, out);
   } else {
     PD_THROW("Not implemented.");
@@ -248,9 +248,9 @@ void ReluBackwardOut(const paddle::Tensor& x,
                      const paddle::Tensor& out,
                      const paddle::Tensor& grad_out,
                      paddle::Tensor* grad_x) {
-  if (x.place() == paddle::PlaceType::kCPU) {
+  if (x.is_cpu()) {
     return relu_cpu_backward_out(x, out, grad_out, grad_x);
-  } else if (x.place() == paddle::PlaceType::kGPU) {
+  } else if (x.is_gpu()) {
     return relu_cuda_backward_out(x, out, grad_out, grad_x);
   } else {
     PD_THROW("Not implemented.");
diff --git a/test/custom_op/custom_relu_op_xpu.cc b/test/custom_op/custom_relu_op_xpu.cc
index c38f8b877da2c32ab7b86bf9016ea2a157a2cf30..ee717785ad848601162298d621ca393c91772cca 100644
--- a/test/custom_op/custom_relu_op_xpu.cc
+++ b/test/custom_op/custom_relu_op_xpu.cc
@@ -161,7 +161,7 @@ std::vector<paddle::Tensor> ReluBackward(const paddle::Tensor& x,
 
 std::vector<paddle::Tensor> ReluDoubleBackward(const paddle::Tensor& out,
                                                const paddle::Tensor& ddx) {
-  if (out.place() == paddle::PlaceType::kCPU) {
+  if (out.is_cpu()) {
     return relu_cpu_double_backward(out, ddx);
   } else if (out.place().GetType() == phi::AllocationType::XPU) {
     return relu_xpu_double_backward(out, ddx);
diff --git a/test/custom_op/custom_simple_slice_op.cc b/test/custom_op/custom_simple_slice_op.cc
index 783e0cd96fdd95b594bbc212832bfb5c06eea0f3..21bd1b8ada27de623afc0b369e04bc3d88f70375 100644
--- a/test/custom_op/custom_simple_slice_op.cc
+++ b/test/custom_op/custom_simple_slice_op.cc
@@ -17,8 +17,7 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 std::vector<paddle::Tensor> SimpleSliceFunction(const paddle::Tensor& x,
                                                 int64_t begin_index,
diff --git a/test/custom_op/custom_tanh_op.cc b/test/custom_op/custom_tanh_op.cc
index 399eb5b6366d779969b19b971baff8e4b763fecd..a7a61b9528352046dee112d252a582da4e4281a6 100644
--- a/test/custom_op/custom_tanh_op.cc
+++ b/test/custom_op/custom_tanh_op.cc
@@ -18,8 +18,7 @@
 
 #include "paddle/extension.h"
 
-#define CHECK_CPU_INPUT(x) \
-  PD_CHECK(x.place() == paddle::PlaceType::kCPU, #x " must be a CPU Tensor.")
+#define CHECK_CPU_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")
 
 template <typename data_t>
 void tanh_cpu_forward_kernel(const data_t* x_data,
diff --git a/test/custom_op/dispatch_test_op.cc b/test/custom_op/dispatch_test_op.cc
index 0f7d323b5451efba5a503d9039a03531e1773efb..39e1a24fe2327f6eaf464f10bf470e3a28d8e5be 100644
--- a/test/custom_op/dispatch_test_op.cc
+++ b/test/custom_op/dispatch_test_op.cc
@@ -27,7 +27,7 @@ void assign_cpu_kernel(const data_t* x_data,
 }
 
 std::vector<paddle::Tensor> DispatchTestInterger(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -45,7 +45,7 @@ PD_BUILD_OP(dispatch_test_integer)
 
 std::vector<paddle::Tensor> DispatchTestFloatAndInteger(
     const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -62,7 +62,7 @@ PD_BUILD_OP(dispatch_test_float_and_integer)
     .SetKernelFn(PD_KERNEL(DispatchTestFloatAndInteger));
 
 std::vector<paddle::Tensor> DispatchTestComplex(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -80,7 +80,7 @@ PD_BUILD_OP(dispatch_test_complex)
 
 std::vector<paddle::Tensor> DispatchTestFloatAndComplex(
    const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -98,7 +98,7 @@ PD_BUILD_OP(dispatch_test_float_and_complex)
 
 std::vector<paddle::Tensor> DispatchTestFloatAndIntegerAndComplex(
     const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_AND_INTEGRAL_AND_COMPLEX_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -115,7 +115,7 @@ PD_BUILD_OP(dispatch_test_float_and_integer_and_complex)
     .SetKernelFn(PD_KERNEL(DispatchTestFloatAndIntegerAndComplex));
 
 std::vector<paddle::Tensor> DispatchTestFloatAndHalf(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_AND_HALF_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
diff --git a/test/custom_op/multi_out_test_op.cc b/test/custom_op/multi_out_test_op.cc
index d9e0526e4206ea1be3aab7994430e0f9691a21dc..7007058cbb93ecb2164a6a417056c10ed1fdf8a6 100644
--- a/test/custom_op/multi_out_test_op.cc
+++ b/test/custom_op/multi_out_test_op.cc
@@ -34,7 +34,7 @@ void fill_constant_cpu_kernel(data_t* out_data, int64_t x_numel, data_t value)
 }
 
 std::vector<paddle::Tensor> MultiOutCPU(const paddle::Tensor& x) {
-  auto out = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto out = paddle::empty_like(x);
 
   PD_DISPATCH_FLOATING_TYPES(
       x.type(), "assign_cpu_kernel", ([&] {
@@ -43,13 +43,13 @@ std::vector<paddle::Tensor> MultiOutCPU(const paddle::Tensor& x) {
       }));
 
   // fake multi output: Fake_float64 with float64 dtype
-  auto fake_float64 = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto fake_float64 = paddle::empty_like(x);
 
   fill_constant_cpu_kernel<double>(
       fake_float64.mutable_data<double>(x.place()), x.size(), 0.);
 
   // fake multi output: ZFake_int32 with int32 dtype
-  auto zfake_int32 = paddle::Tensor(paddle::PlaceType::kCPU, x.shape());
+  auto zfake_int32 = paddle::empty_like(x);
 
   fill_constant_cpu_kernel<int32_t>(
       zfake_int32.mutable_data<int32_t>(x.place()), x.size(), 1);
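Taken together, the patch is two mechanical substitutions across the custom-op tests: output allocation moves from the deprecated paddle::Tensor(paddle::PlaceType::kCPU, x.shape()) constructor to paddle::empty_like(x), which infers shape, dtype, and place from its argument, and place checks move from comparing place() against paddle::PlaceType::kCPU / kGPU to the Tensor::is_cpu() / is_gpu() predicates. A minimal sketch of the resulting style, assuming only the public paddle/extension.h API; the custom_scale op and its kernel below are illustrative, not part of the patch:

// custom_scale.cc -- hypothetical op written in the post-patch style.
#include "paddle/extension.h"

// Place check via the is_cpu() predicate instead of place() == kCPU.
#define CHECK_INPUT(x) PD_CHECK(x.is_cpu(), #x " must be a CPU Tensor.")

template <typename data_t>
void scale_cpu_kernel(const data_t* x_data, data_t* out_data, int64_t numel) {
  for (int64_t i = 0; i < numel; ++i) {
    out_data[i] = x_data[i] * static_cast<data_t>(2);
  }
}

std::vector<paddle::Tensor> ScaleForward(const paddle::Tensor& x) {
  CHECK_INPUT(x);
  // empty_like replaces the deprecated Tensor(PlaceType::kCPU, shape)
  // constructor: shape, dtype, and place all come from x.
  auto out = paddle::empty_like(x);

  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "scale_cpu_kernel", ([&] {
        scale_cpu_kernel<data_t>(
            x.data<data_t>(), out.data<data_t>(), x.size());
      }));
  return {out};
}

PD_BUILD_OP(custom_scale)
    .Inputs({"X"})
    .Outputs({"Out"})
    .SetKernelFn(PD_KERNEL(ScaleForward));

Because empty_like carries the place over from its input, the CPU-only test kernels no longer hard-code a place at construction time, which is what lets PlaceType disappear from these files entirely.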