diff --git a/paddle/fluid/operators/conj_op.h b/paddle/fluid/operators/conj_op.h index 90724403d4bc7ee8c509c9a887e64b4946e18fb9..0b5a35f515ef0cde1f5c7d80003dd342b659918c 100644 --- a/paddle/fluid/operators/conj_op.h +++ b/paddle/fluid/operators/conj_op.h @@ -21,7 +21,7 @@ #include "paddle/pten/api/lib/utils/tensor_utils.h" #include "paddle/pten/include/core.h" #include "paddle/pten/kernels/cpu/conj_kernel.h" -#include "paddle/pten/kernels/cuda/conj_kernel.h" +#include "paddle/pten/kernels/gpu/conj_kernel.h" namespace paddle { namespace operators { diff --git a/paddle/pten/CMakeLists.txt b/paddle/pten/CMakeLists.txt index eb9a149dd6da4fb544396a1b0d599a80042885ba..799ec885b997d5eb224a424876d22d5964fe0325 100644 --- a/paddle/pten/CMakeLists.txt +++ b/paddle/pten/CMakeLists.txt @@ -27,7 +27,7 @@ set(PTEN_DEPS convert_utils dense_tensor pten_context kernel_factory kernel_cont set(PTEN_DEPS ${PTEN_DEPS} math_cpu linalg_cpu manipulation_cpu conj_kernel_cpu scale_kernel_cpu full_kernel_cpu) set(PTEN_DEPS ${PTEN_DEPS} nary unary binary) if(WITH_GPU OR WITH_ROCM) - set(PTEN_DEPS ${PTEN_DEPS} math_cuda linalg_cuda manipulation_cuda conj_kernel_cuda scale_kernel_cuda full_kernel_cuda) + set(PTEN_DEPS ${PTEN_DEPS} math_gpu linalg_gpu manipulation_gpu conj_kernel_gpu scale_kernel_gpu full_kernel_gpu) endif() if(WITH_XPU) set(PTEN_DEPS ${PTEN_DEPS} manipulation_xpu) diff --git a/paddle/pten/api/include/tensor.h b/paddle/pten/api/include/tensor.h index c8ef22c2ecda2b381625cc2868301b8de6ea2cfd..935c7d8e325d0d0cc0dbdf5ca321420001999cda 100644 --- a/paddle/pten/api/include/tensor.h +++ b/paddle/pten/api/include/tensor.h @@ -464,7 +464,7 @@ class PADDLE_API Tensor final { * unified to Tensor, but Tensor itself is heterogeneous. * * Tensor can generally be represented by void* and size_t, place. 
- * This is suitable for most scenarios including CPU, CUDA, HIP, CPU, etc., + * This is suitable for most scenarios including CPU, GPU, HIP, etc., * but there are a few cases where this definition cannot be described, * such as the Tensor representation in third-party lib such as Metal, * OpenCL, etc., as well as some special Tensor implementations, including diff --git a/paddle/pten/api/lib/ext_compat_utils.cc b/paddle/pten/api/lib/ext_compat_utils.cc index 791a8526f3847a8772cec7eabca9301d8aadda3f..6f1763eac3f03764e54e9924ebd3b83ccfccc29e 100644 --- a/paddle/pten/api/lib/ext_compat_utils.cc +++ b/paddle/pten/api/lib/ext_compat_utils.cc @@ -56,7 +56,7 @@ Backend ConvertExtPlaceToBackend(PlaceType p) { return Backend::CPU; #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) case PlaceType::kGPU: - return Backend::CUDA; + return Backend::GPU; #endif default: PADDLE_THROW( diff --git a/paddle/pten/api/lib/kernel_declare.h b/paddle/pten/api/lib/kernel_declare.h index e748a51082c52f5cb96786a37cc117c78225526f..4dbd46bff65adb5e42587d87b7b07e1b0d9d3ede 100644 --- a/paddle/pten/api/lib/kernel_declare.h +++ b/paddle/pten/api/lib/kernel_declare.h @@ -28,12 +28,12 @@ PT_DECLARE_KERNEL(scale, CPU, ALL_LAYOUT); PT_DECLARE_KERNEL(conj, CPU, ALL_LAYOUT); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_DECLARE_KERNEL(full_like, CUDA, ALL_LAYOUT); -PT_DECLARE_KERNEL(dot, CUDA, ALL_LAYOUT); -PT_DECLARE_KERNEL(flatten, CUDA, ALL_LAYOUT); -PT_DECLARE_KERNEL(sign, CUDA, ALL_LAYOUT); -PT_DECLARE_KERNEL(scale, CUDA, ALL_LAYOUT); -PT_DECLARE_KERNEL(conj, CUDA, ALL_LAYOUT); +PT_DECLARE_KERNEL(full_like, GPU, ALL_LAYOUT); +PT_DECLARE_KERNEL(dot, GPU, ALL_LAYOUT); +PT_DECLARE_KERNEL(flatten, GPU, ALL_LAYOUT); +PT_DECLARE_KERNEL(sign, GPU, ALL_LAYOUT); +PT_DECLARE_KERNEL(scale, GPU, ALL_LAYOUT); +PT_DECLARE_KERNEL(conj, GPU, ALL_LAYOUT); #endif #ifdef PADDLE_WITH_XPU diff --git a/paddle/pten/api/lib/utils.cc b/paddle/pten/api/lib/utils.cc index 
06d604cb83afe91ed2da547e33bd39e02f276771..ddb29c8833f3b8c7f54d9e15acc17d91fdced52d 100644 --- a/paddle/pten/api/lib/utils.cc +++ b/paddle/pten/api/lib/utils.cc @@ -28,7 +28,7 @@ limitations under the License. */ PT_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -PT_DECLARE_KERNEL(copy, CUDA, ALL_LAYOUT); +PT_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT); #endif #ifdef PADDLE_WITH_XPU diff --git a/paddle/pten/backends/all_context.h b/paddle/pten/backends/all_context.h index d056af1b3a0d9abb11d9a2a69373c54252213e83..a7cb4abc2f242ec213dfee9cf7e39777ec3cdcb2 100644 --- a/paddle/pten/backends/all_context.h +++ b/paddle/pten/backends/all_context.h @@ -21,7 +21,7 @@ limitations under the License. */ // path replacement after implementing pten DeviceContext #include "paddle/pten/backends/cpu/cpu_context.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/backends/npu/npu_context.h" #include "paddle/pten/backends/xpu/xpu_context.h" diff --git a/paddle/pten/backends/cuda/cuda_context.h b/paddle/pten/backends/gpu/gpu_context.h similarity index 93% rename from paddle/pten/backends/cuda/cuda_context.h rename to paddle/pten/backends/gpu/gpu_context.h index 332fdd2fdaf432e8f6363ea577b94ffddd65ca2f..1adfd155ce3343849fa22e13ecca25dc2881fdad 100644 --- a/paddle/pten/backends/cuda/cuda_context.h +++ b/paddle/pten/backends/gpu/gpu_context.h @@ -20,7 +20,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/device_context.h" namespace pten { -using CUDAContext = paddle::platform::CUDADeviceContext; +using GPUContext = paddle::platform::CUDADeviceContext; } // namespace pten #endif diff --git a/paddle/pten/common/backend.h b/paddle/pten/common/backend.h index 95bbc88681a965edbee66f85c38bdc08cf461fd8..9944083248c4c7b718d31d3ccc4797cafbc09557 100644 --- a/paddle/pten/common/backend.h +++ b/paddle/pten/common/backend.h @@ -43,7 +43,7 @@ enum class Backend : uint8_t { CPU, // various acceleration devices' backends - CUDA, + GPU, XPU, // XPU currently does not exist at the same time as CUDA NPU, // NPU currently does not exist at the same time as CUDA @@ -99,8 +99,8 @@ inline std::ostream& operator<<(std::ostream& os, Backend backend) { case Backend::CPU: os << "CPU"; break; - case Backend::CUDA: - os << "CUDA"; + case Backend::GPU: + os << "GPU"; break; case Backend::XPU: os << "XPU"; diff --git a/paddle/pten/core/convert_utils.cc b/paddle/pten/core/convert_utils.cc index 936d4effdf27886065a7dc2d23dd96798f2d1bf3..bb8b41612868d7ad2a946ce2821d78394eed2264 100644 --- a/paddle/pten/core/convert_utils.cc +++ b/paddle/pten/core/convert_utils.cc @@ -23,7 +23,7 @@ Backend TransToPtenBackend(const paddle::platform::Place& place) { if (paddle::platform::is_cpu_place(place)) { return Backend::CPU; } else if (paddle::platform::is_gpu_place(place)) { - return Backend::CUDA; + return Backend::GPU; } else { return Backend::UNDEFINED; } @@ -84,7 +84,7 @@ paddle::platform::Place TransToFluidPlace(const Backend& backend) { case pten::Backend::CPU: return paddle::platform::CPUPlace(); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - case pten::Backend::CUDA: + case pten::Backend::GPU: return paddle::platform::CUDAPlace( paddle::platform::GetCurrentDeviceId()); #endif diff --git a/paddle/pten/core/kernel_registry.h b/paddle/pten/core/kernel_registry.h index 62a46e128e513a779b379c1c76f806169e1a426d..a33b13dac239708969402328a374aefd28c34415 100644 
--- a/paddle/pten/core/kernel_registry.h +++ b/paddle/pten/core/kernel_registry.h @@ -57,7 +57,7 @@ struct KernelArgsParseFunctor { if (arg_type == std::type_index(typeid(const CPUContext&)) #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) || - arg_type == std::type_index(typeid(const CUDAContext&))) { + arg_type == std::type_index(typeid(const GPUContext&))) { #else ) { #endif diff --git a/paddle/pten/core/kernel_utils.h b/paddle/pten/core/kernel_utils.h index 82ffa573870dfddce1a281a044ace0d08d60050f..7a7ae283304bf426145d7fc88f4b6317964eadfd 100644 --- a/paddle/pten/core/kernel_utils.h +++ b/paddle/pten/core/kernel_utils.h @@ -181,7 +181,7 @@ struct KernelImpl { PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CPUContext); #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(CUDAContext); + PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(GPUContext); #endif #ifdef PADDLE_WITH_ASCEND_CL PT_SPECIALIZE_KernelCallHelper_FOR_DEVICE_CONTEXT(NPUContext); diff --git a/paddle/pten/include/linalg.h b/paddle/pten/include/linalg.h index 60ec451be2cc82d8f7ccdbf5d903d5e73c5b22c4..8f627f5fc8b0afe56b1e10788fae5ef9b78e6c68 100644 --- a/paddle/pten/include/linalg.h +++ b/paddle/pten/include/linalg.h @@ -18,7 +18,7 @@ #include "paddle/pten/api/lib/utils/storage.h" #include "paddle/pten/include/infermeta.h" #include "paddle/pten/kernels/cpu/linalg.h" -#include "paddle/pten/kernels/cuda/linalg.h" +#include "paddle/pten/kernels/gpu/linalg.h" namespace pten { diff --git a/paddle/pten/include/manipulation.h b/paddle/pten/include/manipulation.h index e138c51e307c4d2e167d984c1872634ed77a6723..e94f2a6180749dcaa085b6106ca9ad07ef165f9a 100644 --- a/paddle/pten/include/manipulation.h +++ b/paddle/pten/include/manipulation.h @@ -18,7 +18,7 @@ #include "paddle/pten/api/lib/utils/storage.h" #include "paddle/pten/include/infermeta.h" #include "paddle/pten/kernels/cpu/manipulation.h" -#include 
"paddle/pten/kernels/cuda/manipulation.h" +#include "paddle/pten/kernels/gpu/manipulation.h" #include "paddle/pten/kernels/xpu/manipulation.h" namespace pten { diff --git a/paddle/pten/include/math.h b/paddle/pten/include/math.h index 8295c5765411dc62db94b8c772bb64879278435b..83471692c8746b855839c68dcf9a957fc9ca700a 100644 --- a/paddle/pten/include/math.h +++ b/paddle/pten/include/math.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "paddle/pten/include/infermeta.h" #include "paddle/pten/kernels/cpu/conj_kernel.h" #include "paddle/pten/kernels/cpu/math.h" -#include "paddle/pten/kernels/cuda/conj_kernel.h" -#include "paddle/pten/kernels/cuda/math.h" +#include "paddle/pten/kernels/gpu/conj_kernel.h" +#include "paddle/pten/kernels/gpu/math.h" #include "paddle/pten/kernels/scale_kernel.h" namespace pten { diff --git a/paddle/pten/kernels/CMakeLists.txt b/paddle/pten/kernels/CMakeLists.txt index d87def812d581cdf7aabe4f3585de98fc9c6dc15..818ce6cb77ae066f7f649ccc19e02f2fa9ae300e 100644 --- a/paddle/pten/kernels/CMakeLists.txt +++ b/paddle/pten/kernels/CMakeLists.txt @@ -5,8 +5,7 @@ add_subdirectory(hybird) add_subdirectory(cpu) if(WITH_GPU OR WITH_ROCM) - # NOTE(chenweihang): if hip can split from cuda impl, we should add hip dir - add_subdirectory(cuda) + add_subdirectory(gpu) endif() if(WITH_MKLDNN) # mkldnn will be deprecated and use the new name dnnl diff --git a/paddle/pten/kernels/cuda/CMakeLists.txt b/paddle/pten/kernels/cuda/CMakeLists.txt deleted file mode 100644 index 428b2762ca790e9c6248290f46ff4cde5120ad01..0000000000000000000000000000000000000000 --- a/paddle/pten/kernels/cuda/CMakeLists.txt +++ /dev/null @@ -1,17 +0,0 @@ -if(WITH_GPU) - nv_library(math_cuda SRCS math.cu DEPS eigen_function dense_tensor convert_utils kernel_context kernel_factory pten_transpose_cuda) - nv_library(linalg_cuda SRCS linalg.cu DEPS eigen_function dense_tensor kernel_context kernel_factory) - nv_library(utils_cuda SRCS utils.cu DEPS dense_tensor kernel_context 
kernel_factory memory convert_utils) - nv_library(manipulation_cuda SRCS manipulation.cu DEPS dense_tensor kernel_context kernel_factory utils_cuda unary) - nv_library(scale_kernel_cuda SRCS scale_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) - nv_library(full_kernel_cuda SRCS full_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) - nv_library(conj_kernel_cuda SRCS conj_kernel.cu DEPS dense_tensor kernel_context kernel_factory) -elseif(WITH_ROCM) - hip_library(math_cuda SRCS math.cu DEPS eigen_function dense_tensor convert_utils kernel_context kernel_factory pten_transpose_cuda) - hip_library(linalg_cuda SRCS linalg.cu DEPS eigen_function dense_tensor kernel_context kernel_factory) - hip_library(utils_cuda SRCS utils.cu DEPS dense_tensor kernel_context kernel_factory memory convert_utils) - hip_library(manipulation_cuda SRCS manipulation.cu DEPS dense_tensor kernel_context kernel_factory utils_cuda unary) - hip_library(scale_kernel_cuda SRCS scale_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) - hip_library(full_kernel_cuda SRCS full_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) - hip_library(conj_kernel_cuda SRCS conj_kernel.cu DEPS dense_tensor kernel_context kernel_factory) -endif() diff --git a/paddle/pten/kernels/gpu/CMakeLists.txt b/paddle/pten/kernels/gpu/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..11ff1608b814c727f935a188e4021386fc6a3c99 --- /dev/null +++ b/paddle/pten/kernels/gpu/CMakeLists.txt @@ -0,0 +1,17 @@ +if(WITH_GPU) + nv_library(math_gpu SRCS math.cu DEPS eigen_function dense_tensor convert_utils kernel_context kernel_factory pten_transpose_gpu) + nv_library(linalg_gpu SRCS linalg.cu DEPS eigen_function dense_tensor kernel_context kernel_factory) + nv_library(utils_gpu SRCS utils.cu DEPS dense_tensor kernel_context kernel_factory memory convert_utils) + nv_library(manipulation_gpu SRCS manipulation.cu 
DEPS dense_tensor kernel_context kernel_factory utils_gpu unary) + nv_library(scale_kernel_gpu SRCS scale_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) + nv_library(full_kernel_gpu SRCS full_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) + nv_library(conj_kernel_gpu SRCS conj_kernel.cu DEPS dense_tensor kernel_context kernel_factory) +elseif(WITH_ROCM) + hip_library(math_gpu SRCS math.cu DEPS eigen_function dense_tensor convert_utils kernel_context kernel_factory pten_transpose_gpu) + hip_library(linalg_gpu SRCS linalg.cu DEPS eigen_function dense_tensor kernel_context kernel_factory) + hip_library(utils_gpu SRCS utils.cu DEPS dense_tensor kernel_context kernel_factory memory convert_utils) + hip_library(manipulation_gpu SRCS manipulation.cu DEPS dense_tensor kernel_context kernel_factory utils_gpu unary) + hip_library(scale_kernel_gpu SRCS scale_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) + hip_library(full_kernel_gpu SRCS full_kernel.cu DEPS dense_tensor kernel_context kernel_factory eigen_function) + hip_library(conj_kernel_gpu SRCS conj_kernel.cu DEPS dense_tensor kernel_context kernel_factory) +endif() diff --git a/paddle/pten/kernels/cuda/conj_kernel.cu b/paddle/pten/kernels/gpu/conj_kernel.cu similarity index 81% rename from paddle/pten/kernels/cuda/conj_kernel.cu rename to paddle/pten/kernels/gpu/conj_kernel.cu index f3d2296f562a0c18667fed1e71610e54ce35bf3d..cb4fef883fdacd1062f1001c26b2b634d548cd9e 100644 --- a/paddle/pten/kernels/cuda/conj_kernel.cu +++ b/paddle/pten/kernels/gpu/conj_kernel.cu @@ -12,23 +12,23 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/pten/kernels/cuda/conj_kernel.h" +#include "paddle/pten/kernels/gpu/conj_kernel.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/kernel_registry.h" #include "paddle/pten/kernels/hybird/math/conj_impl.h" namespace pten { template -void Conj(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out) { - ConjImpl(dev_ctx, x, out); +void Conj(const GPUContext& dev_ctx, const DenseTensor& x, DenseTensor* out) { + ConjImpl(dev_ctx, x, out); } } // namespace pten PT_REGISTER_KERNEL(conj, - CUDA, + GPU, ALL_LAYOUT, pten::Conj, paddle::platform::complex, diff --git a/paddle/pten/kernels/cuda/conj_kernel.h b/paddle/pten/kernels/gpu/conj_kernel.h similarity index 86% rename from paddle/pten/kernels/cuda/conj_kernel.h rename to paddle/pten/kernels/gpu/conj_kernel.h index 8ed0049d877650ff93d723c4a8425a6834183052..7541f9290d246b37030cc2f9bedf0f229ec2b22f 100644 --- a/paddle/pten/kernels/cuda/conj_kernel.h +++ b/paddle/pten/kernels/gpu/conj_kernel.h @@ -17,13 +17,13 @@ limitations under the License. 
*/ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/dense_tensor.h" namespace pten { template -void Conj(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out); +void Conj(const GPUContext& dev_ctx, const DenseTensor& x, DenseTensor* out); } // namespace pten diff --git a/paddle/pten/kernels/cuda/full_kernel.cu b/paddle/pten/kernels/gpu/full_kernel.cu similarity index 93% rename from paddle/pten/kernels/cuda/full_kernel.cu rename to paddle/pten/kernels/gpu/full_kernel.cu index 8a6639a2dc4198038ab4ede07580fbf341548db1..16389d7749bf1d8edf5e224f6c2411c72cc9adb7 100644 --- a/paddle/pten/kernels/cuda/full_kernel.cu +++ b/paddle/pten/kernels/gpu/full_kernel.cu @@ -14,12 +14,12 @@ limitations under the License. */ #include "paddle/pten/kernels/full_kernel.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/kernel_registry.h" #include "paddle/pten/kernels/impl/full_kernel_impl.h" PT_REGISTER_CTX_KERNEL(full, - CUDA, + GPU, ALL_LAYOUT, pten::Full, float, @@ -34,7 +34,7 @@ PT_REGISTER_CTX_KERNEL(full, paddle::platform::complex) {} PT_REGISTER_CTX_KERNEL(full_like, - CUDA, + GPU, ALL_LAYOUT, pten::FullLike, float, diff --git a/paddle/pten/kernels/cuda/linalg.cu b/paddle/pten/kernels/gpu/linalg.cu similarity index 90% rename from paddle/pten/kernels/cuda/linalg.cu rename to paddle/pten/kernels/gpu/linalg.cu index da6511e2c8708ab85961d1d7a9daed8331d1ea41..c9bc4cbd07962e1c81ed0180868d184883f4e1c1 100644 --- a/paddle/pten/kernels/cuda/linalg.cu +++ b/paddle/pten/kernels/gpu/linalg.cu @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "paddle/pten/kernels/cuda/linalg.h" +#include "paddle/pten/kernels/gpu/linalg.h" #include "paddle/pten/core/kernel_registry.h" #include "paddle/pten/kernels/hybird/eigen/dot.h" @@ -24,15 +24,15 @@ namespace pten { template -void Dot(const CUDAContext& dev_ctx, +void Dot(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, DenseTensor* out) { - eigen::Dot(dev_ctx, x, y, out); + eigen::Dot(dev_ctx, x, y, out); } template -void Matmul(const CUDAContext& dev_ctx, +void Matmul(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, bool transpose_x, @@ -48,7 +48,7 @@ void Matmul(const CUDAContext& dev_ctx, paddle::platform::errors::InvalidArgument( "The Input(Y) dims size must not be equal 0," " but reviced dims size is 0. ")); - math::MatMulFunction( + math::MatMulFunction( dev_ctx, x, y, out, transpose_x, transpose_y); } @@ -59,7 +59,7 @@ using complex64 = ::paddle::platform::complex; using complex128 = ::paddle::platform::complex; PT_REGISTER_KERNEL(dot, - CUDA, + GPU, ALL_LAYOUT, pten::Dot, float, @@ -70,7 +70,7 @@ PT_REGISTER_KERNEL(dot, complex128) {} PT_REGISTER_KERNEL(matmul, - CUDA, + GPU, ALL_LAYOUT, pten::Matmul, float, diff --git a/paddle/pten/kernels/cuda/linalg.h b/paddle/pten/kernels/gpu/linalg.h similarity index 89% rename from paddle/pten/kernels/cuda/linalg.h rename to paddle/pten/kernels/gpu/linalg.h index 84f48ca609b270f999e4a8479ab8ea8f34c9773d..a848f55c7b9f0c05dc03b39a6f52b21ca733a988 100644 --- a/paddle/pten/kernels/cuda/linalg.h +++ b/paddle/pten/kernels/gpu/linalg.h @@ -17,19 +17,19 @@ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/dense_tensor.h" namespace pten { template -void Dot(const CUDAContext& dev_ctx, +void Dot(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, DenseTensor* out); template -void Matmul(const 
CUDAContext& dev_ctx, +void Matmul(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, bool transpose_x, diff --git a/paddle/pten/kernels/cuda/manipulation.cu b/paddle/pten/kernels/gpu/manipulation.cu similarity index 88% rename from paddle/pten/kernels/cuda/manipulation.cu rename to paddle/pten/kernels/gpu/manipulation.cu index 49bbf1b61c9916cc8eb6dfc1c4c798930d48c8b0..5a82e3e030b9ec4a9aaa84cd920c076a425017cb 100644 --- a/paddle/pten/kernels/cuda/manipulation.cu +++ b/paddle/pten/kernels/gpu/manipulation.cu @@ -14,15 +14,15 @@ #include "paddle/pten/api/ext/dispatch.h" #include "paddle/pten/infermeta/unary.h" -#include "paddle/pten/kernels/cuda/manipulation.h" -#include "paddle/pten/kernels/cuda/utils.h" +#include "paddle/pten/kernels/gpu/manipulation.h" +#include "paddle/pten/kernels/gpu/utils.h" #include "paddle/pten/kernels/hybird/cuda/cast_kernel_impl.h" #include "paddle/pten/kernels/hybird/general/manipulation.h" namespace pten { template -void Flatten(const CUDAContext& dev_ctx, +void Flatten(const GPUContext& dev_ctx, const DenseTensor& x, int start_axis, int stop_axis, @@ -36,7 +36,7 @@ void Flatten(const CUDAContext& dev_ctx, // Output Tensor, // is there a more flexible way to deal with this case? 
template -void FlattenWithXShape(const CUDAContext& dev_ctx, +void FlattenWithXShape(const GPUContext& dev_ctx, const DenseTensor& x, int start_axis, int stop_axis, @@ -46,7 +46,7 @@ void FlattenWithXShape(const CUDAContext& dev_ctx, general::SetXShape(x, xshape); } -void Reshape(const CUDAContext& dev_ctx, +void Reshape(const GPUContext& dev_ctx, const DenseTensor& x, const ScalarArray& shape, DenseTensor* out) { @@ -60,7 +60,7 @@ void Reshape(const CUDAContext& dev_ctx, out->ResetLoD(x.lod()); } -void ReshapeWithXShape(const CUDAContext& dev_ctx, +void ReshapeWithXShape(const GPUContext& dev_ctx, const DenseTensor& x, const ScalarArray& shape, DenseTensor* xshape, @@ -70,7 +70,7 @@ void ReshapeWithXShape(const CUDAContext& dev_ctx, } template -void Cast(const CUDAContext& dev_ctx, +void Cast(const GPUContext& dev_ctx, const DenseTensor& x, DataType out_dtype, DataType in_dtype, @@ -85,7 +85,7 @@ void Cast(const CUDAContext& dev_ctx, using float16 = paddle::platform::float16; PT_REGISTER_KERNEL(flatten, - CUDA, + GPU, ALL_LAYOUT, pten::Flatten, float, @@ -96,7 +96,7 @@ PT_REGISTER_KERNEL(flatten, int, int64_t) {} PT_REGISTER_KERNEL(flatten_with_xshape, - CUDA, + GPU, ALL_LAYOUT, pten::FlattenWithXShape, float, @@ -108,7 +108,7 @@ PT_REGISTER_KERNEL(flatten_with_xshape, #define PTEN_REGISTER_CAST_CUDA_BASE_TYPE(op_name, ...) 
\ PT_REGISTER_KERNEL(cast, \ - CUDA, \ + GPU, \ ALL_LAYOUT, \ pten::Cast, \ float, \ @@ -132,6 +132,6 @@ PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast, paddle::platform::bfloat16) PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast) #endif -PT_REGISTER_NO_TEMPLATE_KERNEL(reshape, CUDA, ANY, pten::Reshape, ALL_DTYPE) {} +PT_REGISTER_NO_TEMPLATE_KERNEL(reshape, GPU, ANY, pten::Reshape, ALL_DTYPE) {} PT_REGISTER_NO_TEMPLATE_KERNEL( - reshape_with_xshape, CUDA, ANY, pten::ReshapeWithXShape, ALL_DTYPE) {} + reshape_with_xshape, GPU, ANY, pten::ReshapeWithXShape, ALL_DTYPE) {} diff --git a/paddle/pten/kernels/cuda/manipulation.h b/paddle/pten/kernels/gpu/manipulation.h similarity index 86% rename from paddle/pten/kernels/cuda/manipulation.h rename to paddle/pten/kernels/gpu/manipulation.h index 165b08ad59a1cf4e123686dcb8d4e73178a6974b..b47fadd70bd1725cc9e7424c2ab57c1db3dccdf3 100644 --- a/paddle/pten/kernels/cuda/manipulation.h +++ b/paddle/pten/kernels/gpu/manipulation.h @@ -17,7 +17,7 @@ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/common/scalar_array.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/core/kernel_registry.h" @@ -25,25 +25,25 @@ namespace pten { template -void Flatten(const CUDAContext& dev_ctx, +void Flatten(const GPUContext& dev_ctx, const DenseTensor& x, int start_axis, int stop_axis, DenseTensor* out); template -void Cast(const CUDAContext& dev_ctx, +void Cast(const GPUContext& dev_ctx, const DenseTensor& x, DataType out_dtype, DataType in_dtype, DenseTensor* out); -void Reshape(const CUDAContext& dev_ctx, +void Reshape(const GPUContext& dev_ctx, const DenseTensor& x, const ScalarArray& shape, DenseTensor* out); -void ReshapeWithXShape(const CUDAContext& dev_ctx, +void ReshapeWithXShape(const GPUContext& dev_ctx, const DenseTensor& x, const ScalarArray& shape, DenseTensor* xshape, diff 
--git a/paddle/pten/kernels/cuda/math.cu b/paddle/pten/kernels/gpu/math.cu similarity index 89% rename from paddle/pten/kernels/cuda/math.cu rename to paddle/pten/kernels/gpu/math.cu index 3dacc01e8b923b1334c3b848e1c2a5409574fd9d..59d816d2377cd0a19f0b6173ab254797545ed803 100644 --- a/paddle/pten/kernels/cuda/math.cu +++ b/paddle/pten/kernels/gpu/math.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/pten/kernels/cuda/math.h" +#include "paddle/pten/kernels/gpu/math.h" #include "paddle/fluid/operators/reduce_ops/reduce_functor_op.h" #include "paddle/pten/kernels/hybird/cuda/elementwise/elementwise.h" @@ -58,12 +58,12 @@ struct DivideFunctor { */ template -void Sign(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out) { - eigen::Sign(dev_ctx, x, out); +void Sign(const GPUContext& dev_ctx, const DenseTensor& x, DenseTensor* out) { + eigen::Sign(dev_ctx, x, out); } template -void Mean(const CUDAContext& dev_ctx, +void Mean(const GPUContext& dev_ctx, const DenseTensor& x, const std::vector& dims, bool keep_dim, @@ -84,7 +84,7 @@ DEFINE_CUDA_ELEMENTWISE_OP(Multiply) DEFINE_CUDA_ELEMENTWISE_OP(Divide) template -void Sum(const CUDAContext& dev_ctx, +void Sum(const GPUContext& dev_ctx, const DenseTensor& x, const std::vector& dims, bool keep_dim, @@ -101,11 +101,10 @@ using float16 = paddle::platform::float16; using complex64 = ::paddle::platform::complex; using complex128 = ::paddle::platform::complex; -PT_REGISTER_KERNEL(sign, CUDA, ALL_LAYOUT, pten::Sign, float, double, float16) { -} -PT_REGISTER_KERNEL(mean, CUDA, ALL_LAYOUT, pten::Mean, float, double, bool) {} +PT_REGISTER_KERNEL(sign, GPU, ALL_LAYOUT, pten::Sign, float, double, float16) {} +PT_REGISTER_KERNEL(mean, GPU, ALL_LAYOUT, pten::Mean, float, double, bool) {} PT_REGISTER_KERNEL(add, - CUDA, + GPU, ALL_LAYOUT, pten::Add, float, @@ 
-116,7 +115,7 @@ PT_REGISTER_KERNEL(add, complex64, complex128) {} PT_REGISTER_KERNEL(subtract, - CUDA, + GPU, ALL_LAYOUT, pten::Subtract, float, @@ -127,7 +126,7 @@ PT_REGISTER_KERNEL(subtract, complex64, complex128) {} PT_REGISTER_KERNEL(divide, - CUDA, + GPU, ALL_LAYOUT, pten::Divide, float, @@ -138,7 +137,7 @@ PT_REGISTER_KERNEL(divide, complex64, complex128) {} PT_REGISTER_KERNEL(multiply, - CUDA, + GPU, ALL_LAYOUT, pten::Multiply, float, @@ -150,7 +149,7 @@ PT_REGISTER_KERNEL(multiply, complex64, complex128) {} PT_REGISTER_KERNEL(sum, - CUDA, + GPU, ALL_LAYOUT, pten::Sum, bool, diff --git a/paddle/pten/kernels/cuda/math.h b/paddle/pten/kernels/gpu/math.h similarity index 86% rename from paddle/pten/kernels/cuda/math.h rename to paddle/pten/kernels/gpu/math.h index 9cb379bcf7fadf30aa80d1f9113bb01fcf3947fb..5a872542fbd54cb8565d51cc5759e976db6f81c7 100644 --- a/paddle/pten/kernels/cuda/math.h +++ b/paddle/pten/kernels/gpu/math.h @@ -17,17 +17,17 @@ limitations under the License. */ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/common/scalar.h" #include "paddle/pten/core/dense_tensor.h" namespace pten { template -void Sign(const CUDAContext& dev_ctx, const DenseTensor& x, DenseTensor* out); +void Sign(const GPUContext& dev_ctx, const DenseTensor& x, DenseTensor* out); template -void Mean(const CUDAContext& dev_ctx, +void Mean(const GPUContext& dev_ctx, const DenseTensor& x, const std::vector& dims, bool keep_dim, @@ -35,35 +35,35 @@ void Mean(const CUDAContext& dev_ctx, DenseTensor* out); template -void Add(const CUDAContext& dev_ctx, +void Add(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out); template -void Subtract(const CUDAContext& dev_ctx, +void Subtract(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, 
DenseTensor* out); template -void Divide(const CUDAContext& dev_ctx, +void Divide(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out); template -void Multiply(const CUDAContext& dev_ctx, +void Multiply(const GPUContext& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out); template -void Sum(const CUDAContext& dev_ctx, +void Sum(const GPUContext& dev_ctx, const DenseTensor& x, const std::vector& dims, bool keep_dim, @@ -75,7 +75,7 @@ void Sum(const CUDAContext& dev_ctx, #define DEFINE_CUDA_ELEMENTWISE_OP(name) \ template \ - void name(const CUDAContext& dev_ctx, \ + void name(const GPUContext& dev_ctx, \ const DenseTensor& x, \ const DenseTensor& y, \ int axis, \ diff --git a/paddle/pten/kernels/cuda/scale_kernel.cu b/paddle/pten/kernels/gpu/scale_kernel.cu similarity index 93% rename from paddle/pten/kernels/cuda/scale_kernel.cu rename to paddle/pten/kernels/gpu/scale_kernel.cu index 904976ae9567f837c08f24368117450e3ec42a37..e67fd4cfdccb3c3bfd7e917cf2dbf6be166ddffd 100644 --- a/paddle/pten/kernels/cuda/scale_kernel.cu +++ b/paddle/pten/kernels/gpu/scale_kernel.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "paddle/pten/kernels/scale_kernel.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/kernel_registry.h" #include "paddle/pten/kernels/impl/scale_kernel_impl.h" @@ -22,7 +22,7 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/float16.h" PT_REGISTER_CTX_KERNEL(scale, - CUDA, + GPU, ALL_LAYOUT, pten::Scale, float, diff --git a/paddle/pten/kernels/cuda/utils.cu b/paddle/pten/kernels/gpu/utils.cu similarity index 98% rename from paddle/pten/kernels/cuda/utils.cu rename to paddle/pten/kernels/gpu/utils.cu index cf1407e7208de5539806fcfc3e9bc5bb34d4b4a6..4d080be11e3ed9164ca58408589b3ace6bcb363b 100644 --- a/paddle/pten/kernels/cuda/utils.cu +++ b/paddle/pten/kernels/gpu/utils.cu @@ -16,11 +16,11 @@ limitations under the License. */ #include "paddle/pten/common/data_type.h" #include "paddle/pten/core/convert_utils.h" #include "paddle/pten/core/kernel_registry.h" -#include "paddle/pten/kernels/cuda/utils.h" +#include "paddle/pten/kernels/gpu/utils.h" namespace pten { -void Copy(const CUDAContext& dev_ctx, +void Copy(const GPUContext& dev_ctx, const DenseTensor& src, bool blocking, DenseTensor* dst) { @@ -234,4 +234,4 @@ void Copy(const CUDAContext& dev_ctx, } } // namespace pten -PT_REGISTER_NO_TEMPLATE_KERNEL(copy, CUDA, ALL_LAYOUT, pten::Copy, ALL_DTYPE) {} +PT_REGISTER_NO_TEMPLATE_KERNEL(copy, GPU, ALL_LAYOUT, pten::Copy, ALL_DTYPE) {} diff --git a/paddle/pten/kernels/cuda/utils.h b/paddle/pten/kernels/gpu/utils.h similarity index 91% rename from paddle/pten/kernels/cuda/utils.h rename to paddle/pten/kernels/gpu/utils.h index bd29347e3136513535a385222c0bae124a8953ee..3a455ad70c4dcb2552fa2722a3d9504b64a417a2 100644 --- a/paddle/pten/kernels/cuda/utils.h +++ b/paddle/pten/kernels/gpu/utils.h @@ -17,13 +17,13 @@ limitations under the License. 
*/ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/core/kernel_registry.h" namespace pten { -void Copy(const CUDAContext& dev_ctx, +void Copy(const GPUContext& dev_ctx, const DenseTensor& src, bool blocking, DenseTensor* dst); diff --git a/paddle/pten/kernels/hybird/CMakeLists.txt b/paddle/pten/kernels/hybird/CMakeLists.txt index 9d4d86f0674a962edd5a8300fdfb1152716c35df..1304aa1798c0caffdd8bac3facdaa3a6e89c1012 100644 --- a/paddle/pten/kernels/hybird/CMakeLists.txt +++ b/paddle/pten/kernels/hybird/CMakeLists.txt @@ -4,7 +4,7 @@ add_subdirectory(general) cc_library(pten_transpose_cpu SRCS transpose.cc DEPS dense_tensor pten_context) if(WITH_GPU) - nv_library(pten_transpose_cuda SRCS transpose.cu DEPS dense_tensor malloc pten_context) + nv_library(pten_transpose_gpu SRCS transpose.cu DEPS dense_tensor malloc pten_context) elseif(WITH_ROCM) - hip_library(pten_transpose_cuda SRCS transpose.cu DEPS dense_tensor malloc pten_context) + hip_library(pten_transpose_gpu SRCS transpose.cu DEPS dense_tensor malloc pten_context) endif() diff --git a/paddle/pten/kernels/hybird/cuda/cast_kernel_impl.h b/paddle/pten/kernels/hybird/cuda/cast_kernel_impl.h index 54a584d78d2eb646679d1775ac08d9056f95ad07..d8c58448c9867232d5a9512d6070a821a69104ed 100644 --- a/paddle/pten/kernels/hybird/cuda/cast_kernel_impl.h +++ b/paddle/pten/kernels/hybird/cuda/cast_kernel_impl.h @@ -15,7 +15,7 @@ #pragma once #include "paddle/fluid/platform/device/gpu/gpu_helper.h" #include "paddle/fluid/platform/float16.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/fluid/platform/aligned_vector.h" @@ -50,7 +50,7 @@ __global__ void CastCUDAKernel(const InT* in, const int64_t N, OutT* out) { } 
template -void CastCUDAKernelImpl(const CUDAContext& dev_ctx, +void CastCUDAKernelImpl(const GPUContext& dev_ctx, const DenseTensor& x, DenseTensor* out) { auto* in_data = x.data(); diff --git a/paddle/pten/kernels/hybird/cuda/reduce/reduce.h b/paddle/pten/kernels/hybird/cuda/reduce/reduce.h index f55d483de14621da6068ec33e1ede5eb0290a297..793e8505ec606b875d4bb2594d90c3755f521881 100644 --- a/paddle/pten/kernels/hybird/cuda/reduce/reduce.h +++ b/paddle/pten/kernels/hybird/cuda/reduce/reduce.h @@ -17,7 +17,7 @@ // CUDA and HIP use same api #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/common/scalar.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/kernels/hybird/cuda/reduce/reduce_cuda_impl.h" @@ -49,7 +49,7 @@ static inline std::vector GetReduceDim( } template class ReduceFunctor> -void Reduce(const CUDAContext& dev_ctx, +void Reduce(const GPUContext& dev_ctx, const DenseTensor& x, bool reduce_all, const std::vector& dims, diff --git a/paddle/pten/kernels/hybird/cuda/reduce/reduce_cuda_impl.h b/paddle/pten/kernels/hybird/cuda/reduce/reduce_cuda_impl.h index e7aecf3b27aaf1d61a1c4aeaedd2e82b416385a0..bdb883c1df8714fbf18d775a44985b11a0349637 100644 --- a/paddle/pten/kernels/hybird/cuda/reduce/reduce_cuda_impl.h +++ b/paddle/pten/kernels/hybird/cuda/reduce/reduce_cuda_impl.h @@ -42,7 +42,7 @@ namespace cub = hipcub; #include "paddle/fluid/operators/kernel_primitives/compute_primitives.h" #include "paddle/pten/api/ext/dispatch.h" #include "paddle/pten/api/include/tensor.h" -#include "paddle/pten/kernels/cuda/utils.h" +#include "paddle/pten/kernels/gpu/utils.h" #include "paddle/pten/kernels/hybird/math/cast_func.h" // Reduce split or not, Whether to use ReduceHigherDim @@ -820,7 +820,7 @@ void TensorReduceFunctorImpl(const pten::DenseTensor& x, y->Resize(out_dims); } else { PD_VISIT_ALL_TYPES(y->dtype(), 
"CastKernelImpl", ([&] { - pten::math::CastKernelImpl( + pten::math::CastKernelImpl( *dev_ctx, x, y); })); } diff --git a/paddle/pten/kernels/hybird/general/elementwise_base.h b/paddle/pten/kernels/hybird/general/elementwise_base.h index 827af86812ce7fa831dd9716297b475ca3b230bc..20154a8744f3d28e98a824f79d14d7863680a5b6 100644 --- a/paddle/pten/kernels/hybird/general/elementwise_base.h +++ b/paddle/pten/kernels/hybird/general/elementwise_base.h @@ -132,11 +132,11 @@ class MidWiseTransformIterator #if defined(__NVCC__) || defined(__HIPCC__) template -class RowwiseTransformIterator - : public thrust::iterator_adaptor, +class RowwiseTransformIterator + : public thrust::iterator_adaptor, const T *> { public: - typedef thrust::iterator_adaptor, + typedef thrust::iterator_adaptor, const T *> super_t; HOSTDEVICE RowwiseTransformIterator(const T *x, int n) @@ -152,11 +152,11 @@ class RowwiseTransformIterator }; template -class MidWiseTransformIterator - : public thrust::iterator_adaptor, +class MidWiseTransformIterator + : public thrust::iterator_adaptor, const T *> { public: - typedef thrust::iterator_adaptor, + typedef thrust::iterator_adaptor, const T *> super_t; HOSTDEVICE MidWiseTransformIterator(const T *x, int n, int post) diff --git a/paddle/pten/kernels/hybird/transpose.cu b/paddle/pten/kernels/hybird/transpose.cu index 5c5bd291176950647e2b5ee0d1c0cfcd80fe3abc..195277c216fe9127f3664c543c7a7e69fac93e46 100644 --- a/paddle/pten/kernels/hybird/transpose.cu +++ b/paddle/pten/kernels/hybird/transpose.cu @@ -14,7 +14,7 @@ #include "paddle/fluid/framework/ddim.h" #include "paddle/fluid/memory/memcpy.h" -#include "paddle/pten/backends/cuda/cuda_context.h" +#include "paddle/pten/backends/gpu/gpu_context.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/kernels/hybird/math/cast_func.h" #include "paddle/pten/kernels/hybird/transpose.h" @@ -52,9 +52,9 @@ __global__ void TransposeNormalKernel(const T* in_ptr, } template -struct TransposeNormal { +struct 
TransposeNormal { // for dims >= 7 situation - void operator()(const CUDAContext& dev_ctx, + void operator()(const GPUContext& dev_ctx, const pten::DenseTensor& in, pten::DenseTensor* out, const std::vector& axis) { @@ -106,7 +106,7 @@ struct TransposeNormal { // define transpose normal #define DEFINE_GPU_TRANS_NORMAL(TYPE) \ - template struct TransposeNormal + template struct TransposeNormal DEFINE_GPU_TRANS_NORMAL(bool); DEFINE_GPU_TRANS_NORMAL(int8_t); diff --git a/paddle/pten/tests/api/scale_api.h b/paddle/pten/tests/api/scale_api.h index 1defbd02ddd1104b286d23b5bdbaf6ef62fe924e..d525b305c7409b76907ec2a70dcac940c9d6ccaa 100644 --- a/paddle/pten/tests/api/scale_api.h +++ b/paddle/pten/tests/api/scale_api.h @@ -142,13 +142,13 @@ static void ScaleCPU(DataType kernel_dtype, } #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) -static void ScaleCUDA(DataType kernel_dtype, - const pten::CUDAContext& dev_ctx, - const pten::DenseTensor& x, - const Scalar& scale, - float bias, - bool bias_after_scale, - pten::DenseTensor* dense_out) { +static void ScaleGPU(DataType kernel_dtype, + const pten::GPUContext& dev_ctx, + const pten::DenseTensor& x, + const Scalar& scale, + float bias, + bool bias_after_scale, + pten::DenseTensor* dense_out) { switch (kernel_dtype) { case pten::DataType::FLOAT64: { pten::Scale( @@ -255,14 +255,14 @@ Tensor scale_switch_case(const Tensor& x, dense_out.get()); break; #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - case Backend::CUDA: - ScaleCUDA(kernel_data_type, - static_cast(*dev_ctx), - *dense_x, - scale, - bias, - bias_after_scale, - dense_out.get()); + case Backend::GPU: + ScaleGPU(kernel_data_type, + static_cast(*dev_ctx), + *dense_x, + scale, + bias, + bias_after_scale, + dense_out.get()); break; #endif default: diff --git a/paddle/pten/tests/api/test_matmul_api.cc b/paddle/pten/tests/api/test_matmul_api.cc index 01ca4aad642ba2c0aabe82d33e4f077283b0589b..e29fa11d58d1d4816f475ef3e708aa5bcf009586 100644 --- 
a/paddle/pten/tests/api/test_matmul_api.cc +++ b/paddle/pten/tests/api/test_matmul_api.cc @@ -20,7 +20,7 @@ limitations under the License. */ #include "paddle/pten/api/lib/utils/allocator.h" #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/core/kernel_registry.h" -#include "paddle/pten/kernels/cuda/utils.h" +#include "paddle/pten/kernels/gpu/utils.h" namespace paddle { namespace tests { diff --git a/paddle/pten/tests/api/test_tensor_utils.cc b/paddle/pten/tests/api/test_tensor_utils.cc index c445e1867924ac88a4dd6524546735b8e84fe3e4..b59cee5dc7e8449fb40203b0fe4f0d2bbc20d01e 100644 --- a/paddle/pten/tests/api/test_tensor_utils.cc +++ b/paddle/pten/tests/api/test_tensor_utils.cc @@ -110,7 +110,7 @@ TEST(PtenUtils, VarToPtTensor) { pten::Backend expect_backend = pten::Backend::CPU; #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - expect_backend = pten::Backend::CUDA; + expect_backend = pten::Backend::GPU; #endif auto tensor_def = pten::TensorArgDef( expect_backend, pten::DataLayout::NCHW, pten::DataType::INT32); diff --git a/paddle/pten/tests/api/test_to_api.cc b/paddle/pten/tests/api/test_to_api.cc index 5bb6f386c7eb124ab9ddac0f641bafa2c56e6e11..47e8ff7c2c87ed2ad9ad880b27522aa9e93a2560 100644 --- a/paddle/pten/tests/api/test_to_api.cc +++ b/paddle/pten/tests/api/test_to_api.cc @@ -64,7 +64,7 @@ TEST(API, copy_to) { // 2. test API #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - auto tmp = paddle::experimental::copy_to(x, pten::Backend::CUDA, false); + auto tmp = paddle::experimental::copy_to(x, pten::Backend::GPU, false); auto out = paddle::experimental::copy_to(tmp, pten::Backend::CPU, true); #else auto out = paddle::experimental::copy_to(x, pten::Backend::CPU, false); @@ -80,7 +80,7 @@ TEST(Tensor, copy_to) { // 2. 
test API #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) - auto tmp = x.copy_to(pten::Backend::CUDA, false); + auto tmp = x.copy_to(pten::Backend::GPU, false); auto out = tmp.copy_to(pten::Backend::CPU, true); #else auto out = x.copy_to(pten::Backend::CPU, false); diff --git a/paddle/pten/tests/common/test_backend.cc b/paddle/pten/tests/common/test_backend.cc index 148785d4b630b1f02438ee867d47d704bbe4730f..8b006ee0aae60cda444ab693629802d62b45cd48 100644 --- a/paddle/pten/tests/common/test_backend.cc +++ b/paddle/pten/tests/common/test_backend.cc @@ -29,8 +29,8 @@ TEST(Backend, OStream) { oss << pten::Backend::CPU; EXPECT_EQ(oss.str(), "CPU"); oss.str(""); - oss << pten::Backend::CUDA; - EXPECT_EQ(oss.str(), "CUDA"); + oss << pten::Backend::GPU; + EXPECT_EQ(oss.str(), "GPU"); oss.str(""); oss << pten::Backend::XPU; EXPECT_EQ(oss.str(), "XPU");