diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
index cc5e44686f2b19c65e4a4774b3add79101e6e0b3..537b5ef1e6e03e8a1b468072a10fde5d59222286 100644
--- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
@@ -126,7 +126,6 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots,
   init_zero_tensor.lod.assign({one_batch.lod3});
   lod_tensor_tensor.shape = rnn_link_data_shape;
   lod_tensor_tensor.lod.assign({one_batch.lod1});
-  // clang-format off
   week_tensor.shape.assign(
       {static_cast<int>(one_batch.rnn_week_datas.size()),
        static_cast<int>(one_batch.rnn_week_datas.front().size())});
@@ -135,7 +134,6 @@ void PrepareInputs(std::vector<PaddleTensor> *input_slots,
       {static_cast<int>(one_batch.rnn_minute_datas.size()),
        static_cast<int>(one_batch.rnn_minute_datas.front().size())});
   minute_tensor.lod.assign({one_batch.lod3});
-  // clang-format on
   // assign data
   TensorAssignData<float>(&lod_attention_tensor,
                           std::vector<std::vector<float>>({{0, 0}}));
diff --git a/paddle/fluid/operators/fused/fused_softmax_mask.cu.h b/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
index 56806a8c17340f30ffc2d5df4aa28d998507b1bb..009a9253ab3516fe0f0937156fab5179b152f94f 100644
--- a/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
+++ b/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
@@ -173,7 +173,6 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
   dim3 block(warp_size, warps_per_block);
   dim3 grid(DIV_UP(seq_len, warps_per_block), batch_size, head_num);
 
-  // clang-format off
   int elements = ElementsCeil(seq_len);
   switch (elements) {
     case 1: {  // <=32
@@ -193,17 +192,16 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
       SELECT_SOFTMAX_MASK_KERNEL(4);
       break;
     }
-    CASE_SOFTMAX_MASK_KERNEL(8);     // <=256
-    CASE_SOFTMAX_MASK_KERNEL(16);    // <=512
-    CASE_SOFTMAX_MASK_KERNEL(32);    // <=1024
-    CASE_SOFTMAX_MASK_KERNEL(64);    // <=2048
-    CASE_SOFTMAX_MASK_KERNEL(128);   // <=4096
+    CASE_SOFTMAX_MASK_KERNEL(8);    // <=256
+    CASE_SOFTMAX_MASK_KERNEL(16);   // <=512
+    CASE_SOFTMAX_MASK_KERNEL(32);   // <=1024
+    CASE_SOFTMAX_MASK_KERNEL(64);   // <=2048
+    CASE_SOFTMAX_MASK_KERNEL(128);  // <=4096
     default:
       PADDLE_THROW(platform::errors::InvalidArgument(
           "seq_len must be between (0, 4096], received the seq_len is %d",
           seq_len));
   }
-  // clang-format on
 }
 
 }  // namespace operators
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index c7561bc2fd40c43183b547a3ba0402b6a7f4ae4c..5575d839a2f79961d187939824f4c35080daed9f 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1167,34 +1167,33 @@ All parameter, weight, gradient are variables in Paddle.
.def("empty", []() { return kEmptyVarName; }) .def("temp", []() { return kTempVarName; }); - // clang-format off py::class_(m, "DeviceContext") .def_static("create", - [](paddle::platform::CPUPlace& place) - -> paddle::platform::DeviceContext* { - auto* context = new phi::CPUContext(); - context->SetAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetAllocator(place) - .get()); - context->SetHostAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetAllocator(paddle::platform::CPUPlace()) - .get()); - context->SetZeroAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetZeroAllocator(place) - .get()); - return context; + [](paddle::platform::CPUPlace &place) + -> paddle::platform::DeviceContext * { + auto *context = new phi::CPUContext(); + context->SetAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetAllocator(place) + .get()); + context->SetHostAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetAllocator(paddle::platform::CPUPlace()) + .get()); + context->SetZeroAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetZeroAllocator(place) + .get()); + return context; }) - .def_static("create", - [](paddle::platform::XPUPlace& place) - -> paddle::platform::DeviceContext* { + .def_static( + "create", + [](paddle::platform::XPUPlace &place) + -> paddle::platform::DeviceContext * { #ifndef PADDLE_WITH_XPU - PADDLE_THROW( - platform::errors::PermissionDenied( - "Cannot use XPUPlace in CPU/GPU version, " - "Please recompile or reinstall Paddle with XPU support.")); + PADDLE_THROW(platform::errors::PermissionDenied( + "Cannot use XPUPlace in CPU/GPU version, " + "Please recompile or reinstall Paddle with XPU support.")); #else auto* context = new paddle::platform::XPUDeviceContext(place); context->SetAllocator( @@ -1211,52 +1210,51 @@ All parameter, weight, gradient are variables in Paddle. 
                 .get());
             return context;
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::MLUPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::MLUPlace &place)
+              -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_MLU
-                    PADDLE_THROW(
-                        platform::errors::PermissionDenied(
-                            "Cannot use MLUPlace in CPU/GPU version, "
-                            "Please recompile or reinstall Paddle with MLU support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use MLUPlace in CPU/GPU version, "
+                "Please recompile or reinstall Paddle with MLU support."));
 #else
             return new paddle::platform::MLUDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::NPUPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::NPUPlace &place)
+              -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_ASCEND_CL
-                    PADDLE_THROW(
-                        platform::errors::PermissionDenied(
-                            "Cannot use NPUPlace in CPU/GPU/XPU version, "
-                            "Please recompile or reinstall Paddle with NPU support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use NPUPlace in CPU/GPU/XPU version, "
+                "Please recompile or reinstall Paddle with NPU support."));
 #else
             return new paddle::platform::NPUDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CustomPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static("create",
+                  [](paddle::platform::CustomPlace &place)
+                      -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_CUSTOM_DEVICE
-                    PADDLE_THROW(
-                        platform::errors::PermissionDenied(
-                            "Cannot use CustomPlace in CPU/GPU/XPU version, "
-                            "Please recompile or reinstall Paddle with "
-                            "CustomDevice support."));
+                    PADDLE_THROW(platform::errors::PermissionDenied(
+                        "Cannot use CustomPlace in CPU/GPU/XPU version, "
+                        "Please recompile or reinstall Paddle with "
+                        "CustomDevice support."));
 #else
             return new paddle::platform::CustomDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CUDAPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::CUDAPlace &place)
+              -> paddle::platform::DeviceContext * {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
-                    PADDLE_THROW(
-                        platform::errors::PermissionDenied(
-                            "Cannot use CUDAPlace in CPU only version, "
-                            "Please recompile or reinstall Paddle with CUDA support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use CUDAPlace in CPU only version, "
+                "Please recompile or reinstall Paddle with CUDA support."));
 #else
             auto* context = new phi::GPUContext(place);
             context->SetAllocator(
@@ -1278,20 +1276,19 @@ All parameter, weight, gradient are variables in Paddle.
             context->PartialInitWithAllocator();
             return context;
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CUDAPinnedPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::CUDAPinnedPlace &place)
+              -> paddle::platform::DeviceContext * {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
-                    PADDLE_THROW(
-                        platform::errors::PermissionDenied(
-                            "Cannot use CUDAPinnedPlace in CPU only version, "
-                            "Please recompile or reinstall Paddle with CUDA support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use CUDAPinnedPlace in CPU only version, "
+                "Please recompile or reinstall Paddle with CUDA support."));
 #else
             return new paddle::platform::CUDAPinnedDeviceContext(place);
 #endif
-                  });;
-// clang-format on
+          });
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   py::class_<platform::Communicator>(m, "Communicator").def(py::init<>());
 #endif
diff --git a/paddle/infrt/common/type.h b/paddle/infrt/common/type.h
index b532fc154ff02e03ba69f87362a89c4e79c88782..70dd2c5cb4662bc303f1fd34c18a09160df4ee1f 100644
--- a/paddle/infrt/common/type.h
+++ b/paddle/infrt/common/type.h
@@ -172,20 +172,54 @@ const Type& UI1();
 template <typename T>
 Type type_of();
 
-// clang-format off
-template <> inline Type type_of<float>() { return F32(); }
-template <> inline Type type_of<double>() { return F64(); }
-template <> inline Type type_of<unsigned char>() { return UI8(); }
-template <> inline Type type_of<uint16_t>() { return UI16(); }
-template <> inline Type type_of<int32_t>() { return I32(); }
-template <> inline Type type_of<uint32_t>() { return UI32(); }
-template <> inline Type type_of<bool>() { return UI1(); }
-template <> inline Type type_of<char>() { return I8(); }
-template <> inline Type type_of<int64_t>() { return I64(); }
-template <> inline Type type_of<uint64_t>() { return UI64(); }
-template <> inline Type type_of<signed char>() { return I8(); }
-template <> inline Type type_of<void>() { return Void(); }
-// clang-format on
+template <>
+inline Type type_of<float>() {
+  return F32();
+}
+template <>
+inline Type type_of<double>() {
+  return F64();
+}
+template <>
+inline Type type_of<unsigned char>() {
+  return UI8();
+}
+template <>
+inline Type type_of<uint16_t>() {
+  return UI16();
+}
+template <>
+inline Type type_of<int32_t>() {
+  return I32();
+}
+template <>
+inline Type type_of<uint32_t>() {
+  return UI32();
+}
+template <>
+inline Type type_of<bool>() {
+  return UI1();
+}
+template <>
+inline Type type_of<char>() {
+  return I8();
+}
+template <>
+inline Type type_of<int64_t>() {
+  return I64();
+}
+template <>
+inline Type type_of<uint64_t>() {
+  return UI64();
+}
+template <>
+inline Type type_of<signed char>() {
+  return I8();
+}
+template <>
+inline Type type_of<void>() {
+  return Void();
+}
 template <>
 inline Type type_of<int8_t*>() {
   Type x = Int(8);
diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc
index 3b44b1876e20d132118e11aeb42d49d1fcc34649..4f5ecf0aee1197dd4abc507694d76b50a3819fbe 100644
--- a/paddle/phi/api/lib/data_transform.cc
+++ b/paddle/phi/api/lib/data_transform.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-// clang-format off
 #include "paddle/phi/api/lib/data_transform.h"
 
 #include "paddle/phi/api/lib/kernel_dispatch.h"
@@ -24,7 +23,6 @@ limitations under the License. */
 #include "paddle/phi/kernels/transfer_layout_kernel.h"
 
 #include "paddle/fluid/framework/tensor_util.h"
-// clang-format on
 
 namespace paddle {
 namespace experimental {
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index cf528beb800bac2dc899787d049ff49173be420c..70ee28bc2561e2873f58f6c5fd91d1e5e6503e92 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-// clang-format off
 #include "paddle/phi/api/include/tensor.h"
 
 #include <memory>
@@ -35,7 +34,6 @@ limitations under the License. */
 #include "paddle/phi/core/tensor_base.h"
 #include "paddle/phi/core/tensor_meta.h"
 #include "paddle/phi/core/tensor_utils.h"
-// clang-format off
 
 namespace paddle {
 namespace experimental {
@@ -312,8 +310,8 @@ void Tensor::set_impl(std::shared_ptr<phi::TensorBase> &&impl) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 gpuStream_t Tensor::stream() const {
   int device_id = phi::backends::gpu::GetCurrentDeviceId();
-  auto* gpu_context = DeviceContextPool::Instance()
-                          .Get(GPUPlace(device_id));
+  auto *gpu_context = DeviceContextPool::Instance().Get(
+      GPUPlace(device_id));
   return gpu_context->stream();
 }
 #endif
diff --git a/paddle/phi/api/lib/tensor_method.cc b/paddle/phi/api/lib/tensor_method.cc
index 6d38bbda36310dd346f322a5faae66a139fb70b3..037cee79b637b5de1ab86b0e4f8843855f118d5a 100644
--- a/paddle/phi/api/lib/tensor_method.cc
+++ b/paddle/phi/api/lib/tensor_method.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-// clang-format off
 #include "paddle/phi/api/include/tensor.h"
 
 #include "paddle/phi/common/int_array.h"
@@ -25,7 +24,6 @@ limitations under the License. */
 #include "paddle/phi/api/lib/kernel_dispatch.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/infermeta/unary.h"
-// clang-format off
 
 namespace paddle {
 namespace experimental {
@@ -115,13 +113,12 @@ void Tensor::copy_(const Tensor &src,
     // Deep Copy AutoGrad info from src to self.
     *autograd_meta_ = *(src.autograd_meta_);
   }
-  kernel_key_set.backend_set =
-      kernel_key_set.backend_set |
-      BackendSet(phi::TransToPhiBackend(target_place));
+  kernel_key_set.backend_set = kernel_key_set.backend_set |
+                               BackendSet(phi::TransToPhiBackend(target_place));
   auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
   auto place = phi::TransToPhiPlace(kernel_key.backend());
-  auto& pool = paddle::experimental::DeviceContextPool::Instance();
-  auto* dev_ctx = pool.GetMutable(
+  auto &pool = paddle::experimental::DeviceContextPool::Instance();
+  auto *dev_ctx = pool.GetMutable(
       place.GetType() == target_place.GetType() ? target_place : place);
 
   Backend kernel_backend = Backend::UNDEFINED;