From b54abbe80bcea17619de2010f3155038fe039166 Mon Sep 17 00:00:00 2001
From: Allen Guo
Date: Tue, 9 Aug 2022 10:33:15 +0800
Subject: [PATCH] fix format for paddle/phi/api/lib/tensor.cc (#44972)

---
 .../tests/api/analyzer_rnn1_tester.cc         |   2 -
 .../operators/fused/fused_softmax_mask.cu.h   |  12 +-
 paddle/fluid/pybind/pybind.cc                 | 131 +++++++++---------
 paddle/infrt/common/type.h                    |  62 +++++++--
 paddle/phi/api/lib/data_transform.cc          |   2 -
 paddle/phi/api/lib/tensor.cc                  |   6 +-
 paddle/phi/api/lib/tensor_method.cc           |  11 +-
 7 files changed, 123 insertions(+), 103 deletions(-)

diff --git a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
index cc5e44686f2..537b5ef1e6e 100644
--- a/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_rnn1_tester.cc
@@ -126,7 +126,6 @@ void PrepareInputs(std::vector *input_slots,
   init_zero_tensor.lod.assign({one_batch.lod3});
   lod_tensor_tensor.shape = rnn_link_data_shape;
   lod_tensor_tensor.lod.assign({one_batch.lod1});
-  // clang-format off
   week_tensor.shape.assign(
       {static_cast(one_batch.rnn_week_datas.size()),
        static_cast(one_batch.rnn_week_datas.front().size())});
@@ -135,7 +134,6 @@ void PrepareInputs(std::vector *input_slots,
       {static_cast(one_batch.rnn_minute_datas.size()),
        static_cast(one_batch.rnn_minute_datas.front().size())});
   minute_tensor.lod.assign({one_batch.lod3});
-  // clang-format on
   // assign data
   TensorAssignData(&lod_attention_tensor,
                    std::vector>({{0, 0}}));
diff --git a/paddle/fluid/operators/fused/fused_softmax_mask.cu.h b/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
index 56806a8c173..009a9253ab3 100644
--- a/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
+++ b/paddle/fluid/operators/fused/fused_softmax_mask.cu.h
@@ -173,7 +173,6 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
   dim3 block(warp_size, warps_per_block);
   dim3 grid(DIV_UP(seq_len, warps_per_block), batch_size, head_num);

-  // clang-format off
   int elements = ElementsCeil(seq_len);
   switch (elements) {
     case 1: {  // <=32
@@ -193,17 +192,16 @@ void LaunchFusedSoftmaxMaskKernel(const T* src,
       SELECT_SOFTMAX_MASK_KERNEL(4);
       break;
     }
-    CASE_SOFTMAX_MASK_KERNEL(8);    // <=256
-    CASE_SOFTMAX_MASK_KERNEL(16);   // <=512
-    CASE_SOFTMAX_MASK_KERNEL(32);   // <=1024
-    CASE_SOFTMAX_MASK_KERNEL(64);   // <=2048
-    CASE_SOFTMAX_MASK_KERNEL(128);  // <=4096
+    CASE_SOFTMAX_MASK_KERNEL(8);    // <=256
+    CASE_SOFTMAX_MASK_KERNEL(16);   // <=512
+    CASE_SOFTMAX_MASK_KERNEL(32);   // <=1024
+    CASE_SOFTMAX_MASK_KERNEL(64);   // <=2048
+    CASE_SOFTMAX_MASK_KERNEL(128);  // <=4096
     default:
       PADDLE_THROW(platform::errors::InvalidArgument(
           "seq_len must be between (0, 4096], received the seq_len is %d",
          seq_len));
   }
-  // clang-format on
 }

 }  // namespace operators
diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc
index c7561bc2fd4..5575d839a2f 100644
--- a/paddle/fluid/pybind/pybind.cc
+++ b/paddle/fluid/pybind/pybind.cc
@@ -1167,34 +1167,33 @@ All parameter, weight, gradient are variables in Paddle.
.def("empty", []() { return kEmptyVarName; }) .def("temp", []() { return kTempVarName; }); - // clang-format off py::class_(m, "DeviceContext") .def_static("create", - [](paddle::platform::CPUPlace& place) - -> paddle::platform::DeviceContext* { - auto* context = new phi::CPUContext(); - context->SetAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetAllocator(place) - .get()); - context->SetHostAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetAllocator(paddle::platform::CPUPlace()) - .get()); - context->SetZeroAllocator( - paddle::memory::allocation::AllocatorFacade::Instance() - .GetZeroAllocator(place) - .get()); - return context; + [](paddle::platform::CPUPlace &place) + -> paddle::platform::DeviceContext * { + auto *context = new phi::CPUContext(); + context->SetAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetAllocator(place) + .get()); + context->SetHostAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetAllocator(paddle::platform::CPUPlace()) + .get()); + context->SetZeroAllocator( + paddle::memory::allocation::AllocatorFacade::Instance() + .GetZeroAllocator(place) + .get()); + return context; }) - .def_static("create", - [](paddle::platform::XPUPlace& place) - -> paddle::platform::DeviceContext* { + .def_static( + "create", + [](paddle::platform::XPUPlace &place) + -> paddle::platform::DeviceContext * { #ifndef PADDLE_WITH_XPU - PADDLE_THROW( - platform::errors::PermissionDenied( - "Cannot use XPUPlace in CPU/GPU version, " - "Please recompile or reinstall Paddle with XPU support.")); + PADDLE_THROW(platform::errors::PermissionDenied( + "Cannot use XPUPlace in CPU/GPU version, " + "Please recompile or reinstall Paddle with XPU support.")); #else auto* context = new paddle::platform::XPUDeviceContext(place); context->SetAllocator( @@ -1211,52 +1210,51 @@ All parameter, weight, gradient are variables in Paddle. 
                 .get());
             return context;
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::MLUPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::MLUPlace &place)
+              -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_MLU
-            PADDLE_THROW(
-                platform::errors::PermissionDenied(
-                    "Cannot use MLUPlace in CPU/GPU version, "
-                    "Please recompile or reinstall Paddle with MLU support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use MLUPlace in CPU/GPU version, "
+                "Please recompile or reinstall Paddle with MLU support."));
 #else
             return new paddle::platform::MLUDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::NPUPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::NPUPlace &place)
+              -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_ASCEND_CL
-            PADDLE_THROW(
-                platform::errors::PermissionDenied(
-                    "Cannot use NPUPlace in CPU/GPU/XPU version, "
-                    "Please recompile or reinstall Paddle with NPU support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use NPUPlace in CPU/GPU/XPU version, "
+                "Please recompile or reinstall Paddle with NPU support."));
 #else
             return new paddle::platform::NPUDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CustomPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static("create",
+                  [](paddle::platform::CustomPlace &place)
+                      -> paddle::platform::DeviceContext * {
 #ifndef PADDLE_WITH_CUSTOM_DEVICE
-            PADDLE_THROW(
-                platform::errors::PermissionDenied(
-                    "Cannot use CustomPlace in CPU/GPU/XPU version, "
-                    "Please recompile or reinstall Paddle with "
-                    "CustomDevice support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use CustomPlace in CPU/GPU/XPU version, "
+                "Please recompile or reinstall Paddle with "
+                "CustomDevice support."));
 #else
             return new paddle::platform::CustomDeviceContext(place);
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CUDAPlace& place)
-                      -> paddle::platform::DeviceContext* {
+                  })
+      .def_static(
+          "create",
+          [](paddle::platform::CUDAPlace &place)
+              -> paddle::platform::DeviceContext * {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
-            PADDLE_THROW(
-                platform::errors::PermissionDenied(
-                    "Cannot use CUDAPlace in CPU only version, "
-                    "Please recompile or reinstall Paddle with CUDA support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use CUDAPlace in CPU only version, "
+                "Please recompile or reinstall Paddle with CUDA support."));
 #else
             auto* context = new phi::GPUContext(place);
             context->SetAllocator(
@@ -1278,20 +1276,19 @@ All parameter, weight, gradient are variables in Paddle.
   context->PartialInitWithAllocator();
   return context;
 #endif
-                  })
-      .def_static("create",
-                  [](paddle::platform::CUDAPinnedPlace& place)
-                      -> paddle::platform::DeviceContext* {
+          })
+      .def_static(
+          "create",
+          [](paddle::platform::CUDAPinnedPlace &place)
+              -> paddle::platform::DeviceContext * {
 #if !defined(PADDLE_WITH_CUDA) && !defined(PADDLE_WITH_HIP)
-            PADDLE_THROW(
-                platform::errors::PermissionDenied(
-                    "Cannot use CUDAPinnedPlace in CPU only version, "
-                    "Please recompile or reinstall Paddle with CUDA support."));
+            PADDLE_THROW(platform::errors::PermissionDenied(
+                "Cannot use CUDAPinnedPlace in CPU only version, "
+                "Please recompile or reinstall Paddle with CUDA support."));
 #else
             return new paddle::platform::CUDAPinnedDeviceContext(place);
 #endif
-                  });;
-// clang-format on
+          });
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
   py::class_(m, "Communicator").def(py::init<>());
 #endif
diff --git a/paddle/infrt/common/type.h b/paddle/infrt/common/type.h
index b532fc154ff..70dd2c5cb46 100644
--- a/paddle/infrt/common/type.h
+++ b/paddle/infrt/common/type.h
@@ -172,20 +172,54 @@ const Type& UI1();
 template Type type_of();

-// clang-format off
-template <> inline Type type_of() { return F32(); }
-template <> inline Type type_of() { return F64(); }
-template <> inline Type type_of() { return UI8(); }
-template <> inline Type type_of() { return UI16(); }
-template <> inline Type type_of() { return I32(); }
-template <> inline Type type_of() { return UI32(); }
-template <> inline Type type_of() { return UI1(); }
-template <> inline Type type_of() { return I8(); }
-template <> inline Type type_of() { return I64(); }
-template <> inline Type type_of() { return UI64(); }
-template <> inline Type type_of() { return I8(); }
-template <> inline Type type_of() { return Void(); }
-// clang-format on
+template <>
+inline Type type_of() {
+  return F32();
+}
+template <>
+inline Type type_of() {
+  return F64();
+}
+template <>
+inline Type type_of() {
+  return UI8();
+}
+template <>
+inline Type type_of() {
+  return UI16();
+}
+template <>
+inline Type type_of() {
+  return I32();
+}
+template <>
+inline Type type_of() {
+  return UI32();
+}
+template <>
+inline Type type_of() {
+  return UI1();
+}
+template <>
+inline Type type_of() {
+  return I8();
+}
+template <>
+inline Type type_of() {
+  return I64();
+}
+template <>
+inline Type type_of() {
+  return UI64();
+}
+template <>
+inline Type type_of() {
+  return I8();
+}
+template <>
+inline Type type_of() {
+  return Void();
+}

 template <>
 inline Type type_of() {
   Type x = Int(8);
diff --git a/paddle/phi/api/lib/data_transform.cc b/paddle/phi/api/lib/data_transform.cc
index 3b44b1876e2..4f5ecf0aee1 100644
--- a/paddle/phi/api/lib/data_transform.cc
+++ b/paddle/phi/api/lib/data_transform.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-// clang-format off
 #include "paddle/phi/api/lib/data_transform.h"

 #include "paddle/phi/api/lib/kernel_dispatch.h"

@@ -24,7 +23,6 @@ limitations under the License. */
 #include "paddle/phi/kernels/transfer_layout_kernel.h"

 #include "paddle/fluid/framework/tensor_util.h"
-// clang-format on

 namespace paddle {
 namespace experimental {
diff --git a/paddle/phi/api/lib/tensor.cc b/paddle/phi/api/lib/tensor.cc
index cf528beb800..70ee28bc256 100644
--- a/paddle/phi/api/lib/tensor.cc
+++ b/paddle/phi/api/lib/tensor.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-// clang-format off
 #include "paddle/phi/api/include/tensor.h"

 #include
@@ -35,7 +34,6 @@ limitations under the License. */
 #include "paddle/phi/core/tensor_base.h"
 #include "paddle/phi/core/tensor_meta.h"
 #include "paddle/phi/core/tensor_utils.h"
-// clang-format off

 namespace paddle {
 namespace experimental {
@@ -312,8 +310,8 @@ void Tensor::set_impl(std::shared_ptr &&impl) {
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 gpuStream_t Tensor::stream() const {
   int device_id = phi::backends::gpu::GetCurrentDeviceId();
-  auto* gpu_context = DeviceContextPool::Instance()
-                          .Get(GPUPlace(device_id));
+  auto *gpu_context = DeviceContextPool::Instance().Get(
+      GPUPlace(device_id));
   return gpu_context->stream();
 }
 #endif
diff --git a/paddle/phi/api/lib/tensor_method.cc b/paddle/phi/api/lib/tensor_method.cc
index 6d38bbda363..037cee79b63 100644
--- a/paddle/phi/api/lib/tensor_method.cc
+++ b/paddle/phi/api/lib/tensor_method.cc
@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-// clang-format off
 #include "paddle/phi/api/include/tensor.h"

 #include "paddle/phi/common/int_array.h"
@@ -25,7 +24,6 @@ limitations under the License. */
 #include "paddle/phi/api/lib/kernel_dispatch.h"
 #include "paddle/phi/core/tensor_utils.h"
 #include "paddle/phi/infermeta/unary.h"
-// clang-format off

 namespace paddle {
 namespace experimental {
@@ -115,13 +113,12 @@ void Tensor::copy_(const Tensor &src,
     // Deep Copy AutoGrad info from src to self.
     *autograd_meta_ = *(src.autograd_meta_);
   }
-  kernel_key_set.backend_set =
-      kernel_key_set.backend_set |
-      BackendSet(phi::TransToPhiBackend(target_place));
+  kernel_key_set.backend_set = kernel_key_set.backend_set |
+                               BackendSet(phi::TransToPhiBackend(target_place));
   auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
   auto place = phi::TransToPhiPlace(kernel_key.backend());
-  auto& pool = paddle::experimental::DeviceContextPool::Instance();
-  auto* dev_ctx = pool.GetMutable(
+  auto &pool = paddle::experimental::DeviceContextPool::Instance();
+  auto *dev_ctx = pool.GetMutable(
       place.GetType() == target_place.GetType() ? target_place : place);

   Backend kernel_backend = Backend::UNDEFINED;
--
GitLab
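
Editor's note, appended after the patch rather than inside it: every hunk above deletes a "// clang-format off" / "// clang-format on" pair and reflows the code that the pair used to protect. The comment pair is clang-format's documented escape hatch: the tool leaves any region between the two markers exactly as written. A minimal sketch of the mechanism, using a hypothetical file that is not part of the Paddle tree:

// demo.cc -- hypothetical illustration, not from this patch.
#include <array>

// Without guards, clang-format may reflow this table onto fewer
// lines and destroy the hand-made row/column alignment.
// clang-format off
static const std::array<int, 9> kIdentity = {
    1, 0, 0,
    0, 1, 0,
    0, 0, 1,
};
// clang-format on

// Outside the guarded region, automatic formatting applies again.
int Trace() { return kIdentity[0] + kIdentity[4] + kIdentity[8]; }

Deleting the guards, as this patch does, declares that none of these regions needs hand-kept layout, so clang-format and the repository's format check become the single source of truth for their style; that is why most hunks above change only indentation, line breaks, and "&"/"*" placement rather than behavior.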