// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/argsort_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_utils.h"  // for phi::Copy
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/transpose_kernel.h"

namespace phi {

template <typename T, typename Type>
static void FullSort(Type input_height,
                     Type input_width,
                     int input_dim,
                     const DenseTensor* input,
                     T* t_out,
                     Type* t_indices,
                     bool descending) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // Collect (value, index) pairs for one row, sort them, then scatter the
    // results into the output and index buffers.
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.push_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.push_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    // NaN-aware comparator: NaNs compare as the largest values, so they come
    // first in descending order and last in ascending order.
    std::sort(col_vec.begin(),
              col_vec.end(),
              [&](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                if (descending)
                  return (std::isnan(static_cast<double>(l.first)) &&
                          !std::isnan(static_cast<double>(r.first))) ||
                         (l.first > r.first);
                else
                  return (!std::isnan(static_cast<double>(l.first)) &&
                          std::isnan(static_cast<double>(r.first))) ||
                         (l.first < r.first);
              });

    for (Type j = 0; j < input_width; ++j) {
      t_out[i * input_width + j] = col_vec[j].first;
      t_indices[i * input_width + j] = col_vec[j].second;
    }
  }
}
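// Illustrative note (not part of the original source): given the comparator
// above, sorting the hypothetical row {1.0, NaN, 3.0} in descending order
// yields values {NaN, 3.0, 1.0} with indices {1, 2, 0}, while ascending
// order yields values {1.0, 3.0, NaN} with indices {0, 2, 1}.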
template <typename T, typename Context>
void ArgsortKernel(const Context& dev_ctx,
                   const DenseTensor& input,
                   int axis,
                   bool descending,
                   DenseTensor* output,
                   DenseTensor* indices) {
  auto in_dims = input.dims();
  auto rank = in_dims.size();
  // Normalize a negative axis to its non-negative equivalent.
  axis = (axis < 0) ? (in_dims.size() + axis) : axis;

  T* out_data = dev_ctx.template Alloc<T>(output);

  // For a 0-D tensor the output is a copy of the input and the index is 0.
  if (rank == 0) {
    phi::Copy<Context>(dev_ctx, input, dev_ctx.GetPlace(), false, output);
    phi::funcs::set_constant(dev_ctx, indices, 0);
    return;
  }

  // Sorting along the last axis: do a full sort directly.
  if (axis == -1 || axis + 1 == in_dims.size()) {
    const int64_t input_height =
        phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
    const int64_t input_width = in_dims[in_dims.size() - 1];
    int64_t* ids_data = dev_ctx.template Alloc<int64_t>(indices);
    FullSort<T, int64_t>(input_height,
                         input_width,
                         in_dims.size(),
                         &input,
                         out_data,
                         ids_data,
                         descending);
  } else {
    // Not the last axis: transpose the target axis to the end, sort, then
    // transpose the results back.
    std::vector<int> trans;
    for (int i = 0; i < axis; i++) {
      trans.push_back(i);
    }
    trans.push_back(in_dims.size() - 1);
    for (int i = axis + 1; i < in_dims.size() - 1; i++) {
      trans.push_back(i);
    }
    trans.push_back(axis);
    phi::DDim trans_dims(in_dims);
    for (size_t i = 0; i < trans.size(); i++) {
      trans_dims[i] = in_dims[trans[i]];
    }

    DenseTensor trans_inp;
    trans_inp.Resize(trans_dims);
    dev_ctx.template Alloc<T>(&trans_inp);
    // Do the transpose.
    TransposeKernel<T, Context>(dev_ctx, input, trans, &trans_inp);

    const int64_t input_height =
        phi::product(phi::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
    const int64_t input_width = trans_dims[trans_dims.size() - 1];

    DenseTensor tmp_out;
    tmp_out.Resize(trans_dims);
    T* t_out = dev_ctx.template Alloc<T>(&tmp_out);

    DenseTensor tmp_indices;
    tmp_indices.Resize(trans_dims);
    auto* t_ind = dev_ctx.template Alloc<int64_t>(&tmp_indices);

    FullSort<T, int64_t>(input_height,
                         input_width,
                         in_dims.size(),
                         &trans_inp,
                         t_out,
                         t_ind,
                         descending);

    dev_ctx.template Alloc<int64_t>(indices);
    TransposeKernel<int64_t, Context>(dev_ctx, tmp_indices, trans, indices);
    // Transpose back.
    TransposeKernel<T, Context>(dev_ctx, tmp_out, trans, output);
  }
}

}  // namespace phi

PD_REGISTER_KERNEL(
    argsort, CPU, ALL_LAYOUT, phi::ArgsortKernel, float, double, int, int64_t) {
}
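// Usage sketch (illustrative only; tensor and context setup are assumed, not
// shown): for a 2-D input
//   x = {{3, 1, 2},
//        {9, 7, 8}}
// calling the kernel with axis = -1 and descending = false takes the
// last-axis fast path (no transpose) and produces
//   output  = {{1, 2, 3}, {7, 8, 9}}
//   indices = {{1, 2, 0}, {1, 2, 0}}
// With axis = 0 the kernel instead builds trans = {1, 0}, transposes x so
// the target axis becomes the last one, sorts each row, and transposes both
// results back to the original layout.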