/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once
#include <algorithm>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"

namespace paddle {
namespace operators {

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;

template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;

using Tensor = framework::Tensor;

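// FullSort views the input as an (input_height x input_width) matrix and
// sorts every row independently: each row's (value, original index) pairs
// are sorted by value, then the sorted values and their source positions
// are written to t_out and t_indices.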
template <typename T, typename Type>
static void FullSort(Type input_height, Type input_width, int input_dim,
                     const framework::Tensor* input, T* t_out, Type* t_indices,
                     bool descending) {
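// Rows are independent, so sort them in parallel when OpenMP is available.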
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.push_back(std::pair<T, Type>(e_input(j), j));
      }
    } else {
      auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.push_back(std::pair<T, Type>(e_input(i, j), j));
      }
    }
    std::sort(col_vec.begin(), col_vec.end(),
              [descending](const std::pair<T, Type>& l,
                           const std::pair<T, Type>& r) {
                return descending ? l.first > r.first : l.first < r.first;
              });

    for (Type j = 0; j < input_width; ++j) {
      t_out[i * input_width + j] = col_vec[j].first;
      t_indices[i * input_width + j] = col_vec[j].second;
    }
  }
}
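
// ArgsortKernel sorts X along `axis`: sorted values go to Out and the
// original positions of those values go to Indices. The last axis is
// sorted directly; any other axis is transposed into last position first.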
template <typename DeviceContext, typename T>
class ArgsortKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<framework::Tensor>("X");
    auto* output = ctx.Output<framework::Tensor>("Out");
    auto* indices = ctx.Output<framework::Tensor>("Indices");
    int axis = ctx.Attr<int>("axis");
    bool descending = ctx.Attr<bool>("descending");

    auto in_dims = input->dims();
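    // Map a negative axis onto its non-negative equivalent.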
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;

    T* out_data = output->mutable_data<T>(ctx.GetPlace());

    // Sort directly when the target axis is already the last dimension.
    if (axis == -1 || axis + 1 == in_dims.size()) {
      const int64_t input_height = framework::product(
          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];

      int64_t* ids_data = indices->mutable_data<int64_t>(ctx.GetPlace());
      FullSort<T, int64_t>(input_height, input_width, in_dims.size(), input,
                           out_data, ids_data, descending);
    } else {
      // Otherwise build a permutation that swaps `axis` with the last
      // dimension, sort along that last dimension, then transpose back.
      std::vector<int> trans;
      for (int i = 0; i < axis; i++) {
        trans.push_back(i);
      }
      trans.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans.push_back(i);
      }
      trans.push_back(axis);
      framework::DDim trans_dims(in_dims);
      for (int i = 0; i < trans.size(); i++) {
        trans_dims[i] = in_dims[trans[i]];
      }

      Tensor trans_inp;
      trans_inp.mutable_data<T>(trans_dims, ctx.GetPlace());
      int ndims = trans.size();
      auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
      // Copy X into trans_inp with `axis` moved to the last dimension.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_ctx, *input,
                                                  &trans_inp, trans);

      const int64_t input_height = framework::product(
          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
      const int64_t input_width = trans_dims[trans_dims.size() - 1];

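      // Temporaries shaped like trans_dims hold the sorted values and indices.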
      Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
      output->mutable_data<T>(ctx.GetPlace());

      Tensor tmp_indices;

      auto* t_ind =
          tmp_indices.mutable_data<int64_t>(trans_dims, ctx.GetPlace());

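      // Sort along what is now the last dimension of the transposed tensor.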
      FullSort<T, int64_t>(input_height, input_width, in_dims.size(),
                           &trans_inp, t_out, t_ind, descending);

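      // Allocate Indices and transpose the temporary indices back into the
      // original layout.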
      indices->mutable_data<int64_t>(ctx.GetPlace());
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_ctx, tmp_indices, indices, trans);
      // Transpose the sorted values back into the original layout.
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_ctx, tmp_out,
                                                  output, trans);
    }
  }
};

}  // namespace operators
}  // namespace paddle