// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/masked_select_kernel.h"

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/reverse.h>
#include <thrust/scan.h>

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/expand_kernel.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
#include "paddle/phi/kernels/funcs/select_impl.cu.h"

namespace phi {
// Functor plugged into funcs::SelectKernel to compact the selected values.
//
// Template parameters:
//   MT   - mask element type (bool here).
//   InT  - input value element type.
//   OutT - output element type.
//
// operator() scans `num` elements and appends value[idx] to `out` for every
// index whose mask entry is true, preserving the original element order.
template <typename MT, typename InT, typename OutT>
struct MaskedSelectFunctor {
  HOSTDEVICE MaskedSelectFunctor() {}

  HOSTDEVICE inline void operator()(OutT* out,
                                    const MT* mask,
                                    const InT* value,
                                    int num) {
    int write_pos = 0;
    for (int idx = 0; idx < num; idx++) {
      if (!mask[idx]) {
        continue;
      }
      out[write_pos] = value[idx];
      ++write_pos;
    }
  }
};

// Selects the elements of `x` whose corresponding `mask` entry is true and
// writes them, in input order, to the 1-D tensor `out`.
//
// When `x` and `mask` have different shapes, both are first broadcast to
// their common (expanded) shape; the operand whose shape already matches is
// reused without a copy.
//
// Args:
//   dev_ctx: device context used for allocations and kernel launches.
//   x:       input tensor holding candidate values.
//   mask:    boolean tensor, broadcast-compatible with `x`.
//   out:     output tensor; resized by SelectKernel to the number of true
//            mask entries.
template <typename T, typename Context>
void MaskedSelectKernel(const Context& dev_ctx,
                        const DenseTensor& x,
                        const DenseTensor& mask,
                        DenseTensor* out) {
  DenseTensor mask_expand;
  DenseTensor x_expand;

  // Common broadcast shape of x and mask.
  auto expanded_size = funcs::MatrixGetBroadcastBatchPortion(
      vectorize(x.dims()), vectorize(mask.dims()));

  DDim expand_dims = make_ddim(expanded_size);
  // Expand each operand only when its shape actually differs from the
  // broadcast shape; otherwise share the original tensor to avoid a copy.
  if (mask.dims() != expand_dims) {
    phi::ExpandKernel<bool, Context>(
        dev_ctx, mask, IntArray(expanded_size), &mask_expand);
  } else {
    mask_expand = mask;
  }

  if (x.dims() != expand_dims) {
    phi::ExpandKernel<T, Context>(
        dev_ctx, x, IntArray(expanded_size), &x_expand);
  } else {
    x_expand = x;
  }

  auto input_dim = x_expand.dims();
  auto mask_dim = mask_expand.dims();
  // After broadcasting both operands must have identical shapes.
  PADDLE_ENFORCE_EQ(input_dim,
                    mask_dim,
                    phi::errors::InvalidArgument(
                        "The dim size of input and mask in OP(masked_select) "
                        "must be equal, but got input dim:(%ld), mask dim: "
                        "(%ld). Please check input "
                        "value.",
                        input_dim,
                        mask_dim));

  // SelectKernel performs the stream compaction: it counts the true mask
  // entries, resizes `out` accordingly, and copies the selected values
  // using MaskedSelectFunctor.
  using Functor = MaskedSelectFunctor<bool, T, T>;
  phi::funcs::SelectKernel<bool, T, T, 1, Functor>(
      dev_ctx, mask_expand, x_expand, out, Functor());
}

}  // namespace phi

// Register the GPU masked_select kernel for the supported value dtypes.
// Input 1 (the mask) is forced to BOOL regardless of the value dtype.
PD_REGISTER_KERNEL(masked_select,
                   GPU,
                   ALL_LAYOUT,
                   phi::MaskedSelectKernel,
                   float,
                   double,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {
  kernel->InputAt(1).SetDataType(phi::DataType::BOOL);
}