/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/full_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"

namespace phi {

template <typename InT, typename OutT = InT>
struct FullFunctor {
  OutT value;

  template <typename VType>
  explicit inline FullFunctor(VType val) {
    value = static_cast<OutT>(val);
  }

  __device__ __forceinline__ OutT operator()() const {
    return static_cast<OutT>(value);
  }
};
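
// A minimal sketch (illustrative only, not built as part of this file) of the
// pattern that ElementwiseKernel realizes with this zero-input functor: every
// thread evaluates functor() and writes the constant into its own output
// element. `FillSketch` is a hypothetical name, not a Paddle API.
//
//   template <typename T>
//   __global__ void FillSketch(T* out, int numel, FullFunctor<T> functor) {
//     int idx = blockIdx.x * blockDim.x + threadIdx.x;
//     if (idx < numel) {
//       out[idx] = functor();
//     }
//   }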

template <typename T, typename Context>
void FullKernel(const Context& dev_ctx,
                const IntArray& shape,
                const Scalar& val,
                DataType dtype,
                DenseTensor* out) {
  out->Resize(phi::make_ddim(shape.GetData()));
  int numel = out->numel();
  dev_ctx.template Alloc<T>(out);
  if (numel > 0) {
    // In transformer models the numel of the output can be zero, so only
    // launch the kernel for non-empty tensors.
    std::vector<const DenseTensor*> inputs = {};
    std::vector<DenseTensor*> outputs = {out};
    // This function has no input, so inputs.size() == 0. kUnary is used, but
    // no data is loaded in the kernel because the operator takes zero
    // parameters.
    phi::funcs::ElementwiseKernel<T>(
        dev_ctx, inputs, &outputs, FullFunctor<T>(val.to<T>()));
  }
}
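
// A hedged usage sketch (illustrative only, assuming a live GPUContext named
// `dev_ctx`; not part of the Paddle API): fill a 2x3 float32 tensor with ones.
//
//   DenseTensor out;
//   FullKernel<float, GPUContext>(
//       dev_ctx, IntArray({2, 3}), Scalar(1.0f), DataType::FLOAT32, &out);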

template <typename T, typename Context>
void FullLikeKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const Scalar& val,
                    DataType dtype,
                    DenseTensor* out) {
  auto value = val.to<float>();
  using CommonType = typename std::common_type<
      float,
      typename std::conditional<
          std::is_same<T, phi::dtype::float16>::value ||
              std::is_same<T, phi::dtype::bfloat16>::value,
          float,
          T>::type>::type;
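  // For example: with T = phi::dtype::float16, CommonType resolves to float,
  // so the range comparison below runs in float precision rather than half,
  // and the limits of float16 are themselves representable in the comparison
  // type.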

  auto common_type_value = static_cast<CommonType>(value);

  // Check whether the filled value is within the representable range of T.
  // Inf/NaN are treated as in-range so they can be filled into floating-point
  // tensors.
  bool is_out_range = true;
  if (std::isinf(value) || std::isnan(value)) {
    is_out_range = false;
  }

  if ((common_type_value >=
       static_cast<CommonType>(std::numeric_limits<T>::lowest())) &&
      (common_type_value <=
       static_cast<CommonType>(std::numeric_limits<T>::max()))) {
    is_out_range = false;
  }
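  // For example: FullLikeKernel<int16_t> with val = 40000.0f leaves
  // is_out_range as true (40000 > 32767) and trips the enforce below, while
  // val = INFINITY on a float tensor is deliberately accepted by the
  // isinf/isnan branch above.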

  PADDLE_ENFORCE_EQ(
      is_out_range,
      false,
      phi::errors::InvalidArgument(
          "The filled value is out of range for the target type; "
          "current kernel type is %s, the range should be between %f "
          "and %f, but the value is now %f.",
          typeid(T).name(),
          static_cast<CommonType>(std::numeric_limits<T>::lowest()),
          static_cast<CommonType>(std::numeric_limits<T>::max()),
          static_cast<float>(value)));
  std::vector<const DenseTensor*> inputs = {};
  std::vector<DenseTensor*> outputs = {out};
  dev_ctx.template Alloc<T>(out);
  // This function has no input, so inputs.size() == 0. kUnary is used, but no
  // data is loaded in the kernel because the operator takes zero parameters.
  int numel = out->numel();
  if (numel > 0) {
    phi::funcs::ElementwiseKernel<T>(
        dev_ctx, inputs, &outputs, FullFunctor<T>(value));
  }
}
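
// A hedged usage sketch (illustrative only; in the framework the output shape
// is normally set by the infer-meta pass, so the manual Resize is just for
// this standalone example). Assumes a live GPUContext `dev_ctx` and an
// existing float tensor `x`:
//
//   DenseTensor out;
//   out.Resize(x.dims());
//   FullLikeKernel<float, GPUContext>(
//       dev_ctx, x, Scalar(0.0f), DataType::FLOAT32, &out);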

}  // namespace phi

PD_REGISTER_KERNEL(full,
                   GPU,
                   ALL_LAYOUT,
                   phi::FullKernel,
                   float,
                   double,
                   uint8_t,
                   int16_t,
                   int,
                   int64_t,
                   bool,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   phi::dtype::complex<float>,
                   phi::dtype::complex<double>) {}

PD_REGISTER_KERNEL(full_like,
                   GPU,
                   ALL_LAYOUT,
                   phi::FullLikeKernel,
                   float,
                   double,
                   uint8_t,
                   int16_t,
                   int,
                   int64_t,
                   bool,
                   phi::dtype::bfloat16,
                   phi::dtype::float16) {
  kernel->InputAt(0).SetBackend(phi::Backend::ALL_BACKEND);
}