// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/cast_kernel.h"

#include "paddle/phi/api/ext/dispatch.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/aligned_vector.h"
#include "paddle/fluid/platform/device/gpu/gpu_helper.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"

namespace phi {

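// Element-wise cast functor: converts a single input value from InT to OutT.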
template <typename InT, typename OutT>
struct CastFunctor {
  __device__ __forceinline__ OutT operator()(const InT x) const {
    return static_cast<OutT>(x);
  }
};

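// Allocates the output as OutT and runs the cast through the shared
// element-wise launch path (phi::funcs::ElementwiseKernel) with the
// cast functor above.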
template <typename InT, typename OutT>
void CastCUDAKernelImpl(const GPUContext& dev_ctx,
                        const DenseTensor& x,
                        DenseTensor* out) {
  std::vector<const DenseTensor*> inputs;
  std::vector<DenseTensor*> outputs;
  inputs.emplace_back(&x);
  outputs.emplace_back(out);
  dev_ctx.Alloc<OutT>(out);
  phi::funcs::ElementwiseKernel<OutT>(
      dev_ctx, inputs, &outputs, CastFunctor<InT, OutT>());
}

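// Dispatches on the runtime out_dtype: PD_VISIT_ALL_TYPES expands to a switch
// over the supported data types and exposes the selected type as data_t, which
// selects the CastCUDAKernelImpl<T, OutT> instantiation to run.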
template <typename T, typename Context>
void CastKernel(const Context& dev_ctx,
                const DenseTensor& x,
                DataType out_dtype,
                DenseTensor* out) {
  PD_VISIT_ALL_TYPES(out_dtype, "CastCUDAKernelImpl", ([&] {
                       CastCUDAKernelImpl<T, data_t>(dev_ctx, x, out);
                     }));
}

}  // namespace phi

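// Registers the cast kernel for the listed input types. The output data type
// is reset to UNDEFINED so it is not tied to the registered input type and can
// follow out_dtype at run time.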
#define PTEN_REGISTER_CAST_CUDA_BASE_TYPE(op_name, ...) \
  PT_REGISTER_KERNEL(cast,                              \
                     GPU,                               \
                     ALL_LAYOUT,                        \
                     phi::CastKernel,                   \
                     float,                             \
                     double,                            \
                     int,                               \
                     int64_t,                           \
                     int16_t,                           \
                     bool,                              \
                     uint8_t,                           \
                     phi::dtype::float16,               \
                     phi::dtype::complex<float>,        \
                     phi::dtype::complex<double>,       \
                     ##__VA_ARGS__) {                   \
    kernel->OutputAt(0).SetDataType(                    \
        paddle::experimental::DataType::UNDEFINED);     \
  }

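// bfloat16 is registered only for the CUDA build; the HIP (ROCm) build leaves
// it out.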
#if !defined(PADDLE_WITH_HIP)
PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast, phi::dtype::bfloat16)
#else
PTEN_REGISTER_CAST_CUDA_BASE_TYPE(cast)
#endif