/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/cast_op.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/gpu_launch_config.h"

namespace paddle {
namespace operators {

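// Element-wise cast: each thread converts entries of `in` from InT to OutT.
// CUDA_KERNEL_LOOP is Paddle's grid-stride loop macro, so the kernel handles
// any N regardless of the launch configuration.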
template <typename InT, typename OutT>
__global__ void CastCUDAKernel(const InT* in, const int64_t N, OutT* out) {
  CUDA_KERNEL_LOOP(index, N) { out[index] = static_cast<OutT>(in[index]); }
}

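// Partial specialization of CastOpFunctor for the CUDA device. The output
// element type is only known at runtime, so apply<OutT>() is dispatched by
// the framework (via framework::VisitDataType in cast_op.h) once the output
// dtype has been resolved.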
template <typename InT>
struct CastOpFunctor<platform::CUDADeviceContext, InT> {
  const framework::Tensor* in_;
  framework::Tensor* out_;
  const platform::CUDADeviceContext& ctx_;
  CastOpFunctor(const framework::Tensor* in, framework::Tensor* out,
                const platform::CUDADeviceContext& ctx)
      : in_(in), out_(out), ctx_(ctx) {}

  template <typename OutT>
  void apply() const {
    auto* in = in_->data<InT>();
    auto size = in_->numel();
    auto* out = out_->mutable_data<OutT>(ctx_.GetPlace());
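    // Derive a 1-D grid/block configuration for `size` elements and launch
    // the cast kernel on the device context's stream.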
    platform::GpuLaunchConfig config =
        platform::GetGpuLaunchConfig1D(ctx_, size);
    CastCUDAKernel<InT, OutT><<<config.block_per_grid, config.thread_per_block,
                                0, ctx_.stream()>>>(in, size, out);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

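// Register the CUDA cast kernel for every supported element type, including
// float16 and the complex types.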
REGISTER_OP_CUDA_KERNEL(
    cast, ops::CastOpKernel<paddle::platform::CUDADeviceContext, float>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext, double>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext, int>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext, int64_t>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext, bool>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext, uint8_t>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext,
                      paddle::platform::float16>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext,
                      paddle::platform::complex64>,
    ops::CastOpKernel<paddle::platform::CUDADeviceContext,
                      paddle::platform::complex128>);