// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/pten/kernels/cast_kernel.h"

#include "paddle/pten/api/ext/dispatch.h"
#include "paddle/pten/backends/cpu/cpu_context.h"
#include "paddle/pten/core/kernel_registry.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/transform.h"

namespace pten {

// Element-wise functor that casts a single value from InT to OutT.
template <typename InT, typename OutT>
struct CastOpTransformFunctor {
  HOSTDEVICE OutT operator()(InT in) const { return static_cast<OutT>(in); }
};

// Casts every element of x into out by applying CastOpTransformFunctor
// over the flat data range with paddle::platform::Transform.
template <typename InT, typename OutT>
void CastKernelImpl(const CPUContext& dev_ctx,
                    const DenseTensor& x,
                    DenseTensor* out) {
  auto* in_begin = x.data<InT>();
  auto numel = x.numel();
  auto* in_end = in_begin + numel;

  auto* out_begin = out->mutable_data<OutT>();

  paddle::platform::Transform<CPUContext> trans;
  trans(dev_ctx,
        in_begin,
        in_end,
        out_begin,
        CastOpTransformFunctor<InT, OutT>());
}

// Dispatches on the runtime out_dtype to a statically typed CastKernelImpl.
template <typename T, typename ContextT>
void Cast(const ContextT& dev_ctx,
          const DenseTensor& x,
          DataType out_dtype,
          DataType in_dtype,
          DenseTensor* out) {
  PD_VISIT_ALL_TYPES(out_dtype, "CastKernelImpl", ([&] {
                       CastKernelImpl<T, data_t>(dev_ctx, x, out);
                     }));
}

}  // namespace pten

PT_REGISTER_CTX_KERNEL(cast,
                       CPU,
                       ALL_LAYOUT,
                       pten::Cast,
                       float,
                       double,
                       int,
                       int64_t,
                       int16_t,
                       bool,
                       uint8_t,
                       paddle::platform::float16,
                       paddle::platform::bfloat16,
                       paddle::platform::complex<float>,
                       paddle::platform::complex<double>) {
  kernel->OutputAt(0).SetDataType(paddle::experimental::DataType::UNDEFINED);
}
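
// Illustrative usage (a hedged sketch, not code from this file): assuming a
// CPUContext `dev_ctx`, a float-typed DenseTensor `x`, and an output
// DenseTensor `out` (all hypothetical names), the templated entry point above
// could be invoked as
//
//   pten::Cast<float>(dev_ctx, x,
//                     paddle::experimental::DataType::INT64,
//                     paddle::experimental::DataType::FLOAT32,
//                     &out);
//
// PD_VISIT_ALL_TYPES then maps the runtime out_dtype onto the statically
// typed CastKernelImpl<float, int64_t> instantiation defined above, where
// data_t names the output element type selected by the macro.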