// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/backends/gpu/gpu_context.h"
#ifndef PADDLE_WITH_XPU_KP
#include "paddle/phi/common/complex.h"
#include "paddle/phi/common/float16.h"
#endif
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/elementwise_kernel_impl.h"

namespace phi {

// Instantiates the elementwise Add kernel (AddRawKernel) from the shared
// implementation macro.
DEFINE_CUDA_ELEMENTWISE_OP(Add)

// grad_add reuses the forward add, forwarding with the default broadcast
// axis (-1).
template <typename T, typename Context>
void GradAddKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const DenseTensor& y,
                   DenseTensor* out) {
  AddRawKernel<T>(dev_ctx, x, y, -1, out);
}

}  // namespace phi

#ifdef PADDLE_WITH_XPU_KP
// The XPU KPS build only registers the float32 kernel.
PD_REGISTER_KERNEL(add_raw, KPS, ALL_LAYOUT, phi::AddRawKernel, float) {}
#else

using float16 = phi::dtype::float16;
using bfloat16 = phi::dtype::bfloat16;
using complex64 = ::phi::dtype::complex<float>;
using complex128 = ::phi::dtype::complex<double>;

PD_REGISTER_KERNEL(add_raw,
                   KPS,
                   ALL_LAYOUT,
                   phi::AddRawKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   float16,
                   bfloat16,
                   complex64,
                   complex128) {}

PD_REGISTER_KERNEL(grad_add,
                   KPS,
                   ALL_LAYOUT,
                   phi::GradAddKernel,
                   float,
                   double,
                   int16_t,
                   int,
                   int64_t,
                   phi::dtype::float16,
                   phi::dtype::bfloat16,
                   complex64,
                   complex128) {}
#endif
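
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the original file): the raw
// kernel can be called directly with an explicit broadcast axis, mirroring
// how GradAddKernel above forwards with axis = -1. Context setup and tensor
// allocation are elided; the variable names here are assumptions made for
// illustration, not framework API guarantees.
//
//   phi::GPUContext dev_ctx;        // assumed: an initialized device context
//   phi::DenseTensor x, y, out;     // assumed: allocated, same-shape tensors
//   phi::AddRawKernel<float>(dev_ctx, x, y, /*axis=*/-1, &out);
// ---------------------------------------------------------------------------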