// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/sparse/unary_kernel.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/elementwise_base.h"
#include "paddle/phi/kernels/sparse/impl/unary_kernel_impl.h"

namespace phi {
namespace sparse {

// Element-wise functor dividing each value by a fixed scalar.
// Applied on-device to the non-zero values of a sparse tensor.
template <typename T>
struct DivScalarFunctor {
  T value_;

  explicit DivScalarFunctor(T value) : value_(value) {}

  __device__ __forceinline__ T operator()(const T x) const {
    return x / value_;
  }
};

// out = x / scalar for a SparseCooTensor: the sparsity pattern (indices) is
// copied via EmptyLikeCooKernel, then only the non-zero values are divided.
template <typename T, typename Context>
void DivCooScalarKernel(const Context& dev_ctx,
                        const SparseCooTensor& x,
                        float scalar,
                        SparseCooTensor* out) {
  // Allocates `out` with the same indices/meta as `x`; values filled below.
  EmptyLikeCooKernel<T>(dev_ctx, x, out);

  std::vector<const DenseTensor*> ins = {&(x.non_zero_elements())};
  std::vector<DenseTensor*> outs = {out->mutable_non_zero_elements()};
  DivScalarFunctor<T> func(static_cast<T>(scalar));
  funcs::ElementwiseKernel<T, DivScalarFunctor<T>>(dev_ctx, ins, &outs, func);
}

// out = x / scalar for a SparseCsrTensor: crows/cols are copied via
// EmptyLikeCsrKernel, then only the non-zero values are divided.
template <typename T, typename Context>
void DivCsrScalarKernel(const Context& dev_ctx,
                        const SparseCsrTensor& x,
                        float scalar,
                        SparseCsrTensor* out) {
  // Allocates `out` with the same crows/cols/meta as `x`; values filled below.
  EmptyLikeCsrKernel<T>(dev_ctx, x, out);

  std::vector<const DenseTensor*> ins = {&(x.non_zero_elements())};
  std::vector<DenseTensor*> outs = {out->mutable_non_zero_elements()};
  DivScalarFunctor<T> func(static_cast<T>(scalar));
  funcs::ElementwiseKernel<T, DivScalarFunctor<T>>(dev_ctx, ins, &outs, func);
}

}  // namespace sparse
}  // namespace phi

// Registers the COO and CSR variants of a sparse unary kernel for float and
// double, tagging input 0 with the matching sparse layout.
#define PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(name, prefix)            \
  PD_REGISTER_KERNEL(name##_coo,                                     \
                     GPU,                                            \
                     ALL_LAYOUT,                                     \
                     phi::sparse::prefix##CooKernel,                 \
                     float,                                          \
                     double) {                                       \
    kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);   \
  }                                                                  \
                                                                     \
  PD_REGISTER_KERNEL(name##_csr,                                     \
                     GPU,                                            \
                     ALL_LAYOUT,                                     \
                     phi::sparse::prefix##CsrKernel,                 \
                     float,                                          \
                     double) {                                       \
    kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);   \
  }

PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(sin, Sin)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(tan, Tan)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(asin, Asin)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(atan, Atan)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(sinh, Sinh)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(tanh, Tanh)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(asinh, Asinh)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(atanh, Atanh)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(sqrt, Sqrt)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(square, Square)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(log1p, Log1p)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(relu, Relu)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(abs, Abs)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(pow, Pow)
PD_REGISTER_SPARSE_UNARY_GPU_KERNEL(scale, Scale)

PD_REGISTER_KERNEL(divide_coo_scalar,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::DivCooScalarKernel,
                   float,
                   double) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

PD_REGISTER_KERNEL(divide_csr_scalar,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::DivCsrScalarKernel,
                   float,
                   double) {
  kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

PD_REGISTER_KERNEL(cast_coo,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::CastCooKernel,
                   float,
                   double,
                   int8_t,
                   uint8_t,
                   int16_t,
                   int,
                   int64_t,
                   bool) {}

PD_REGISTER_KERNEL(cast_csr,
                   GPU,
                   ALL_LAYOUT,
                   phi::sparse::CastCsrKernel,
                   float,
                   double,
                   int8_t,
                   uint8_t,
                   int16_t,
                   int,
                   int64_t,
                   bool) {}