diff --git a/paddle/phi/backends/xpu/xpu2_op_list.cc b/paddle/phi/backends/xpu/xpu2_op_list.cc
index 4071cabc27b55e5248b3e97d25ff2399c84cfcec..7908d1fc627c1af565f3ea9c9ed76e561210923a 100644
--- a/paddle/phi/backends/xpu/xpu2_op_list.cc
+++ b/paddle/phi/backends/xpu/xpu2_op_list.cc
@@ -66,6 +66,10 @@ XPUOpMap& get_kl2_ops() {
                         phi::DataType::INT64})},
       {"bilinear_interp_v2", XPUKernelSet({phi::DataType::FLOAT32})},
       {"bilinear_interp_v2_grad", XPUKernelSet({phi::DataType::FLOAT32})},
+      {"bitwise_and", XPUKernelSet({phi::DataType::BOOL})},
+      {"bitwise_not", XPUKernelSet({phi::DataType::BOOL})},
+      {"bitwise_or", XPUKernelSet({phi::DataType::BOOL})},
+      {"bitwise_xor", XPUKernelSet({phi::DataType::BOOL})},
       {"broadcast", XPUKernelSet({phi::DataType::FLOAT32})},
       {"c_allgather",
        XPUKernelSet({phi::DataType::FLOAT16,
diff --git a/paddle/phi/kernels/xpu/bitwise.cc b/paddle/phi/kernels/xpu/bitwise.cc
new file mode 100644
index 0000000000000000000000000000000000000000..a897a37acd20cd2726a13dbf6b3f471a7c3b5531
--- /dev/null
+++ b/paddle/phi/kernels/xpu/bitwise.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/bitwise_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+// Bitwise AND for XPU. Registered for bool only (see xpu2_op_list), so the
+// bitwise op is implemented via the xdnn logical primitive, which is
+// equivalent on bool operands. Assumes x and y have matching numel
+// (elementwise, no broadcasting here) — TODO confirm against caller.
+template <typename T, typename Context>
+void BitwiseAndKernel(const Context& ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      DenseTensor* out) {
+  ctx.template Alloc<T>(out);
+  int r = xpu::logical_and(
+      ctx.x_context(), x.data<T>(), y.data<T>(), out->data<T>(), x.numel());
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "bitwise and");
+}
+
+// Bitwise OR for XPU (bool only); delegates to xdnn logical_or.
+template <typename T, typename Context>
+void BitwiseOrKernel(const Context& ctx,
+                     const DenseTensor& x,
+                     const DenseTensor& y,
+                     DenseTensor* out) {
+  ctx.template Alloc<T>(out);
+  int r = xpu::logical_or(
+      ctx.x_context(), x.data<T>(), y.data<T>(), out->data<T>(), x.numel());
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "bitwise or");
+}
+
+// Bitwise XOR for XPU (bool only); delegates to xdnn logical_xor.
+template <typename T, typename Context>
+void BitwiseXorKernel(const Context& ctx,
+                      const DenseTensor& x,
+                      const DenseTensor& y,
+                      DenseTensor* out) {
+  ctx.template Alloc<T>(out);
+  int r = xpu::logical_xor(
+      ctx.x_context(), x.data<T>(), y.data<T>(), out->data<T>(), x.numel());
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "bitwise xor");
+}
+
+// Bitwise NOT for XPU (bool only); delegates to xdnn logical_not.
+template <typename T, typename Context>
+void BitwiseNotKernel(const Context& ctx,
+                      const DenseTensor& x,
+                      DenseTensor* out) {
+  ctx.template Alloc<T>(out);
+  int r =
+      xpu::logical_not(ctx.x_context(), x.data<T>(), out->data<T>(), x.numel());
+  PADDLE_ENFORCE_XDNN_SUCCESS(r, "bitwise not");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(bitwise_and, XPU, ALL_LAYOUT, phi::BitwiseAndKernel, bool) {}
+PD_REGISTER_KERNEL(bitwise_or, XPU, ALL_LAYOUT, phi::BitwiseOrKernel, bool) {}
+PD_REGISTER_KERNEL(bitwise_xor, XPU, ALL_LAYOUT, phi::BitwiseXorKernel, bool) {}
+PD_REGISTER_KERNEL(bitwise_not, XPU, ALL_LAYOUT, phi::BitwiseNotKernel, bool) {}