// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/kernels/clip_kernel.h"

#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/kernel_registry.h"

#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/transform.h"

#if defined(__NVCC__) || defined(__HIPCC__)
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#endif

namespace phi {

// Gradient of clip: pass the upstream gradient (x) through wherever the
// forward input (y) lay strictly inside (min, max); elsewhere the clip was
// saturated and the gradient is zero.
template <typename T>
class ClipGradFunctor {
 public:
  explicit ClipGradFunctor(const T min, const T max) : min_(min), max_(max) {}
  HOSTDEVICE T operator()(const T x, const T y) const {
    return (y > min_ && y < max_) ? x : static_cast<T>(0);
  }

 private:
  T min_;
  T max_;
};

template <typename T, typename Context>
void ClipGradKernel(const Context& dev_ctx,
                    const DenseTensor& x,
                    const DenseTensor& out_grad,
                    const Scalar& min,
                    const Scalar& max,
                    DenseTensor* x_grad) {
  auto max_ = max.to<T>();
  auto min_ = min.to<T>();
#if defined(__NVCC__) || defined(__HIPCC__)
  // GPU path: run the functor through phi's elementwise kernel machinery.
  std::vector<const DenseTensor*> ins = {&out_grad, &x};
  std::vector<DenseTensor*> outs = {x_grad};
  auto functor = ClipGradFunctor<T>(min_, max_);
  dev_ctx.template Alloc<T>(x_grad);
  phi::funcs::ElementwiseKernel<T>(dev_ctx, ins, &outs, functor);
#else
  // CPU path: walk the (out_grad, x) pairs with platform::Transform.
  int64_t numel = out_grad.numel();
  auto* d_x_data = dev_ctx.template Alloc<T>(x_grad);
  const T* d_out_data = out_grad.data<T>();
  const T* x_data = x.data<T>();
  paddle::platform::Transform<Context> trans;
  trans(dev_ctx,
        d_out_data,
        d_out_data + numel,
        x_data,
        d_x_data,
        ClipGradFunctor<T>(min_, max_));
#endif
}

}  // namespace phi
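
// A minimal registration sketch (an assumption, not part of this header):
// templated kernels like ClipGradKernel are typically instantiated and made
// visible to the framework via PD_REGISTER_KERNEL in a separate translation
// unit, e.g. a CPU-specific clip_grad_kernel.cc. The dtype list below is
// illustrative and may differ from the actual registration in the Paddle
// source tree.
//
// PD_REGISTER_KERNEL(clip_grad,
//                    CPU,
//                    ALL_LAYOUT,
//                    phi::ClipGradKernel,
//                    float,
//                    double,
//                    int,
//                    int64_t) {}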