diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index 041f7487fd2575faa2407ea90c064a2cfdea96c5..3915ce5809c394738c58e80accccac531c268c23 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -12,47 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/huber_loss_op.h" #include #include #include +#include "paddle/fluid/framework/infershape_utils.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/phi/infermeta/binary.h" + namespace paddle { namespace operators { class HuberLossOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "HuberLoss"); - OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "HuberLoss"); - - auto x_dims = ctx->GetInputDim("X"); - auto y_dims = ctx->GetInputDim("Y"); - - PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(), - platform::errors::InvalidArgument( - "Input(input) rank and Input(label) rank should be " - "same, but received input rank(%d) != label rank(%d)", - x_dims.size(), y_dims.size())); - - bool contain_unknown_dim = - phi::contain_unknown_dim(x_dims) || phi::contain_unknown_dim(y_dims); - if (ctx->IsRuntime() || !contain_unknown_dim) { - PADDLE_ENFORCE_EQ( - x_dims, y_dims, - platform::errors::InvalidArgument( - "The Input(input) and Input(label) should have the same " - "shape, but received input shape [%s] != label shape [%s]", - x_dims, y_dims)); - } - - auto out_dims = y_dims; - ctx->SetOutputDim("Residual", out_dims); - ctx->SetOutputDim("Out", out_dims); - ctx->ShareLoD("X", "Out"); - } }; template @@ -139,14 +112,11 @@ class HuberLossGradOpMaker : public 
framework::SingleGradOpMaker { } // namespace paddle namespace ops = paddle::operators; +DELCARE_INFER_SHAPE_FUNCTOR(huber_loss, HuberLossInferShapeFunctor, + PT_INFER_META(phi::HuberLossInferMeta)); + REGISTER_OPERATOR(huber_loss, ops::HuberLossOp, ops::HuberLossOpMaker, ops::HuberLossGradOpMaker, - ops::HuberLossGradOpMaker); + ops::HuberLossGradOpMaker, + HuberLossInferShapeFunctor); REGISTER_OPERATOR(huber_loss_grad, ops::HuberLossGradOp); -REGISTER_OP_CPU_KERNEL( - huber_loss, ops::HuberLossKernel, - ops::HuberLossKernel); -REGISTER_OP_CPU_KERNEL( - huber_loss_grad, - ops::HuberLossGradKernel, - ops::HuberLossGradKernel); diff --git a/paddle/fluid/operators/huber_loss_op.cu b/paddle/fluid/operators/huber_loss_op.cu deleted file mode 100644 index 4ce6856a7eade1b314d8aef1d039424ad42e07cf..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/huber_loss_op.cu +++ /dev/null @@ -1,24 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ -#include "paddle/fluid/operators/huber_loss_op.h" - -namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - huber_loss, - ops::HuberLossKernel, - ops::HuberLossKernel); -REGISTER_OP_CUDA_KERNEL( - huber_loss_grad, - ops::HuberLossGradKernel, - ops::HuberLossGradKernel); diff --git a/paddle/fluid/operators/huber_loss_op.h b/paddle/fluid/operators/huber_loss_op.h deleted file mode 100644 index ebe26f05ab3e47245176614fb2ce57c264ebf5f5..0000000000000000000000000000000000000000 --- a/paddle/fluid/operators/huber_loss_op.h +++ /dev/null @@ -1,123 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/phi/core/hostdevice.h" - -namespace paddle { -namespace operators { - -using Tensor = framework::Tensor; -template -using EigenVector = framework::EigenVector; - -template -struct HuberLossForward { - HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {} - - HOSTDEVICE T operator()(const T& val) const { - T abs_val = std::abs(val); - if (abs_val <= delta) { - return static_cast(0.5) * val * val; - } else { - return delta * (abs_val - static_cast(0.5) * delta); - } - } - - T delta; -}; - -template -class HuberLossKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* in0 = context.Input("X"); - auto* in1 = context.Input("Y"); - auto* out0 = context.Output("Residual"); - auto* out1 = context.Output("Out"); - auto delta = static_cast(context.Attr("delta")); - auto& place = - *context.template device_context().eigen_device(); - - auto x = EigenVector::Flatten(*in0); - auto y = EigenVector::Flatten(*in1); - out0->mutable_data(context.GetPlace()); - auto residual = EigenVector::Flatten(*out0); - residual.device(place) = y - x; - out1->mutable_data(context.GetPlace()); - auto loss = EigenVector::Flatten(*out1); - loss.device(place) = residual.unaryExpr(HuberLossForward(delta)); - } -}; - -template -struct HuberLossBackward { - HOSTDEVICE HuberLossBackward(const T& delta, T sign) - : sign(sign), delta(delta) {} - - HOSTDEVICE T operator()(const T& val) const { - T abs_val = std::abs(val); - if (abs_val <= delta) { - return sign * val; - } else { - if (val > 0) { - return sign * delta; - } else { - return -1 * sign * delta; - } - } - } - - T sign; - T delta; -}; - -template -class HuberLossGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - auto* in0 = context.Input("Residual"); - auto* 
in1 = context.Input(framework::GradVarName("Out")); - auto* out0 = context.Output(framework::GradVarName("X")); - auto* out1 = context.Output(framework::GradVarName("Y")); - auto delta = static_cast(context.Attr("delta")); - auto& place = - *context.template device_context().eigen_device(); - - auto residual = EigenVector::Flatten(*in0); - auto out_grad = EigenVector::Flatten(*in1); - - if (out0) { - out0->mutable_data(context.GetPlace()); - auto x_grad = EigenVector::Flatten(*out0); - x_grad.device(place) = - residual.unaryExpr(HuberLossBackward(delta, -1.0)); - x_grad.device(place) = out_grad * x_grad; - } - - if (out1) { - out1->mutable_data(context.GetPlace()); - auto y_grad = EigenVector::Flatten(*out1); - y_grad.device(place) = - residual.unaryExpr(HuberLossBackward(delta, 1.0)); - y_grad.device(place) = out_grad * y_grad; - } - } -}; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/huber_loss_op_npu.cc b/paddle/fluid/operators/huber_loss_op_npu.cc index 19ced131c00a2a861a5140697b8a199f013ad5bf..6fc6960d3db565d698b252347e5734f949e16211 100644 --- a/paddle/fluid/operators/huber_loss_op_npu.cc +++ b/paddle/fluid/operators/huber_loss_op_npu.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/huber_loss_op.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/npu/npu_op_runner.h" namespace paddle { diff --git a/paddle/fluid/operators/huber_loss_op_xpu.cc b/paddle/fluid/operators/huber_loss_op_xpu.cc index 767ce542736e831e2ea587fc765ed6c0baf96589..ccddec2779515f26db10440633ab9d9894537182 100644 --- a/paddle/fluid/operators/huber_loss_op_xpu.cc +++ b/paddle/fluid/operators/huber_loss_op_xpu.cc @@ -13,8 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #ifdef PADDLE_WITH_XPU - -#include "paddle/fluid/operators/huber_loss_op.h" +#include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc index f79b5982f6194c8fe52b32320014add744942623..a964788b15e3122aeb2af857a25913543aad1c82 100644 --- a/paddle/phi/infermeta/binary.cc +++ b/paddle/phi/infermeta/binary.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/infermeta/binary.h" +#include "paddle/phi/core/ddim.h" #include "paddle/phi/kernels/funcs/common_shape.h" namespace phi { @@ -188,4 +189,40 @@ void ElementwiseRawInferMeta(const MetaTensor& x, out->share_lod(x); } +void HuberLossInferMeta(const MetaTensor& input, + const MetaTensor& label, + float delta, + MetaTensor* out, + MetaTensor* residual, + MetaConfig config) { + auto input_dims = input.dims(); + auto label_dims = label.dims(); + + PADDLE_ENFORCE_EQ(input_dims.size(), + label_dims.size(), + phi::errors::InvalidArgument( + "Input(input) rank and Input(label) rank should be " + "same, but received input rank(%d) != label rank(%d)", + input_dims.size(), + label_dims.size())); + + bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) || + phi::contain_unknown_dim(label_dims); + if (config.is_runtime || !contain_unknown_dim) { + PADDLE_ENFORCE_EQ( + input_dims, + label_dims, + phi::errors::InvalidArgument( + "The Input(input) and Input(label) should have the same " + "shape, but received input shape [%s] != label shape [%s]", + input_dims, + label_dims)); + } + + auto out_dims = label_dims; + residual->set_dims(out_dims); + out->set_dims(out_dims); + out->share_lod(input); +} + } // namespace phi diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h index 5e3214127ee2361117a215ad7623b040599519df..93ef9f5f35abbac2fd6c2c804efeb5a767a0d20f 100644 --- a/paddle/phi/infermeta/binary.h +++ 
b/paddle/phi/infermeta/binary.h @@ -45,4 +45,11 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta, const MetaTensor& y_meta, int axis, MetaTensor* out); + +void HuberLossInferMeta(const MetaTensor& input_meta, + const MetaTensor& label_meta, + float delta, + MetaTensor* out, + MetaTensor* residual, + MetaConfig config = MetaConfig()); } // namespace phi diff --git a/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc b/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..bd2349393e742911156e4c219d557f10acb42ded --- /dev/null +++ b/paddle/phi/kernels/cpu/huber_loss_grad_kernel.cc @@ -0,0 +1,22 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/huber_loss_grad_kernel.h" +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h" + +PT_REGISTER_KERNEL( + huber_loss_grad, CPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) { +} diff --git a/paddle/phi/kernels/cpu/huber_loss_kernel.cc b/paddle/phi/kernels/cpu/huber_loss_kernel.cc new file mode 100644 index 0000000000000000000000000000000000000000..dfdab16bc85e33bbf1a10594784b5bddaad3f8d2 --- /dev/null +++ b/paddle/phi/kernels/cpu/huber_loss_kernel.cc @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/kernels/huber_loss_kernel.h" +#include "paddle/phi/backends/cpu/cpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h" + +PT_REGISTER_KERNEL( + huber_loss, CPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {} diff --git a/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu b/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..5e1e000a38d955fc8f20e609ce57f11a0379a1ac --- /dev/null +++ b/paddle/phi/kernels/gpu/huber_loss_grad_kernel.cu @@ -0,0 +1,22 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/huber_loss_grad_kernel.h" +#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h" + +PT_REGISTER_KERNEL( + huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) { +} diff --git a/paddle/phi/kernels/gpu/huber_loss_kernel.cu b/paddle/phi/kernels/gpu/huber_loss_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..2cca0c08a3f3bf42bb885805cf6b57ab49c9ed62 --- /dev/null +++ b/paddle/phi/kernels/gpu/huber_loss_kernel.cu @@ -0,0 +1,21 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/backends/gpu/gpu_context.h" +#include "paddle/phi/core/kernel_registry.h" +#include "paddle/phi/kernels/huber_loss_kernel.h" +#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h" + +PT_REGISTER_KERNEL( + huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {} diff --git a/paddle/phi/kernels/huber_loss_grad_kernel.h b/paddle/phi/kernels/huber_loss_grad_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..c6246b1553197993e7c4cba2342120fa81f98ac4 --- /dev/null +++ b/paddle/phi/kernels/huber_loss_grad_kernel.h @@ -0,0 +1,30 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/device_context.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void HuberLossGradKernel(const Context& dev_ctx,
+                         const DenseTensor& residual,
+                         const DenseTensor& out_grad,
+                         float delta,
+                         DenseTensor* input_grad,
+                         DenseTensor* label_grad);
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/huber_loss_kernel.h b/paddle/phi/kernels/huber_loss_kernel.h
new file mode 100644
index 0000000000000000000000000000000000000000..3533a9ec6ded525f304e68aa510b57f9989ccce9
--- /dev/null
+++ b/paddle/phi/kernels/huber_loss_kernel.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+#pragma once
+
+#include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/device_context.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void HuberLossKernel(const Context& dev_ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& label,
+                     float delta,
+                     DenseTensor* out,
+                     DenseTensor* residual);
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h b/paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..b93578abba2b72374b54667a8b38665c20c9dc77
--- /dev/null
+++ b/paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/phi/kernels/funcs/eigen/common.h"
+#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
+#include "paddle/phi/kernels/huber_loss_grad_kernel.h"
+
+namespace phi {
+
+template <typename T>
+struct HuberLossBackward {
+  HOSTDEVICE HuberLossBackward(const T& delta, T sign)
+      : sign(sign), delta(delta) {}
+
+  HOSTDEVICE T operator()(const T& val) const {
+    T abs_val = std::abs(val);
+    if (abs_val <= delta) {
+      return sign * val;
+    } else {
+      if (val > 0) {
+        return sign * delta;
+      } else {
+        return -1 * sign * delta;
+      }
+    }
+  }
+
+  T sign;
+  T delta;
+};
+
+template <typename T, typename Context>
+void HuberLossGradKernel(const Context& dev_ctx,
+                         const DenseTensor& residual,
+                         const DenseTensor& out_grad,
+                         float delta,
+                         DenseTensor* input_grad,
+                         DenseTensor* label_grad) {
+  T delta_ = static_cast<T>(delta);
+  auto& place = *dev_ctx.eigen_device();
+
+  auto eigen_residual = EigenVector<T>::Flatten(residual);
+  auto eigen_out_grad = EigenVector<T>::Flatten(out_grad);
+
+  if (input_grad) {
+    dev_ctx.template Alloc<T>(input_grad);
+    auto eigen_input_grad = EigenVector<T>::Flatten(*input_grad);
+    eigen_input_grad.device(place) =
+        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, -1.0));
+    eigen_input_grad.device(place) = eigen_out_grad * eigen_input_grad;
+  }
+
+  if (label_grad) {
+    dev_ctx.template Alloc<T>(label_grad);
+    auto eigen_label_grad = EigenVector<T>::Flatten(*label_grad);
+    eigen_label_grad.device(place) =
+        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, 1.0));
+    eigen_label_grad.device(place) = eigen_out_grad * eigen_label_grad;
+  }
+}
+
+}  // namespace phi
diff --git a/paddle/phi/kernels/impl/huber_loss_kernel_impl.h b/paddle/phi/kernels/impl/huber_loss_kernel_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..7fbdc80c3829bf96ea8d3692095059e52a06b736
--- /dev/null
+++ b/paddle/phi/kernels/impl/huber_loss_kernel_impl.h
@@ -0,0 +1,61 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/phi/kernels/funcs/eigen/common.h"
+#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
+#include "paddle/phi/kernels/huber_loss_kernel.h"
+
+namespace phi {
+
+template <typename T>
+struct HuberLossForward {
+  HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {}
+
+  HOSTDEVICE T operator()(const T& val) const {
+    T abs_val = std::abs(val);
+    if (abs_val <= delta) {
+      return static_cast<T>(0.5) * val * val;
+    } else {
+      return delta * (abs_val - static_cast<T>(0.5) * delta);
+    }
+  }
+
+  T delta;
+};
+
+template <typename T, typename Context>
+void HuberLossKernel(const Context& dev_ctx,
+                     const DenseTensor& input,
+                     const DenseTensor& label,
+                     float delta,
+                     DenseTensor* out,
+                     DenseTensor* residual) {
+  T delta_ = static_cast<T>(delta);
+  auto& place = *dev_ctx.eigen_device();
+
+  auto x = EigenVector<T>::Flatten(input);
+  auto y = EigenVector<T>::Flatten(label);
+
+  dev_ctx.template Alloc<T>(residual);
+  auto eigen_residual = EigenVector<T>::Flatten(*residual);
+  eigen_residual.device(place) = y - x;
+
+  dev_ctx.template Alloc<T>(out);
+  auto loss = EigenVector<T>::Flatten(*out);
+  loss.device(place) = eigen_residual.unaryExpr(HuberLossForward<T>(delta_));
+}
+
+}  // namespace phi
diff --git a/paddle/phi/ops/compat/huber_loss_sig.cc b/paddle/phi/ops/compat/huber_loss_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..6e7183ff9f281f4f953cf6faeaa8f8b1f21bf408
--- /dev/null
+++
b/paddle/phi/ops/compat/huber_loss_sig.cc @@ -0,0 +1,36 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/phi/core/compat/op_utils.h" + +namespace phi { + +KernelSignature HuberLossOpArgumentMapping(const ArgumentMappingContext& ctx) { + return KernelSignature( + "huber_loss", {"X", "Y"}, {"delta"}, {"Out", "Residual"}); +} + +KernelSignature HuberLossGradOpArgumentMapping( + const ArgumentMappingContext& ctx) { + return KernelSignature("huber_loss_grad", + {"Residual", GradVarName("Out")}, + {"delta"}, + {GradVarName("X"), GradVarName("Y")}); +} + +} // namespace phi + +PT_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping); +PT_REGISTER_ARG_MAPPING_FN(huber_loss_grad, + phi::HuberLossGradOpArgumentMapping);