Unverified · Commit 6aafb2fa authored by Aurelius84, committed by GitHub

[Pten] Migrate huber_loss into phi (#39761)

* migrate huber_loss into phi

* migrate infershape

* rename pten to phi
Parent 740cfa94
@@ -12,47 +12,20 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/huber_loss_op.h"
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class HuberLossOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "HuberLoss");
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "HuberLoss");
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
PADDLE_ENFORCE_EQ(x_dims.size(), y_dims.size(),
platform::errors::InvalidArgument(
"Input(input) rank and Input(label) rank should be "
"same, but received input rank(%d) != label rank(%d)",
x_dims.size(), y_dims.size()));
bool contain_unknown_dim =
phi::contain_unknown_dim(x_dims) || phi::contain_unknown_dim(y_dims);
if (ctx->IsRuntime() || !contain_unknown_dim) {
PADDLE_ENFORCE_EQ(
x_dims, y_dims,
platform::errors::InvalidArgument(
"The Input(input) and Input(label) should have the same "
"shape, but received input shape [%s] != label shape [%s]",
x_dims, y_dims));
}
auto out_dims = y_dims;
ctx->SetOutputDim("Residual", out_dims);
ctx->SetOutputDim("Out", out_dims);
ctx->ShareLoD("X", "Out");
}
};
template <typename AttrType>
@@ -139,14 +112,11 @@ class HuberLossGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
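// Bind the fluid op's compile-time InferShape to the phi InferMeta function,
// so the shape logic removed from HuberLossOp::InferShape above now lives in
// a single place (phi::HuberLossInferMeta).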
DELCARE_INFER_SHAPE_FUNCTOR(huber_loss, HuberLossInferShapeFunctor,
PT_INFER_META(phi::HuberLossInferMeta));
REGISTER_OPERATOR(huber_loss, ops::HuberLossOp, ops::HuberLossOpMaker<float>,
ops::HuberLossGradOpMaker<paddle::framework::OpDesc>,
ops::HuberLossGradOpMaker<paddle::imperative::OpBase>);
ops::HuberLossGradOpMaker<paddle::imperative::OpBase>,
HuberLossInferShapeFunctor);
REGISTER_OPERATOR(huber_loss_grad, ops::HuberLossGradOp);
REGISTER_OP_CPU_KERNEL(
huber_loss, ops::HuberLossKernel<paddle::platform::CPUDeviceContext, float>,
ops::HuberLossKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
huber_loss_grad,
ops::HuberLossGradKernel<paddle::platform::CPUDeviceContext, float>,
ops::HuberLossGradKernel<paddle::platform::CPUDeviceContext, double>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/huber_loss_op.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
huber_loss,
ops::HuberLossKernel<paddle::platform::CUDADeviceContext, float>,
ops::HuberLossKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
huber_loss_grad,
ops::HuberLossGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::HuberLossGradKernel<paddle::platform::CUDADeviceContext, double>);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/hostdevice.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
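// Pointwise Huber loss on the residual r = y - x:
//   loss(r) = 0.5 * r^2                    if |r| <= delta
//   loss(r) = delta * (|r| - 0.5 * delta)  otherwise
// i.e. quadratic near zero, linear in the tails.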
template <typename T>
struct HuberLossForward {
HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {}
HOSTDEVICE T operator()(const T& val) const {
T abs_val = std::abs(val);
if (abs_val <= delta) {
return static_cast<T>(0.5) * val * val;
} else {
return delta * (abs_val - static_cast<T>(0.5) * delta);
}
}
T delta;
};
template <typename DeviceContext, typename T>
class HuberLossKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in0 = context.Input<Tensor>("X");
auto* in1 = context.Input<Tensor>("Y");
auto* out0 = context.Output<Tensor>("Residual");
auto* out1 = context.Output<Tensor>("Out");
auto delta = static_cast<T>(context.Attr<float>("delta"));
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto x = EigenVector<T>::Flatten(*in0);
auto y = EigenVector<T>::Flatten(*in1);
out0->mutable_data<T>(context.GetPlace());
auto residual = EigenVector<T>::Flatten(*out0);
residual.device(place) = y - x;
out1->mutable_data<T>(context.GetPlace());
auto loss = EigenVector<T>::Flatten(*out1);
loss.device(place) = residual.unaryExpr(HuberLossForward<T>(delta));
}
};
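// Derivative of the Huber loss w.r.t. the residual r:
//   d loss / d r = r                if |r| <= delta
//   d loss / d r = delta * sgn(r)   otherwise
// `sign` is +1 for the label gradient and -1 for the input gradient,
// since r = y - x implies dr/dy = 1 and dr/dx = -1.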
template <typename T>
struct HuberLossBackward {
HOSTDEVICE HuberLossBackward(const T& delta, T sign)
: sign(sign), delta(delta) {}
HOSTDEVICE T operator()(const T& val) const {
T abs_val = std::abs(val);
if (abs_val <= delta) {
return sign * val;
} else {
if (val > 0) {
return sign * delta;
} else {
return -1 * sign * delta;
}
}
}
T sign;
T delta;
};
template <typename DeviceContext, typename T>
class HuberLossGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* in0 = context.Input<Tensor>("Residual");
auto* in1 = context.Input<Tensor>(framework::GradVarName("Out"));
auto* out0 = context.Output<Tensor>(framework::GradVarName("X"));
auto* out1 = context.Output<Tensor>(framework::GradVarName("Y"));
auto delta = static_cast<T>(context.Attr<float>("delta"));
auto& place =
*context.template device_context<DeviceContext>().eigen_device();
auto residual = EigenVector<T>::Flatten(*in0);
auto out_grad = EigenVector<T>::Flatten(*in1);
if (out0) {
out0->mutable_data<T>(context.GetPlace());
auto x_grad = EigenVector<T>::Flatten(*out0);
x_grad.device(place) =
residual.unaryExpr(HuberLossBackward<T>(delta, -1.0));
x_grad.device(place) = out_grad * x_grad;
}
if (out1) {
out1->mutable_data<T>(context.GetPlace());
auto y_grad = EigenVector<T>::Flatten(*out1);
y_grad.device(place) =
residual.unaryExpr(HuberLossBackward<T>(delta, 1.0));
y_grad.device(place) = out_grad * y_grad;
}
}
};
} // namespace operators
} // namespace paddle
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/huber_loss_op.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"
namespace paddle {
......
@@ -13,8 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/huber_loss_op.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
......
@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/infermeta/binary.h"
#include "paddle/phi/core/ddim.h"
#include "paddle/phi/kernels/funcs/common_shape.h"
namespace phi {
@@ -188,4 +189,40 @@ void ElementwiseRawInferMeta(const MetaTensor& x,
out->share_lod(x);
}
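// Shape inference for huber_loss: input and label must have the same rank,
// and (at runtime, or whenever no dimension is unknown) the same shape;
// Out and Residual inherit the label's shape.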
void HuberLossInferMeta(const MetaTensor& input,
const MetaTensor& label,
float delta,
MetaTensor* out,
MetaTensor* residual,
MetaConfig config) {
auto input_dims = input.dims();
auto label_dims = label.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
label_dims.size(),
phi::errors::InvalidArgument(
"Input(input) rank and Input(label) rank should be "
"same, but received input rank(%d) != label rank(%d)",
input_dims.size(),
label_dims.size()));
bool contain_unknown_dim = phi::contain_unknown_dim(input_dims) ||
phi::contain_unknown_dim(label_dims);
if (config.is_runtime || !contain_unknown_dim) {
PADDLE_ENFORCE_EQ(
input_dims,
label_dims,
phi::errors::InvalidArgument(
"The Input(input) and Input(label) should have the same "
"shape, but received input shape [%s] != label shape [%s]",
input_dims,
label_dims));
}
auto out_dims = label_dims;
residual->set_dims(out_dims);
out->set_dims(out_dims);
out->share_lod(input);
}
} // namespace phi
@@ -45,4 +45,11 @@ void ElementwiseRawInferMeta(const MetaTensor& x_meta,
const MetaTensor& y_meta,
int axis,
MetaTensor* out);
void HuberLossInferMeta(const MetaTensor& input_meta,
const MetaTensor& label_meta,
float delta,
MetaTensor* out,
MetaTensor* residual,
MetaConfig config = MetaConfig());
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/huber_loss_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
PT_REGISTER_KERNEL(
huber_loss_grad, CPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/huber_loss_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
PT_REGISTER_KERNEL(
huber_loss, CPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/huber_loss_grad_kernel.h"
#include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
PT_REGISTER_KERNEL(
huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/huber_loss_kernel.h"
#include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
PT_REGISTER_KERNEL(
huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
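// Computes input_grad and label_grad from the forward-saved residual
// (label - input) and the upstream gradient out_grad. Either output
// pointer may be null when that gradient is not required.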
template <typename T, typename Context>
void HuberLossGradKernel(const Context& dev_ctx,
const DenseTensor& residual,
const DenseTensor& out_grad,
float delta,
DenseTensor* input_grad,
DenseTensor* label_grad);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/device_context.h"
namespace phi {
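// Computes the elementwise Huber loss of (input, label) into `out` and also
// writes the residual (label - input), which the backward pass reuses.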
template <typename T, typename Context>
void HuberLossKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
float delta,
DenseTensor* out,
DenseTensor* residual);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/huber_loss_grad_kernel.h"
namespace phi {
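// Same functor as the fluid version above: the piecewise derivative of the
// Huber loss w.r.t. the residual, pre-multiplied by `sign` (+1 for the label
// gradient, -1 for the input gradient).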
template <typename T>
struct HuberLossBackward {
HOSTDEVICE HuberLossBackward(const T& delta, T sign)
: sign(sign), delta(delta) {}
HOSTDEVICE T operator()(const T& val) const {
T abs_val = std::abs(val);
if (abs_val <= delta) {
return sign * val;
} else {
if (val > 0) {
return sign * delta;
} else {
return -1 * sign * delta;
}
}
}
T sign;
T delta;
};
template <typename T, typename Context>
void HuberLossGradKernel(const Context& dev_ctx,
const DenseTensor& residual,
const DenseTensor& out_grad,
float delta,
DenseTensor* input_grad,
DenseTensor* label_grad) {
T delta_ = static_cast<T>(delta);
auto& place = *dev_ctx.eigen_device();
auto eigen_residual = EigenVector<T>::Flatten(residual);
auto eigen_out_grad = EigenVector<T>::Flatten(out_grad);
if (input_grad) {
dev_ctx.template Alloc<T>(input_grad);
auto eigen_input_grad = EigenVector<T>::Flatten(*input_grad);
eigen_input_grad.device(place) =
eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, -1.0));
eigen_input_grad.device(place) = eigen_out_grad * eigen_input_grad;
}
if (label_grad) {
dev_ctx.template Alloc<T>(label_grad);
auto eigen_label_grad = EigenVector<T>::Flatten(*label_grad);
eigen_label_grad.device(place) =
eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, 1.0));
eigen_label_grad.device(place) = eigen_out_grad * eigen_label_grad;
}
}
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/huber_loss_kernel.h"
namespace phi {
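// Same pointwise Huber loss functor as the fluid version above:
// quadratic for |r| <= delta, linear beyond.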
template <typename T>
struct HuberLossForward {
HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {}
HOSTDEVICE T operator()(const T& val) const {
T abs_val = std::abs(val);
if (abs_val <= delta) {
return static_cast<T>(0.5) * val * val;
} else {
return delta * (abs_val - static_cast<T>(0.5) * delta);
}
}
T delta;
};
template <typename T, typename Context>
void HuberLossKernel(const Context& dev_ctx,
const DenseTensor& input,
const DenseTensor& label,
float delta,
DenseTensor* out,
DenseTensor* residual) {
T delta_ = static_cast<T>(delta);
auto& place = *dev_ctx.eigen_device();
auto x = EigenVector<T>::Flatten(input);
auto y = EigenVector<T>::Flatten(label);
dev_ctx.template Alloc<T>(residual);
auto eigen_residual = EigenVector<T>::Flatten(*residual);
eigen_residual.device(place) = y - x;
dev_ctx.template Alloc<T>(out);
auto loss = EigenVector<T>::Flatten(*out);
loss.device(place) = eigen_residual.unaryExpr(HuberLossForward<T>(delta_));
}
} // namespace phi
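For reference, a minimal standalone sketch of the same pointwise math (plain C++, independent of Paddle; the `huber` helper and the sample values are illustrative, not part of the PR):

#include <cmath>
#include <cstdio>

// Pointwise Huber loss, matching HuberLossForward above.
double huber(double r, double delta) {
  const double a = std::fabs(r);
  return a <= delta ? 0.5 * r * r : delta * (a - 0.5 * delta);
}

int main() {
  // residual r = label - input, delta = 1.0
  std::printf("%f\n", huber(0.5, 1.0));  // quadratic region: 0.5 * 0.25 = 0.125
  std::printf("%f\n", huber(3.0, 1.0));  // linear region: 1.0 * (3.0 - 0.5) = 2.5
}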
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
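// Maps the fluid op's names onto the phi kernel signatures: inputs {X, Y},
// attribute {delta}, and outputs {Out, Residual} line up with
// HuberLossKernel(input, label, delta, out, residual); the grad mapping
// feeds the saved Residual plus Out@GRAD into HuberLossGradKernel.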
KernelSignature HuberLossOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"huber_loss", {"X", "Y"}, {"delta"}, {"Out", "Residual"});
}
KernelSignature HuberLossGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("huber_loss_grad",
{"Residual", GradVarName("Out")},
{"delta"},
{GradVarName("X"), GradVarName("Y")});
}
} // namespace phi
PT_REGISTER_ARG_MAPPING_FN(huber_loss, phi::HuberLossOpArgumentMapping);
PT_REGISTER_ARG_MAPPING_FN(huber_loss_grad,
phi::HuberLossGradOpArgumentMapping);