Unverified commit 74b91bce, authored by co63oc, committed by GitHub

Add huber_loss tests (#53535)

Parent 50f0acc0
@@ -18,6 +18,11 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_grad_kernel_impl.h"
 
-PD_REGISTER_KERNEL(
-    huber_loss_grad, GPU, ALL_LAYOUT, phi::HuberLossGradKernel, float, double) {
-}
+PD_REGISTER_KERNEL(huber_loss_grad,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::HuberLossGradKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
@@ -18,5 +18,11 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/huber_loss_kernel_impl.h"
 
-PD_REGISTER_KERNEL(
-    huber_loss, GPU, ALL_LAYOUT, phi::HuberLossKernel, float, double) {}
+PD_REGISTER_KERNEL(huber_loss,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::HuberLossKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
@@ -14,6 +14,7 @@
 #pragma once
 
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/huber_loss_grad_kernel.h"
@@ -26,14 +27,14 @@ struct HuberLossBackward {
       : sign(sign), delta(delta) {}
 
   HOSTDEVICE T operator()(const T& val) const {
-    T abs_val = std::abs(val);
+    T abs_val = abs(val);
     if (abs_val <= delta) {
       return sign * val;
     } else {
-      if (val > 0) {
+      if (val > static_cast<T>(0)) {
         return sign * delta;
       } else {
-        return -1 * sign * delta;
+        return static_cast<T>(-1) * sign * delta;
       }
     }
   }
@@ -58,16 +59,16 @@ void HuberLossGradKernel(const Context& dev_ctx,
 
   if (input_grad) {
     dev_ctx.template Alloc<T>(input_grad);
     auto eigen_input_grad = EigenVector<T>::Flatten(*input_grad);
-    eigen_input_grad.device(place) =
-        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, -1.0));
+    eigen_input_grad.device(place) = eigen_residual.unaryExpr(
+        HuberLossBackward<T>(delta_, static_cast<T>(-1.0)));
     eigen_input_grad.device(place) = eigen_out_grad * eigen_input_grad;
   }
 
   if (label_grad) {
     dev_ctx.template Alloc<T>(label_grad);
     auto eigen_label_grad = EigenVector<T>::Flatten(*label_grad);
-    eigen_label_grad.device(place) =
-        eigen_residual.unaryExpr(HuberLossBackward<T>(delta_, 1.0));
+    eigen_label_grad.device(place) = eigen_residual.unaryExpr(
+        HuberLossBackward<T>(delta_, static_cast<T>(1.0)));
     eigen_label_grad.device(place) = eigen_out_grad * eigen_label_grad;
   }
 }
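Note: the std::abs -> abs and static_cast<T>(...) changes above are what let these templates instantiate for phi::dtype::float16 and phi::dtype::bfloat16, which (unlike float and double) have no std::abs overload and no implicit conversion from bare int/double literals. Numerically, HuberLossBackward is just a clipped residual; a minimal NumPy sketch of what the gradient kernel computes (illustrative names, not Paddle API):

import numpy as np

def huber_loss_grad_ref(residual, out_grad, delta):
    # HuberLossBackward(delta, sign) equals sign * clip(residual, -delta, delta):
    # sign * residual in the quadratic region, saturating at +/- sign * delta
    clipped = np.clip(residual, -delta, delta)
    dx = out_grad * -clipped  # input gradient: sign = -1 (residual = label - input)
    dy = out_grad * clipped   # label gradient: sign = +1
    return dx, dy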
......
@@ -14,6 +14,7 @@
 #pragma once
 
+#include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
 #include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
 #include "paddle/phi/kernels/huber_loss_kernel.h"
@@ -25,7 +26,7 @@ struct HuberLossForward {
   HOSTDEVICE HuberLossForward(const T& delta) : delta(delta) {}
 
   HOSTDEVICE T operator()(const T& val) const {
-    T abs_val = std::abs(val);
+    T abs_val = abs(val);
     if (abs_val <= delta) {
       return static_cast<T>(0.5) * val * val;
     } else {
......
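Note: HuberLossForward is the standard Huber loss, quadratic for |residual| <= delta and linear beyond it. The linear branch is elided in the hunk above; for reference, the tests' huber_loss_forward helper computes the same piecewise function, along these lines:

def huber_loss_forward(val, delta):
    abs_val = abs(val)
    if abs_val <= delta:
        return 0.5 * val * val
    # linear branch; the two pieces join with matching value and slope
    # at |val| == delta
    return delta * (abs_val - 0.5 * delta)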
@@ -15,9 +15,10 @@
 import unittest
 
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
+from paddle.fluid import core
 
 
 def huber_loss_forward(val, delta):
@@ -40,20 +41,24 @@ class TestHuberLossOp(OpTest):
         self.python_api = huber_loss_wraper
         self.delta = 1.0
+        self.init_dtype()
         self.init_input()
         shape = self.set_shape()
         residual = self.inputs['Y'] - self.inputs['X']
         loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype(
-            'float32'
+            self.dtype
         )
         self.attrs = {'delta': self.delta}
         self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
 
+    def init_dtype(self):
+        self.dtype = np.float32
+
     def init_input(self):
         shape = self.set_shape()
         self.inputs = {
-            'X': np.random.uniform(0, 1.0, shape).astype('float32'),
-            'Y': np.random.uniform(0, 1.0, shape).astype('float32'),
+            'X': np.random.uniform(0, 1.0, shape).astype(self.dtype),
+            'Y': np.random.uniform(0, 1.0, shape).astype(self.dtype),
         }
 
     def set_shape(self):
@@ -66,14 +71,10 @@ class TestHuberLossOp(OpTest):
         self.check_grad(['X', 'Y'], 'Out')
 
     def test_check_grad_ingore_x(self):
-        self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.008, no_grad_set=set("residual")
-        )
+        self.check_grad(['Y'], 'Out', no_grad_set=set("residual"))
 
     def test_check_grad_ingore_y(self):
-        self.check_grad(
-            ['X'], 'Out', max_relative_error=0.008, no_grad_set=set('residual')
-        )
+        self.check_grad(['X'], 'Out', no_grad_set=set('residual'))
 
 
 class TestHuberLossOp1(TestHuberLossOp):
@@ -91,6 +92,72 @@ class TestHuberLossOp3(TestHuberLossOp):
         return (6, 6, 1)
 
 
+class TestHuberLossFP16Op(TestHuberLossOp):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support bfloat16",
+)
+class TestHuberLossBF16Op(OpTest):
+    def setUp(self):
+        self.op_type = 'huber_loss'
+        self.python_out_sig = ["Out"]
+        self.python_api = huber_loss_wraper
+        self.delta = 1.0
+        self.init_dtype()
+        self.init_input()
+        shape = self.set_shape()
+        residual = self.inputs['Y'] - self.inputs['X']
+        loss = np.vectorize(huber_loss_forward)(residual, self.delta).astype(
+            self.np_dtype
+        )
+        self.attrs = {'delta': self.delta}
+        self.outputs = {'Residual': residual, 'Out': loss.reshape(shape)}
+
+        self.place = core.CUDAPlace(0)
+        self.inputs['X'] = convert_float_to_uint16(self.inputs['X'])
+        self.inputs['Y'] = convert_float_to_uint16(self.inputs['Y'])
+        self.outputs['Residual'] = convert_float_to_uint16(
+            self.outputs['Residual']
+        )
+        self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
+
+    def init_dtype(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+
+    def init_input(self):
+        shape = self.set_shape()
+        self.inputs = {
+            'X': np.random.uniform(0, 1.0, shape).astype(self.np_dtype),
+            'Y': np.random.uniform(0, 1.0, shape).astype(self.np_dtype),
+        }
+
+    def set_shape(self):
+        return (100, 1)
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad_normal(self):
+        self.check_grad_with_place(self.place, ['X', 'Y'], 'Out')
+
+    def test_check_grad_ingore_x(self):
+        self.check_grad_with_place(
+            self.place, ['Y'], 'Out', no_grad_set=set("residual")
+        )
+
+    def test_check_grad_ingore_y(self):
+        self.check_grad_with_place(
+            self.place, ['X'], 'Out', no_grad_set=set('residual')
+        )
+
+
 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()
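Note: OpTest represents bfloat16 data as raw uint16 bit patterns (hence self.dtype = np.uint16 above). convert_float_to_uint16 truncates each float32 value to its upper 16 bits, which is the bfloat16 encoding; a minimal sketch of the idea (the real helper in eager_op_test handles a few extra cases):

import numpy as np

def float32_to_bfloat16_bits(a):
    # reinterpret float32 as uint32 and keep the high 16 bits:
    # sign, 8-bit exponent, and the top 7 mantissa bits
    a = np.asarray(a, dtype=np.float32)
    return (a.view(np.uint32) >> 16).astype(np.uint16)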
@@ -1084,10 +1084,16 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
         out = _C_ops.huber_loss(input, label, delta)
     else:
         check_variable_and_dtype(
-            input, 'input', ['float32', 'float64'], 'smooth_l1_loss'
+            input,
+            'input',
+            ['float16', 'float32', 'float64', 'uint16'],
+            'smooth_l1_loss',
         )
         check_variable_and_dtype(
-            label, 'label', ['float32', 'float64'], 'smooth_l1_loss'
+            label,
+            'label',
+            ['float16', 'float32', 'float64', 'uint16'],
+            'smooth_l1_loss',
         )
         helper = LayerHelper('huber_loss', **locals())
         residual = helper.create_variable_for_type_inference(
......
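Note: with the GPU kernels registered for float16/bfloat16 and the static-graph dtype check relaxed, smooth_l1_loss now accepts half-precision inputs. A quick usage sketch (assumes a CUDA build of Paddle; output values are illustrative):

import paddle
import paddle.nn.functional as F

paddle.device.set_device('gpu')  # the new float16/bfloat16 kernels are GPU-only
x = paddle.rand([4, 3]).astype('float16')
y = paddle.rand([4, 3]).astype('float16')
loss = F.smooth_l1_loss(x, y, delta=1.0)  # mean-reduced scalar by default
print(loss.dtype)  # paddle.float16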