Unverified · Commit c7cf12fc authored by ccrrong and committed by GitHub

[phi] add yolov3_loss yaml and unittest (#44476)

* add yaml and unittest

* update yaml

* update backward yaml and unittest

* update yaml

* add Yolov3LossGradInferMeta

* update yolov3_loss_op.cc

* fix bug

* code format
Parent 9bf80772
......@@ -15,6 +15,7 @@
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/multiary.h"
namespace paddle {
......@@ -178,20 +179,6 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
class Yolov3LossOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"),
true,
platform::errors::NotFound("Input(X) should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Loss")),
true,
platform::errors::NotFound("Input(Loss@GRAD) should not be null"));
auto dim_x = ctx->GetInputDim("X");
if (ctx->HasOutput(framework::GradVarName("X"))) {
ctx->SetOutputDim(framework::GradVarName("X"), dim_x);
}
}
protected:
framework::OpKernelType GetExpectedKernelType(
......@@ -234,10 +221,15 @@ namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss,
Yolov3LossInferShapeFunctor,
PD_INFER_META(phi::Yolov3LossInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(yolov3_loss_grad,
Yolov3LossGradInferShapeFunctor,
PD_INFER_META(phi::Yolov3LossGradInferMeta));
REGISTER_OPERATOR(yolov3_loss,
ops::Yolov3LossOp,
ops::Yolov3LossOpMaker,
ops::Yolov3LossGradMaker<paddle::framework::OpDesc>,
ops::Yolov3LossGradMaker<paddle::imperative::OpBase>,
Yolov3LossInferShapeFunctor);
REGISTER_OPERATOR(yolov3_loss_grad, ops::Yolov3LossOpGrad);
REGISTER_OPERATOR(yolov3_loss_grad,
ops::Yolov3LossOpGrad,
Yolov3LossGradInferShapeFunctor);
......@@ -2626,6 +2626,18 @@
func : yolo_box
data_type : x
# yolov3_loss
- api : yolov3_loss
args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
output : Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
infer_meta :
func : Yolov3LossInferMeta
kernel :
func : yolov3_loss
data_type : x
optional : gt_score
backward : yolov3_loss_grad
- api : zeros_like
args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {})
output : Tensor
......
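
Roughly, the `yolov3_loss` entry above surfaces in eager mode as the generated `_C_ops.final_state_yolov3_loss` call (the one used by `paddle.vision.ops.yolo_loss` further down in this commit): arguments follow the yaml `args` order and the three outputs follow the `output` line. A minimal sketch, with tensor shapes assumed for illustration and eager dygraph mode assumed to be active:

```python
import paddle
from paddle import _C_ops  # low-level eager ops; the public wrapper is paddle.vision.ops.yolo_loss

# Assumed shapes: batch 2, 3 masked anchors, 5 classes -> 3 * (5 + 5) = 30 channels.
x = paddle.rand([2, 30, 7, 7], dtype='float64')
gt_box = paddle.rand([2, 10, 4], dtype='float64')
gt_label = paddle.randint(0, 5, [2, 10], dtype='int32')

# Argument order mirrors the yaml `args`; gt_score is optional and passed as None here.
loss, objectness_mask, gt_match_mask = _C_ops.final_state_yolov3_loss(
    x, gt_box, gt_label, None,
    [10, 13, 16, 30, 33, 23], [0, 1, 2],  # anchors, anchor_mask
    5, 0.7, 32, True, 1.0)  # class_num, ignore_thresh, downsample_ratio, use_label_smooth, scale_x_y
```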
......@@ -2531,3 +2531,13 @@
kernel :
func : where_grad
no_need_buffer : x, y
- backward_api : yolov3_loss_grad
forward : yolov3_loss(Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0) -> Tensor(loss), Tensor(objectness_mask), Tensor(gt_match_mask)
args : (Tensor x, Tensor gt_box, Tensor gt_label, Tensor gt_score, Tensor objectness_mask, Tensor gt_match_mask, Tensor loss_grad, int[] anchors, int[] anchor_mask, int class_num, float ignore_thresh, int downsample_ratio, bool use_label_smooth=true, float scale_x_y=1.0)
output : Tensor(x_grad), Tensor(gt_box_grad), Tensor(gt_label_grad), Tensor(gt_score_grad)
infer_meta :
func : Yolov3LossGradInferMeta
kernel :
func : yolov3_loss_grad
optional : gt_score
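
In dygraph, this backward entry is reached through an ordinary `backward()` call. Note that `Yolov3LossGradInferMeta` (added below) only propagates dims/dtype to `x_grad`; the ground-truth inputs are labels, so no gradients are produced for them even though they appear in `output`. A minimal sketch using the public API, with the same assumed shapes as above:

```python
import paddle

x = paddle.rand([2, 30, 7, 7], dtype='float64')  # 3 * (5 + 5) = 30 channels
x.stop_gradient = False
gt_box = paddle.rand([2, 10, 4], dtype='float64')
gt_label = paddle.randint(0, 5, [2, 10], dtype='int32')

loss = paddle.vision.ops.yolo_loss(x, gt_box, gt_label,
                                   anchors=[10, 13, 16, 30, 33, 23],
                                   anchor_mask=[0, 1, 2],
                                   class_num=5,
                                   ignore_thresh=0.7,
                                   downsample_ratio=32)
loss.sum().backward()

# x_grad takes the shape and dtype of x, as set by Yolov3LossGradInferMeta.
assert x.grad.shape == x.shape
```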
......@@ -828,4 +828,28 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
x_grad->set_dtype(out_grad[0]->dtype());
}
void Yolov3LossGradInferMeta(const MetaTensor& x,
const MetaTensor& gt_box,
const MetaTensor& gt_label,
const MetaTensor& gt_score,
const MetaTensor& objectness_mask,
const MetaTensor& gt_match_mask,
const MetaTensor& loss_grad,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
float ignore_thresh,
int downsample_ratio,
bool use_label_smooth,
float scale_x_y,
MetaTensor* x_grad,
MetaTensor* gt_box_grad,
MetaTensor* gt_label_grad,
MetaTensor* gt_score_grad) {
if (x_grad) {
x_grad->set_dims(x.dims());
x_grad->set_dtype(x.dtype());
}
}
} // namespace phi
......@@ -328,4 +328,23 @@ void UnStackGradInferMeta(const std::vector<const MetaTensor*>& out_grad,
int axis,
MetaTensor* x_grad);
void Yolov3LossGradInferMeta(const MetaTensor& x,
const MetaTensor& gt_box,
const MetaTensor& gt_label,
const MetaTensor& gt_score,
const MetaTensor& objectness_mask,
const MetaTensor& gt_match_mask,
const MetaTensor& loss_grad,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
float ignore_thresh,
int downsample_ratio,
bool use_label_smooth,
float scale_x_y,
MetaTensor* x_grad,
MetaTensor* gt_box_grad,
MetaTensor* gt_label_grad,
MetaTensor* gt_score_grad);
} // namespace phi
......@@ -122,9 +122,9 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
const paddle::optional<DenseTensor>& gt_score,
const DenseTensor& loss_grad,
const DenseTensor& objectness_mask,
const DenseTensor& gt_match_mask,
const DenseTensor& loss_grad,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
......
......@@ -24,9 +24,9 @@ void Yolov3LossGradKernel(const Context& dev_ctx,
const DenseTensor& gt_box,
const DenseTensor& gt_label,
const paddle::optional<DenseTensor>& gt_score,
const DenseTensor& loss_grad,
const DenseTensor& objectness_mask,
const DenseTensor& gt_match_mask,
const DenseTensor& loss_grad,
const std::vector<int>& anchors,
const std::vector<int>& anchor_mask,
int class_num,
......
......@@ -37,9 +37,9 @@ KernelSignature Yolov3LossGradOpArgumentMapping(
"GTBox",
"GTLabel",
"GTScore",
"Loss@GRAD",
"ObjectnessMask",
"GTMatchMask"},
"GTMatchMask",
"Loss@GRAD"},
{"anchors",
"anchor_mask",
"class_num",
......
......@@ -169,11 +169,41 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs):
gt_matches.astype('int32'))
def yolo_loss_wrapper(x,
gt_box,
gt_label,
gt_score=None,
anchors=[
10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116,
90, 156, 198, 373, 326
],
anchor_mask=[0, 1, 2],
class_num=5,
ignore_thresh=0.7,
downsample_ratio=32,
use_label_smooth=True,
scale_x_y=1.):
loss = paddle.vision.ops.yolo_loss(x,
gt_box=gt_box,
gt_label=gt_label,
anchors=anchors,
anchor_mask=anchor_mask,
class_num=class_num,
ignore_thresh=ignore_thresh,
downsample_ratio=downsample_ratio,
gt_score=gt_score,
use_label_smooth=use_label_smooth,
scale_x_y=scale_x_y)
return loss
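
The wrapper above is what `OpTest` invokes as `python_api` for the eager checks; `python_out_sig = ['Loss']` in `setUp` presumably maps its single return value onto the op's `Loss` output. A hypothetical standalone call, with shapes chosen to match the wrapper defaults (`len(anchor_mask) * (5 + class_num) = 3 * 10 = 30` channels):

```python
import paddle

x = paddle.rand([2, 30, 7, 7], dtype='float64')
gt_box = paddle.rand([2, 10, 4], dtype='float64')
gt_label = paddle.randint(0, 5, [2, 10], dtype='int32')

loss = yolo_loss_wrapper(x, gt_box, gt_label)  # gt_score defaults to None
print(loss.shape)  # expected: [2], one loss value per batch sample
```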
class TestYolov3LossOp(OpTest):
def setUp(self):
self.initTestCase()
self.op_type = 'yolov3_loss'
self.python_api = yolo_loss_wrapper
self.python_out_sig = ['Loss']
x = logit(np.random.uniform(0, 1, self.x_shape).astype('float64'))
gtbox = np.random.random(size=self.gtbox_shape).astype('float64')
gtlabel = np.random.randint(0, self.class_num, self.gtbox_shape[:2])
......@@ -212,11 +242,14 @@ class TestYolov3LossOp(OpTest):
def test_check_output(self):
place = core.CPUPlace()
self.check_output_with_place(place, atol=2e-3)
self.check_output_with_place(place, atol=2e-3, check_eager=True)
def test_check_grad_ignore_gtbox(self):
place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Loss', max_relative_error=0.2)
self.check_grad_with_place(place, ['X'],
'Loss',
max_relative_error=0.2,
check_eager=True)
def initTestCase(self):
self.anchors = [
......
......@@ -186,6 +186,12 @@ def yolo_loss(x,
scale_x_y=1.)
"""
if in_dygraph_mode():
loss, _, _ = _C_ops.final_state_yolov3_loss(
x, gt_box, gt_label, gt_score, anchors, anchor_mask, class_num,
ignore_thresh, downsample_ratio, use_label_smooth, scale_x_y)
return loss
if _non_static_mode():
loss, _, _ = _C_ops.yolov3_loss(
x, gt_box, gt_label, gt_score, 'anchors', anchors, 'anchor_mask',
......