diff --git a/paddle/fluid/operators/yolov3_loss_op.cc b/paddle/fluid/operators/yolov3_loss_op.cc
index 66d618de596de84a0dd842139558f3e0bf646851..c76767dfdd464769ff8962a0512fd6a7705bef6c 100644
--- a/paddle/fluid/operators/yolov3_loss_op.cc
+++ b/paddle/fluid/operators/yolov3_loss_op.cc
@@ -57,7 +57,7 @@ class Yolov3LossOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_GT(class_num, 0,
                       "Attr(class_num) should be an integer greater then 0.");
 
-    std::vector<int64_t> dim_out({1});
+    std::vector<int64_t> dim_out({dim_x[0]});
     ctx->SetOutputDim("Loss", framework::make_ddim(dim_out));
   }
 
@@ -93,7 +93,7 @@ class Yolov3LossOpMaker : public framework::OpProtoAndCheckerMaker {
              "box class id.");
     AddOutput("Loss",
               "The output yolov3 loss tensor, "
-              "This is a 1-D tensor with shape of [1]");
+              "This is a 1-D tensor with shape of [N]");
     AddAttr<int>("class_num", "The number of classes to predict.");
     AddAttr<std::vector<int>>("anchors",
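
Note on the output contract above: "Loss" goes from a single batch scalar of
shape [1] to one loss value per input image, shape [N]. Callers that relied on
the old scalar can reduce the per-image vector themselves. A minimal NumPy
sketch of the new contract (the loss values here are made up):

import numpy as np

# Hypothetical per-image losses for a batch of N = 3, as the op now returns.
loss = np.array([1.7, 0.9, 2.4], dtype=np.float32)  # shape (N,), was (1,)
total = loss.mean()  # one way to recover a single scalar for logging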
diff --git a/paddle/fluid/operators/yolov3_loss_op.h b/paddle/fluid/operators/yolov3_loss_op.h
index fac06b4204948f03af4f57c6fb72fea74d84a7f9..837ea156013b8b7a124440f306345d16aaabc0e8 100644
--- a/paddle/fluid/operators/yolov3_loss_op.h
+++ b/paddle/fluid/operators/yolov3_loss_op.h
@@ -33,99 +33,99 @@ static inline bool isZero(T x) {
 }
 
 template <typename T>
-static inline T CalcMSEWithWeight(const Tensor& x, const Tensor& y,
-                                  const Tensor& weight, const T mf) {
-  int numel = static_cast<int>(x.numel());
+static inline void CalcMSEWithWeight(const Tensor& x, const Tensor& y,
+                                     const Tensor& weight, const T loss_weight,
+                                     T* loss) {
+  int n = x.dims()[0];
+  int stride = x.numel() / n;
   const T* x_data = x.data<T>();
   const T* y_data = y.data<T>();
   const T* weight_data = weight.data<T>();
-  T error_sum = 0.0;
-  for (int i = 0; i < numel; i++) {
-    T xi = x_data[i];
-    T yi = y_data[i];
-    T weighti = weight_data[i];
-    error_sum += pow(yi - xi, 2) * weighti;
+  for (int i = 0; i < n; i++) {
+    for (int j = 0; j < stride; j++) {
+      loss[i] += pow(y_data[j] - x_data[j], 2) * weight_data[j] * loss_weight;
+    }
+    x_data += stride;
+    y_data += stride;
+    weight_data += stride;
   }
-
-  return error_sum / mf;
 }
 
 template <typename T>
-static void CalcMSEGradWithWeight(Tensor* grad, const Tensor& x,
-                                  const Tensor& y, const Tensor& weight,
-                                  const T mf) {
-  int numel = static_cast<int>(grad->numel());
+static void CalcMSEGradWithWeight(const T* loss_grad, Tensor* grad,
+                                  const Tensor& x, const Tensor& y,
+                                  const Tensor& weight) {
+  int n = x.dims()[0];
+  int stride = x.numel() / n;
   T* grad_data = grad->data<T>();
   const T* x_data = x.data<T>();
   const T* y_data = y.data<T>();
   const T* weight_data = weight.data<T>();
 
-  for (int i = 0; i < numel; i++) {
-    grad_data[i] = 2.0 * weight_data[i] * (x_data[i] - y_data[i]) / mf;
+  for (int i = 0; i < n; i++) {
+    for (int j = 0; j < stride; j++) {
+      grad_data[j] =
+          2.0 * weight_data[j] * (x_data[j] - y_data[j]) * loss_grad[i];
+    }
+    grad_data += stride;
+    x_data += stride;
+    y_data += stride;
+    weight_data += stride;
   }
 }
 
 template <typename T>
-struct SigmoidCrossEntropyForward {
-  T operator()(const T& x, const T& label) const {
-    T term1 = (x > 0) ? x : 0;
-    T term2 = x * label;
-    T term3 = std::log(static_cast<T>(1.0) + std::exp(-(std::abs(x))));
-    return term1 - term2 + term3;
-  }
-};
-
-template <typename T>
-struct SigmoidCrossEntropyBackward {
-  T operator()(const T& x, const T& label) const {
-    T sigmoid_x =
-        static_cast<T>(1.0) / (static_cast<T>(1.0) + std::exp(-1.0 * x));
-    return sigmoid_x - label;
-  }
-};
-
-template <typename T>
-static inline T CalcSCEWithWeight(const Tensor& x, const Tensor& labels,
-                                  const Tensor& weight, const T mf) {
-  int numel = x.numel();
+static inline void CalcSCEWithWeight(const Tensor& x, const Tensor& label,
+                                     const Tensor& weight, const T loss_weight,
+                                     T* loss) {
+  int n = x.dims()[0];
+  int stride = x.numel() / n;
   const T* x_data = x.data<T>();
-  const T* labels_data = labels.data<T>();
+  const T* label_data = label.data<T>();
   const T* weight_data = weight.data<T>();
-  T loss = 0.0;
-  for (int i = 0; i < numel; i++) {
-    T xi = x_data[i];
-    T labeli = labels_data[i];
-    T weighti = weight_data[i];
-    loss += ((xi > 0.0 ? xi : 0.0) - xi * labeli +
-             std::log(1.0 + std::exp(-1.0 * std::abs(xi)))) *
-            weighti;
+  for (int i = 0; i < n; i++) {
+    for (int j = 0; j < stride; j++) {
+      T term1 = (x_data[j] > 0) ? x_data[j] : 0;
+      T term2 = x_data[j] * label_data[j];
+      T term3 = std::log(1.0 + std::exp(-std::abs(x_data[j])));
+      loss[i] += (term1 - term2 + term3) * weight_data[j] * loss_weight;
+    }
+    x_data += stride;
+    label_data += stride;
+    weight_data += stride;
   }
-  return loss / mf;
 }
 
 template <typename T>
-static inline void CalcSCEGradWithWeight(Tensor* grad, const Tensor& x,
-                                         const Tensor& labels,
-                                         const Tensor& weight, const T mf) {
-  int numel = grad->numel();
+static inline void CalcSCEGradWithWeight(const T* loss_grad, Tensor* grad,
+                                         const Tensor& x, const Tensor& label,
+                                         const Tensor& weight) {
+  int n = x.dims()[0];
+  int stride = x.numel() / n;
   T* grad_data = grad->data<T>();
   const T* x_data = x.data<T>();
-  const T* labels_data = labels.data<T>();
+  const T* label_data = label.data<T>();
   const T* weight_data = weight.data<T>();
 
-  for (int i = 0; i < numel; i++) {
-    grad_data[i] = (1.0 / (1.0 + std::exp(-1.0 * x_data[i])) - labels_data[i]) *
-                   weight_data[i] / mf;
+  for (int i = 0; i < n; i++) {
+    for (int j = 0; j < stride; j++) {
+      grad_data[j] = (1.0 / (1.0 + std::exp(-x_data[j])) - label_data[j]) *
+                     weight_data[j] * loss_grad[i];
+    }
+    grad_data += stride;
+    x_data += stride;
+    label_data += stride;
+    weight_data += stride;
   }
 }
 
 template <typename T>
-static void CalcPredResult(const Tensor& input, Tensor* pred_conf,
-                           Tensor* pred_class, Tensor* pred_x, Tensor* pred_y,
-                           Tensor* pred_w, Tensor* pred_h, const int anchor_num,
-                           const int class_num) {
+static void SplitPredResult(const Tensor& input, Tensor* pred_conf,
+                            Tensor* pred_class, Tensor* pred_x, Tensor* pred_y,
+                            Tensor* pred_w, Tensor* pred_h,
+                            const int anchor_num, const int class_num) {
   const int n = input.dims()[0];
   const int h = input.dims()[2];
   const int w = input.dims()[3];
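
For reference while reviewing the helpers above: CalcSCEWithWeight uses the
numerically stable form of sigmoid cross-entropy and CalcSCEGradWithWeight its
closed-form derivative. The same element-wise math in NumPy, as a quick
self-check rather than part of the patch:

import numpy as np

def stable_sce(x, label):
    # max(x, 0) - x*label + log(1 + exp(-|x|)) is the stable rewrite of
    # -label*log(sigmoid(x)) - (1 - label)*log(1 - sigmoid(x)).
    return np.maximum(x, 0) - x * label + np.log1p(np.exp(-np.abs(x)))

def stable_sce_grad(x, label):
    # Derivative with respect to x: sigmoid(x) - label.
    return 1.0 / (1.0 + np.exp(-x)) - label

x = np.array([-3.0, 0.5, 4.0])
label = np.array([0.0, 1.0, 1.0])
eps = 1e-5
numeric = (stable_sce(x + eps, label) - stable_sce(x - eps, label)) / (2 * eps)
assert np.allclose(numeric, stable_sce_grad(x, label), atol=1e-6)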
@@ -255,39 +255,20 @@ static void PreProcessGTBox(const Tensor& gt_box, const Tensor& gt_label,
   }
 }
 
-static void ExpandObjMaskByClassNum(Tensor* obj_mask_expand,
-                                    const Tensor& obj_mask) {
-  const int n = obj_mask_expand->dims()[0];
-  const int an_num = obj_mask_expand->dims()[1];
-  const int h = obj_mask_expand->dims()[2];
-  const int w = obj_mask_expand->dims()[3];
-  const int class_num = obj_mask_expand->dims()[4];
-  auto obj_mask_expand_t = EigenTensor<int, 5>::From(*obj_mask_expand);
-  auto obj_mask_t = EigenTensor<int, 4>::From(obj_mask);
-
-  obj_mask_expand_t = obj_mask_t.reshape(Array5(n, an_num, h, w, 1))
-                          .broadcast(Array5(1, 1, 1, 1, class_num));
-}
-
 template <typename T>
 static void AddAllGradToInputGrad(
-    Tensor* grad, T loss, const Tensor& pred_x, const Tensor& pred_y,
-    const Tensor& pred_conf, const Tensor& pred_class, const Tensor& grad_x,
-    const Tensor& grad_y, const Tensor& grad_w, const Tensor& grad_h,
-    const Tensor& grad_conf_target, const Tensor& grad_conf_notarget,
-    const Tensor& grad_class, const int class_num, const float loss_weight_xy,
-    const float loss_weight_wh, const float loss_weight_conf_target,
-    const float loss_weight_conf_notarget, const float loss_weight_class) {
-  const int n = pred_x.dims()[0];
-  const int an_num = pred_x.dims()[1];
-  const int h = pred_x.dims()[2];
-  const int w = pred_x.dims()[3];
+    Tensor* grad, const Tensor& grad_x, const Tensor& grad_y,
+    const Tensor& grad_w, const Tensor& grad_h, const Tensor& grad_conf_target,
+    const Tensor& grad_conf_notarget, const Tensor& grad_class,
+    const int class_num, const float loss_weight_xy, const float loss_weight_wh,
+    const float loss_weight_conf_target, const float loss_weight_conf_notarget,
+    const float loss_weight_class) {
+  const int n = grad_x.dims()[0];
+  const int an_num = grad_x.dims()[1];
+  const int h = grad_x.dims()[2];
+  const int w = grad_x.dims()[3];
   const int attr_num = class_num + 5;
   auto grad_t = EigenTensor<T, 4>::From(*grad).setConstant(0.0);
-  auto pred_x_t = EigenTensor<T, 4>::From(pred_x);
-  auto pred_y_t = EigenTensor<T, 4>::From(pred_y);
-  auto pred_conf_t = EigenTensor<T, 4>::From(pred_conf);
-  auto pred_class_t = EigenTensor<T, 5>::From(pred_class);
   auto grad_x_t = EigenTensor<T, 4>::From(grad_x);
   auto grad_y_t = EigenTensor<T, 4>::From(grad_y);
   auto grad_w_t = EigenTensor<T, 4>::From(grad_w);
@@ -300,23 +281,21 @@ static void AddAllGradToInputGrad(
   for (int i = 0; i < n; i++) {
     for (int j = 0; j < an_num; j++) {
       for (int k = 0; k < h; k++) {
         for (int l = 0; l < w; l++) {
-          grad_t(i, j * attr_num, k, l) =
-              grad_x_t(i, j, k, l) * loss * loss_weight_xy;
+          grad_t(i, j * attr_num, k, l) = grad_x_t(i, j, k, l) * loss_weight_xy;
           grad_t(i, j * attr_num + 1, k, l) =
-              grad_y_t(i, j, k, l) * loss * loss_weight_xy;
+              grad_y_t(i, j, k, l) * loss_weight_xy;
           grad_t(i, j * attr_num + 2, k, l) =
-              grad_w_t(i, j, k, l) * loss * loss_weight_wh;
+              grad_w_t(i, j, k, l) * loss_weight_wh;
           grad_t(i, j * attr_num + 3, k, l) =
-              grad_h_t(i, j, k, l) * loss * loss_weight_wh;
+              grad_h_t(i, j, k, l) * loss_weight_wh;
           grad_t(i, j * attr_num + 4, k, l) =
-              grad_conf_target_t(i, j, k, l) * loss * loss_weight_conf_target;
+              grad_conf_target_t(i, j, k, l) * loss_weight_conf_target;
           grad_t(i, j * attr_num + 4, k, l) +=
-              grad_conf_notarget_t(i, j, k, l) * loss *
-              loss_weight_conf_notarget;
+              grad_conf_notarget_t(i, j, k, l) * loss_weight_conf_notarget;
 
           for (int c = 0; c < class_num; c++) {
             grad_t(i, j * attr_num + 5 + c, k, l) =
-                grad_class_t(i, j, k, l, c) * loss * loss_weight_class;
+                grad_class_t(i, j, k, l, c) * loss_weight_class;
           }
         }
       }
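
Because "Loss" is now per-image, the incoming gradient is a vector too, so the
old scalar loss factor drops out of AddAllGradToInputGrad and each element-wise
helper scales by its sample's loss_grad[i] instead. The MSE case sketched in
NumPy, assuming (n, an_num, h, w) shapes as in the kernel:

import numpy as np

def mse_grad_with_weight(loss_grad, x, y, weight):
    # Mirrors CalcMSEGradWithWeight: d/dx of sum((y - x)**2 * weight) over one
    # sample is 2 * weight * (x - y), scaled by that sample's loss_grad[i].
    n = x.shape[0]
    return 2.0 * weight * (x - y) * loss_grad.reshape((n, 1, 1, 1))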
@@ -356,8 +335,8 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     pred_h.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     pred_conf.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     pred_class.mutable_data<T>({n, an_num, h, w, class_num}, ctx.GetPlace());
-    CalcPredResult<T>(*input, &pred_conf, &pred_class, &pred_x, &pred_y,
-                      &pred_w, &pred_h, an_num, class_num);
+    SplitPredResult<T>(*input, &pred_conf, &pred_class, &pred_x, &pred_y,
+                       &pred_w, &pred_h, an_num, class_num);
 
     Tensor obj_mask, noobj_mask;
     Tensor tx, ty, tw, th, tweight, tconf, tclass;
@@ -388,25 +367,18 @@ class Yolov3LossKernel : public framework::OpKernel<T> {
     obj_mask_expand_t = obj_mask_t.reshape(Array5(n, an_num, h, w, 1))
                             .broadcast(Array5(1, 1, 1, 1, class_num));
 
-    T box_f = static_cast<T>(an_num * h * w);
-    T class_f = static_cast<T>(an_num * h * w * class_num);
-    T loss_x = CalcSCEWithWeight<T>(pred_x, tx, obj_weight, box_f);
-    T loss_y = CalcSCEWithWeight<T>(pred_y, ty, obj_weight, box_f);
-    T loss_w = CalcMSEWithWeight<T>(pred_w, tw, obj_weight, box_f);
-    T loss_h = CalcMSEWithWeight<T>(pred_h, th, obj_weight, box_f);
-    T loss_conf_target =
-        CalcSCEWithWeight<T>(pred_conf, tconf, obj_mask, box_f);
-    T loss_conf_notarget =
-        CalcSCEWithWeight<T>(pred_conf, tconf, noobj_mask, box_f);
-    T loss_class =
-        CalcSCEWithWeight<T>(pred_class, tclass, obj_mask_expand, class_f);
-
-    auto* loss_data = loss->mutable_data<T>({1}, ctx.GetPlace());
-    loss_data[0] = loss_weight_xy * (loss_x + loss_y) +
-                   loss_weight_wh * (loss_w + loss_h) +
-                   loss_weight_conf_target * loss_conf_target +
-                   loss_weight_conf_notarget * loss_conf_notarget +
-                   loss_weight_class * loss_class;
+    T* loss_data = loss->mutable_data<T>({n}, ctx.GetPlace());
+    memset(loss_data, 0, n * sizeof(T));
+    CalcSCEWithWeight<T>(pred_x, tx, obj_weight, loss_weight_xy, loss_data);
+    CalcSCEWithWeight<T>(pred_y, ty, obj_weight, loss_weight_xy, loss_data);
+    CalcMSEWithWeight<T>(pred_w, tw, obj_weight, loss_weight_wh, loss_data);
+    CalcMSEWithWeight<T>(pred_h, th, obj_weight, loss_weight_wh, loss_data);
+    CalcSCEWithWeight<T>(pred_conf, tconf, obj_mask, loss_weight_conf_target,
+                         loss_data);
+    CalcSCEWithWeight<T>(pred_conf, tconf, noobj_mask,
+                         loss_weight_conf_notarget, loss_data);
+    CalcSCEWithWeight<T>(pred_class, tclass, obj_mask_expand, loss_weight_class,
+                         loss_data);
   }
 };
 
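
Two behavior notes on the forward kernel above: loss_data is zeroed and each
term accumulates into it per image, and the old box_f/class_f normalization
(the divisions by mf) is gone, so every entry is a plain weighted sum over that
image's elements. The equivalent reduction in NumPy, with made-up shapes:

import numpy as np

n, an_num, h, w = 3, 2, 5, 5
rng = np.random.RandomState(0)
pred, target = rng.rand(n, an_num, h, w), rng.rand(n, an_num, h, w)
weight = np.ones((n, an_num, h, w))

# Per-image weighted sum; note there is no division by an_num * h * w anymore.
per_image = ((target - pred) ** 2 * weight).reshape((n, -1)).sum(axis=1)
assert per_image.shape == (n,)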
@@ -421,8 +393,8 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     int class_num = ctx.Attr<int>("class_num");
     float ignore_thresh = ctx.Attr<float>("ignore_thresh");
     auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X"));
-    auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
-    const T loss = output_grad->data<T>()[0];
+    auto* loss_grad = ctx.Input<Tensor>(framework::GradVarName("Loss"));
+    const T* loss_grad_data = loss_grad->data<T>();
     int input_size = ctx.Attr<int>("input_size");
     float loss_weight_xy = ctx.Attr<float>("loss_weight_xy");
     float loss_weight_wh = ctx.Attr<float>("loss_weight_wh");
@@ -445,8 +417,8 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     pred_h.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     pred_conf.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     pred_class.mutable_data<T>({n, an_num, h, w, class_num}, ctx.GetPlace());
-    CalcPredResult<T>(*input, &pred_conf, &pred_class, &pred_x, &pred_y,
-                      &pred_w, &pred_h, an_num, class_num);
+    SplitPredResult<T>(*input, &pred_conf, &pred_class, &pred_x, &pred_y,
+                       &pred_w, &pred_h, an_num, class_num);
 
     Tensor obj_mask, noobj_mask;
     Tensor tx, ty, tw, th, tweight, tconf, tclass;
@@ -486,25 +458,23 @@ class Yolov3LossGradKernel : public framework::OpKernel<T> {
     grad_conf_target.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     grad_conf_notarget.mutable_data<T>({n, an_num, h, w}, ctx.GetPlace());
     grad_class.mutable_data<T>({n, an_num, h, w, class_num}, ctx.GetPlace());
-    T box_f = static_cast<T>(an_num * h * w);
-    T class_f = static_cast<T>(an_num * h * w * class_num);
-    CalcSCEGradWithWeight<T>(&grad_x, pred_x, tx, obj_weight, box_f);
-    CalcSCEGradWithWeight<T>(&grad_y, pred_y, ty, obj_weight, box_f);
-    CalcMSEGradWithWeight<T>(&grad_w, pred_w, tw, obj_weight, box_f);
-    CalcMSEGradWithWeight<T>(&grad_h, pred_h, th, obj_weight, box_f);
-    CalcSCEGradWithWeight<T>(&grad_conf_target, pred_conf, tconf, obj_mask,
-                             box_f);
-    CalcSCEGradWithWeight<T>(&grad_conf_notarget, pred_conf, tconf, noobj_mask,
-                             box_f);
-    CalcSCEGradWithWeight<T>(&grad_class, pred_class, tclass, obj_mask_expand,
-                             class_f);
+    CalcSCEGradWithWeight<T>(loss_grad_data, &grad_x, pred_x, tx, obj_weight);
+    CalcSCEGradWithWeight<T>(loss_grad_data, &grad_y, pred_y, ty, obj_weight);
+    CalcMSEGradWithWeight<T>(loss_grad_data, &grad_w, pred_w, tw, obj_weight);
+    CalcMSEGradWithWeight<T>(loss_grad_data, &grad_h, pred_h, th, obj_weight);
+    CalcSCEGradWithWeight<T>(loss_grad_data, &grad_conf_target, pred_conf,
+                             tconf, obj_mask);
+    CalcSCEGradWithWeight<T>(loss_grad_data, &grad_conf_notarget, pred_conf,
+                             tconf, noobj_mask);
+    CalcSCEGradWithWeight<T>(loss_grad_data, &grad_class, pred_class, tclass,
+                             obj_mask_expand);
 
     input_grad->mutable_data<T>({n, c, h, w}, ctx.GetPlace());
-    AddAllGradToInputGrad<T>(
-        input_grad, loss, pred_x, pred_y, pred_conf, pred_class, grad_x, grad_y,
-        grad_w, grad_h, grad_conf_target, grad_conf_notarget, grad_class,
-        class_num, loss_weight_xy, loss_weight_wh, loss_weight_conf_target,
-        loss_weight_conf_notarget, loss_weight_class);
+    AddAllGradToInputGrad<T>(input_grad, grad_x, grad_y, grad_w, grad_h,
+                             grad_conf_target, grad_conf_notarget, grad_class,
+                             class_num, loss_weight_xy, loss_weight_wh,
+                             loss_weight_conf_target, loss_weight_conf_notarget,
+                             loss_weight_class);
   }
 };
 
diff --git a/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py b/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py
index 07e7155bbf1fe75a80b9c901b08901eb9428c91b..26367f213b2d01f78bfa35aab44c7c1728c1cab1 100644
--- a/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py
+++ b/python/paddle/fluid/tests/unittests/test_yolov3_loss_op.py
@@ -23,15 +23,23 @@ from op_test import OpTest
 from paddle.fluid import core
 
 
-def mse(x, y, weight, num):
-    return ((y - x)**2 * weight).sum() / num
-
-
-def sce(x, label, weight, num):
+def mse(x, y, weight):
+    n = x.shape[0]
+    x = x.reshape((n, -1))
+    y = y.reshape((n, -1))
+    weight = weight.reshape((n, -1))
+    return ((y - x)**2 * weight).sum(axis=1)
+
+
+def sce(x, label, weight):
+    n = x.shape[0]
+    x = x.reshape((n, -1))
+    label = label.reshape((n, -1))
+    weight = weight.reshape((n, -1))
     sigmoid_x = expit(x)
     term1 = label * np.log(sigmoid_x)
     term2 = (1.0 - label) * np.log(1.0 - sigmoid_x)
-    return ((-term1 - term2) * weight).sum() / num
+    return ((-term1 - term2) * weight).sum(axis=1)
 
 
 def box_iou(box1, box2):
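
The Python reference sce above keeps the naive expit-based form while the C++
kernel uses the max/log1p rewrite; the two agree wherever expit does not
underflow. A purely illustrative check:

import numpy as np
from scipy.special import expit

x = np.linspace(-8.0, 8.0, 9)
label = np.full_like(x, 0.3)
naive = -(label * np.log(expit(x)) + (1.0 - label) * np.log(1.0 - expit(x)))
stable = np.maximum(x, 0) - x * label + np.log1p(np.exp(-np.abs(x)))
assert np.allclose(naive, stable)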
@@ -131,18 +139,16 @@ def YoloV3Loss(x, gtbox, gtlabel, attrs):
     tx, ty, tw, th, tweight, tconf, tcls, obj_mask, noobj_mask = build_target(
         gtbox, gtlabel, attrs, x.shape[2])
 
     obj_weight = obj_mask * tweight
     obj_mask_expand = np.tile(
         np.expand_dims(obj_mask, 4), (1, 1, 1, 1, int(attrs['class_num'])))
-    box_f = an_num * h * w
-    class_f = an_num * h * w * class_num
-    loss_x = sce(pred_x, tx, obj_weight, box_f)
-    loss_y = sce(pred_y, ty, obj_weight, box_f)
-    loss_w = mse(pred_w, tw, obj_weight, box_f)
-    loss_h = mse(pred_h, th, obj_weight, box_f)
-    loss_conf_target = sce(pred_conf, tconf, obj_mask, box_f)
-    loss_conf_notarget = sce(pred_conf, tconf, noobj_mask, box_f)
-    loss_class = sce(pred_cls, tcls, obj_mask_expand, class_f)
+    loss_x = sce(pred_x, tx, obj_weight)
+    loss_y = sce(pred_y, ty, obj_weight)
+    loss_w = mse(pred_w, tw, obj_weight)
+    loss_h = mse(pred_h, th, obj_weight)
+    loss_conf_target = sce(pred_conf, tconf, obj_mask)
+    loss_conf_notarget = sce(pred_conf, tconf, noobj_mask)
+    loss_class = sce(pred_cls, tcls, obj_mask_expand)
 
     return attrs['loss_weight_xy'] * (loss_x + loss_y) \
             + attrs['loss_weight_wh'] * (loss_w + loss_h) \
@@ -178,10 +184,7 @@ class TestYolov3LossOp(OpTest):
         }
 
         self.inputs = {'X': x, 'GTBox': gtbox, 'GTLabel': gtlabel}
-        self.outputs = {
-            'Loss': np.array(
-                [YoloV3Loss(x, gtbox, gtlabel, self.attrs)]).astype('float32')
-        }
+        self.outputs = {'Loss': YoloV3Loss(x, gtbox, gtlabel, self.attrs)}
 
     def test_check_output(self):
         place = core.CPUPlace()
@@ -193,20 +196,20 @@ class TestYolov3LossOp(OpTest):
             place, ['X'],
             'Loss',
             no_grad_set=set(["GTBox", "GTLabel"]),
-            max_relative_error=0.3)
+            max_relative_error=0.31)
 
     def initTestCase(self):
-        self.anchors = [10, 13, 12, 12]
-        self.class_num = 10
-        self.ignore_thresh = 0.7
+        self.anchors = [12, 12]
+        self.class_num = 5
+        self.ignore_thresh = 0.3
         self.input_size = 416
-        self.x_shape = (5, len(self.anchors) // 2 * (5 + self.class_num), 7, 7)
-        self.gtbox_shape = (5, 10, 4)
-        self.loss_weight_xy = 1.4
+        self.x_shape = (3, len(self.anchors) // 2 * (5 + self.class_num), 5, 5)
+        self.gtbox_shape = (3, 5, 4)
+        self.loss_weight_xy = 1.2
         self.loss_weight_wh = 0.8
-        self.loss_weight_conf_target = 1.1
-        self.loss_weight_conf_notarget = 0.9
-        self.loss_weight_class = 1.2
+        self.loss_weight_conf_target = 2.0
+        self.loss_weight_conf_notarget = 1.0
+        self.loss_weight_class = 1.5
 
 
 if __name__ == "__main__":
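
As a sanity check on the per-image chain rule exercised by test_check_grad, a
self-contained finite-difference comparison for the weighted MSE piece (names
and shapes are illustrative only, not the op's API):

import numpy as np

rng = np.random.RandomState(0)
x, y, weight = rng.rand(3, 4), rng.rand(3, 4), rng.rand(3, 4)
loss_grad = rng.rand(3)  # upstream gradient, one value per image

def loss_fn(v):
    return ((y - v) ** 2 * weight).sum(axis=1)  # shape (3,)

analytic = 2.0 * weight * (x - y) * loss_grad[:, None]

eps = 1e-6
numeric = np.zeros_like(x)
for i in range(x.shape[0]):
    for j in range(x.shape[1]):
        d = np.zeros_like(x)
        d[i, j] = eps
        numeric[i, j] = np.dot((loss_fn(x + d) - loss_fn(x - d)) / (2 * eps),
                               loss_grad)

assert np.allclose(numeric, analytic, rtol=1e-4, atol=1e-7)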