From 2a7bcfc82294c7856ad63a07cc27a4dfd90696ab Mon Sep 17 00:00:00 2001
From: Kolesnikov Sergey
Date: Sat, 14 Apr 2018 15:20:28 +0300
Subject: [PATCH] loss weights

---
 mrcnn/config.py | 10 ++++++++++
 mrcnn/model.py  | 34 +++++++++++++++++++++-------------
 2 files changed, 31 insertions(+), 13 deletions(-)

diff --git a/mrcnn/config.py b/mrcnn/config.py
index 77314b8..a651c18 100644
--- a/mrcnn/config.py
+++ b/mrcnn/config.py
@@ -164,6 +164,16 @@ class Config(object):
     # Weight decay regularization
     WEIGHT_DECAY = 0.0001
 
+    # Loss weights for more precise optimization.
+    # Can be used for R-CNN training setup.
+    LOSS_WEIGHTS = {
+        "rpn_class_loss": 1.,
+        "rpn_bbox_loss": 1.,
+        "mrcnn_class_loss": 1.,
+        "mrcnn_bbox_loss": 1.,
+        "mrcnn_mask_loss": 1.
+    }
+
     # Use RPN ROIs or externally generated ROIs for training
     # Keep this True for most situations. Set to False if you want to train
     # the head branches on ROI generated by code rather than the ROIs from
diff --git a/mrcnn/model.py b/mrcnn/model.py
index 185438b..b48da56 100644
--- a/mrcnn/model.py
+++ b/mrcnn/model.py
@@ -2122,31 +2122,37 @@ class MaskRCNN():
         metrics. Then calls the Keras compile() function.
         """
         # Optimizer object
-        optimizer = keras.optimizers.SGD(lr=learning_rate, momentum=momentum,
-                                         clipnorm=self.config.GRADIENT_CLIP_NORM)
+        optimizer = keras.optimizers.SGD(
+            lr=learning_rate, momentum=momentum,
+            clipnorm=self.config.GRADIENT_CLIP_NORM)
         # Add Losses
         # First, clear previously set losses to avoid duplication
         self.keras_model._losses = []
         self.keras_model._per_input_losses = {}
-        loss_names = ["rpn_class_loss", "rpn_bbox_loss",
-                      "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
+        loss_names = [
+            "rpn_class_loss", "rpn_bbox_loss",
+            "mrcnn_class_loss", "mrcnn_bbox_loss", "mrcnn_mask_loss"]
         for name in loss_names:
             layer = self.keras_model.get_layer(name)
             if layer.output in self.keras_model.losses:
                 continue
-            self.keras_model.add_loss(
-                tf.reduce_mean(layer.output, keep_dims=True))
+            loss = (
+                tf.reduce_mean(layer.output, keep_dims=True)
+                * self.config.LOSS_WEIGHTS.get(name, 1.))
+            self.keras_model.add_loss(loss)
 
         # Add L2 Regularization
         # Skip gamma and beta weights of batch normalization layers.
-        reg_losses = [keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
-                      for w in self.keras_model.trainable_weights
-                      if 'gamma' not in w.name and 'beta' not in w.name]
+        reg_losses = [
+            keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)
+            for w in self.keras_model.trainable_weights
+            if 'gamma' not in w.name and 'beta' not in w.name]
         self.keras_model.add_loss(tf.add_n(reg_losses))
 
         # Compile
-        self.keras_model.compile(optimizer=optimizer, loss=[
-                                 None] * len(self.keras_model.outputs))
+        self.keras_model.compile(
+            optimizer=optimizer,
+            loss=[None] * len(self.keras_model.outputs))
 
         # Add metrics for losses
         for name in loss_names:
@@ -2154,8 +2160,10 @@ class MaskRCNN():
                 continue
             layer = self.keras_model.get_layer(name)
             self.keras_model.metrics_names.append(name)
-            self.keras_model.metrics_tensors.append(tf.reduce_mean(
-                layer.output, keep_dims=True))
+            loss = (
+                tf.reduce_mean(layer.output, keep_dims=True)
+                * self.config.LOSS_WEIGHTS.get(name, 1.))
+            self.keras_model.metrics_tensors.append(loss)
 
     def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):
         """Sets model layers as trainable if their names match
-- 
GitLab
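
Usage note (not part of the patch): with this change, compile() scales each loss tensor by LOSS_WEIGHTS.get(name, 1.), so any name missing from the dict silently falls back to a weight of 1.0. A minimal sketch of how a training setup might override the weights follows; the five loss names and the 1.0 fallback come from the patch itself, while the subclass name and the example values 2.0 and 0.5 are purely illustrative:

    from mrcnn.config import Config

    class WeightedConfig(Config):
        """Hypothetical config that re-balances the five Mask R-CNN losses."""
        NAME = "weighted_experiment"  # illustrative name, not from the patch

        # Keys must match the loss layer names in model.py; any key left out
        # is weighted 1.0 via LOSS_WEIGHTS.get(name, 1.).
        LOSS_WEIGHTS = {
            "rpn_class_loss": 1.,
            "rpn_bbox_loss": 1.,
            "mrcnn_class_loss": 1.,
            "mrcnn_bbox_loss": 2.,   # e.g. emphasize box regression
            "mrcnn_mask_loss": 0.5,  # e.g. soften the mask head
        }

Note that the metrics loop in compile() appends the same weighted tensors to metrics_tensors, so the per-loss numbers reported during training reflect the weighted values rather than the raw losses.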