Commit 4b3214c0 authored by A. Unique TensorFlower

Internal change

PiperOrigin-RevId: 341913626
Parent cb0ed243
......
@@ -4,7 +4,6 @@ runtime:
   mixed_precision_dtype: 'bfloat16'
 task:
   annotation_file: '/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/instances_val2017.json'
-  gradient_clip_norm: 10.0
   losses:
     l2_weight_decay: 4.0e-05
   model:
......
......
@@ -3,7 +3,6 @@ runtime:
   mixed_precision_dtype: 'bfloat16'
 task:
   annotation_file: '/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/instances_val2017.json'
-  gradient_clip_norm: 0.0
   losses:
     l2_weight_decay: 4.0e-05
   model:
......
......
@@ -4,7 +4,6 @@ runtime:
   mixed_precision_dtype: 'bfloat16'
 task:
   annotation_file: '/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/instances_val2017.json'
-  gradient_clip_norm: 0.0
   losses:
     l2_weight_decay: 4.0e-05
   model:
......
......
@@ -4,7 +4,6 @@ runtime:
   mixed_precision_dtype: 'bfloat16'
 task:
   annotation_file: '/readahead/200M/placer/prod/home/snaggletooth/test/data/coco/instances_val2017.json'
-  gradient_clip_norm: 0.0
   losses:
     l2_weight_decay: 4.0e-05
   model:
......
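All four YAML hunks above delete the same task-level key: `gradient_clip_norm` (10.0 in the first config, 0.0, i.e. disabled, in the other three). The commit message does not name a replacement; if the behavior is still wanted, one minimal sketch is to configure clipping on the optimizer instead. This assumes TF 2.4+, where `tf.keras` optimizers accept a `global_clipnorm` argument; the optimizer choice here is purely illustrative:

import tensorflow as tf

# Hypothetical stand-in for the removed task-level setting: have the
# optimizer itself clip gradients by global norm. 10.0 mirrors the value
# deleted from the first config above.
optimizer = tf.keras.optimizers.SGD(
    learning_rate=0.1, momentum=0.9, global_clipnorm=10.0)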
......
@@ -70,7 +70,6 @@ class ImageClassificationTask(cfg.TaskConfig):
   validation_data: DataConfig = DataConfig(is_training=False)
   losses: Losses = Losses()
   evaluation: Evaluation = Evaluation()
-  gradient_clip_norm: float = 0.0
   init_checkpoint: Optional[str] = None
   init_checkpoint_modules: str = 'all'  # all or backbone
......
......
@@ -207,7 +207,6 @@ class MaskRCNNTask(cfg.TaskConfig):
   init_checkpoint: Optional[str] = None
   init_checkpoint_modules: str = 'all'  # all or backbone
   annotation_file: Optional[str] = None
-  gradient_clip_norm: float = 0.0
   per_category_metrics: bool = False
   # If set, we only use masks for the specified class IDs.
   allowed_mask_class_ids: Optional[List[int]] = None
......
......
@@ -128,7 +128,6 @@ class RetinaNetTask(cfg.TaskConfig):
   init_checkpoint: Optional[str] = None
   init_checkpoint_modules: str = 'all'  # all or backbone
   annotation_file: Optional[str] = None
-  gradient_clip_norm: float = 0.0
   per_category_metrics: bool = False
......
......
@@ -87,7 +87,6 @@ class SemanticSegmentationTask(cfg.TaskConfig):
   train_data: DataConfig = DataConfig(is_training=True)
   validation_data: DataConfig = DataConfig(is_training=False)
   losses: Losses = Losses()
-  gradient_clip_norm: float = 0.0
   init_checkpoint: Optional[str] = None
   init_checkpoint_modules: Union[
       str, List[str]] = 'all'  # all, backbone, and/or decoder
......
......
@@ -97,7 +97,6 @@ class VideoClassificationTask(cfg.TaskConfig):
   train_data: DataConfig = DataConfig(is_training=True)
   validation_data: DataConfig = DataConfig(is_training=False)
   losses: Losses = Losses()
-  gradient_clip_norm: float = -1.0


 def add_trainer(experiment: cfg.ExperimentConfig,
......
......
@@ -180,11 +180,6 @@ class ImageClassificationTask(base_task.Task):
     if isinstance(
         optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
-    # Apply gradient clipping.
-    if self.task_config.gradient_clip_norm > 0:
-      grads, _ = tf.clip_by_global_norm(
-          grads, self.task_config.gradient_clip_norm)
     optimizer.apply_gradients(list(zip(grads, tvars)))
     logs = {self.loss: loss}
......
......
@@ -280,11 +280,6 @@ class MaskRCNNTask(base_task.Task):
     # Scales back gradient when LossScaleOptimizer is used.
     if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
-    # Apply gradient clipping.
-    if self.task_config.gradient_clip_norm > 0:
-      grads, _ = tf.clip_by_global_norm(
-          grads, self.task_config.gradient_clip_norm)
     optimizer.apply_gradients(list(zip(grads, tvars)))
     logs = {self.loss: losses['total_loss']}
......
......
@@ -218,11 +218,6 @@ class RetinaNetTask(base_task.Task):
     # Scales back gradient when LossScaleOptimizer is used.
     if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
-    # Apply gradient clipping.
-    if self.task_config.gradient_clip_norm > 0:
-      grads, _ = tf.clip_by_global_norm(
-          grads, self.task_config.gradient_clip_norm)
     optimizer.apply_gradients(list(zip(grads, tvars)))
     logs = {self.loss: loss}
......
......
@@ -188,11 +188,6 @@ class SemanticSegmentationTask(base_task.Task):
     # used.
     if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
-    # Apply gradient clipping.
-    if self.task_config.gradient_clip_norm > 0:
-      grads, _ = tf.clip_by_global_norm(
-          grads, self.task_config.gradient_clip_norm)
     optimizer.apply_gradients(list(zip(grads, tvars)))
     logs = {self.loss: loss}
......
......
@@ -160,11 +160,6 @@ class VideoClassificationTask(base_task.Task):
     # used.
     if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
       grads = optimizer.get_unscaled_gradients(grads)
-    # Apply gradient clipping.
-    if self.task_config.gradient_clip_norm > 0:
-      grads, _ = tf.clip_by_global_norm(
-          grads, self.task_config.gradient_clip_norm)
     optimizer.apply_gradients(list(zip(grads, tvars)))
     logs = {self.loss: loss}
......
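Each train_step hunk above removes the same clipping block. For reference, here is what the deleted `tf.clip_by_global_norm` call did, as a self-contained sketch with dummy gradients standing in for `tape.gradient(loss, tvars)`:

import tensorflow as tf

# Stand-ins for the per-variable gradients the tasks compute.
grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]

# tf.clip_by_global_norm rescales the whole list together so its combined
# L2 norm does not exceed clip_norm; it returns the clipped list and the
# pre-clipping global norm (which the deleted code discarded as `_`).
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=10.0)

print(global_norm.numpy())           # ~11.18 = sqrt(3^2 + 4^2 + 6^2 + 8^2)
print([g.numpy() for g in clipped])  # each gradient scaled by 10.0 / 11.18

Note that the removed guard was `if self.task_config.gradient_clip_norm > 0:`, so the 0.0 and -1.0 defaults deleted from the config classes meant clipping was already a no-op unless a config enabled it explicitly (as the first YAML file did with 10.0).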