Unverified commit 133dce63, authored by Haodong Duan, committed by GitHub

Fix name bug. (#47)

Parent 7dc99cf6
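The name matters because mmcv derives the learning-rate hook class from the `policy` string in `lr_config` (roughly `policy + 'LrUpdaterHook'`, see mmcv/runner/hooks/lr_updater.py), so the misspelled 'CosineAnealing' matches no existing hook and the runner errors out when it registers the LR scheduler. A minimal sketch of the corrected setting, with values chosen only for illustration:

# The policy string must spell an existing <Policy>LrUpdaterHook class in mmcv.
lr_config = dict(policy='CosineAnnealing', min_lr=0)
# The old spelling cannot be resolved:
# lr_config = dict(policy='CosineAnealing', min_lr=0)  # no CosineAnealingLrUpdaterHook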
@@ -105,7 +105,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
@@ -105,7 +105,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
@@ -116,7 +116,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
@@ -111,7 +111,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -111,7 +111,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -121,7 +121,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -102,7 +102,7 @@ optimizer = dict(
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
-policy='CosineAnealing',
+policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_by_epoch=True,
@@ -94,7 +94,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -102,7 +102,7 @@ optimizer = dict(
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
-policy='CosineAnealing',
+policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_by_epoch=True,
@@ -94,7 +94,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -104,7 +104,7 @@ optimizer = dict(
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
-lr_config = dict(policy='CosineAnealing', min_lr=0)
+lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
@@ -158,7 +158,7 @@ optimizer_config = dict( # Config used to build the optimizer hook
grad_clip=None) # Most of the methods do not use gradient clip
# learning policy
lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook
-policy='step', # Policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
+policy='step', # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
step=7) # Steps to decay the learning rate
total_epochs = 9 # Total epochs to train the model
@@ -364,7 +364,7 @@ optimizer_config = dict( # Config used to build the optimizer hook
grad_clip=dict(max_norm=40, norm_type=2)) # Use gradient clip
# learning policy
lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook
-policy='step', # Policy of scheduler, also support CosineAnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
+policy='step', # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
step=[40, 80]) # Steps to decay the learning rate
total_epochs = 100 # Total epochs to train the model
checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation
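As a quick sanity check on the corrected name, the hook can be built straight from mmcv's registry; the snippet below is a hypothetical verification sketch (it assumes an mmcv version that registers LR updater hooks in the `HOOKS` registry), not code from this commit:

from mmcv.runner import HOOKS
from mmcv.utils import build_from_cfg

# 'CosineAnnealing' expands to the hook type 'CosineAnnealingLrUpdaterHook'.
hook = build_from_cfg(dict(type='CosineAnnealingLrUpdaterHook', min_lr=0), HOOKS)
print(type(hook).__name__)  # CosineAnnealingLrUpdaterHook

# The misspelled name is not registered, so the same call would raise KeyError:
# build_from_cfg(dict(type='CosineAnealingLrUpdaterHook', min_lr=0), HOOKS)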