提交 4338730e 编写于 作者: littletomatodonkey

add ls training for dygraph

上级 6a0dce8a
......@@ -14,3 +14,5 @@
from .resnet_name import *
from .dpn import DPN68
from .densenet import DenseNet121
from .hrnet import HRNet_W18_C
\ No newline at end of file
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
......@@ -268,26 +286,26 @@ class DenseNet(fluid.dygraph.Layer):
return y
def DenseNet121():
model = DenseNet(layers=121)
def DenseNet121(**args):
    """Construct a 121-layer DenseNet; extra kwargs are forwarded to DenseNet."""
    return DenseNet(layers=121, **args)
def DenseNet161():
model = DenseNet(layers=161)
def DenseNet161(**args):
    """Construct a 161-layer DenseNet; extra kwargs are forwarded to DenseNet."""
    return DenseNet(layers=161, **args)
def DenseNet169():
model = DenseNet(layers=169)
def DenseNet169(**args):
    """Construct a 169-layer DenseNet; extra kwargs are forwarded to DenseNet."""
    return DenseNet(layers=169, **args)
def DenseNet201():
model = DenseNet(layers=201)
def DenseNet201(**args):
    """Construct a 201-layer DenseNet; extra kwargs are forwarded to DenseNet."""
    return DenseNet(layers=201, **args)
def DenseNet264():
model = DenseNet(layers=264)
def DenseNet264(**args):
    """Construct a 264-layer DenseNet; extra kwargs are forwarded to DenseNet."""
    return DenseNet(layers=264, **args)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import paddle
......@@ -386,26 +404,26 @@ class DPN(fluid.dygraph.Layer):
return net_arg
def DPN68():
model = DPN(layers=68)
def DPN68(**args):
    """Construct a 68-layer DPN; extra kwargs are forwarded to DPN."""
    return DPN(layers=68, **args)
def DPN92():
model = DPN(layers=92)
def DPN92(**args):
    """Construct a 92-layer DPN; extra kwargs are forwarded to DPN."""
    return DPN(layers=92, **args)
def DPN98():
model = DPN(layers=98)
def DPN98(**args):
    """Construct a 98-layer DPN; extra kwargs are forwarded to DPN."""
    return DPN(layers=98, **args)
def DPN107():
model = DPN(layers=107)
def DPN107(**args):
    """Construct a 107-layer DPN; extra kwargs are forwarded to DPN."""
    return DPN(layers=107, **args)
def DPN131():
model = DPN(layers=131)
return model
\ No newline at end of file
def DPN131(**args):
    """Construct a 131-layer DPN; extra kwargs are forwarded to DPN."""
    return DPN(layers=131, **args)
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import paddle
import paddle.fluid as fluid
......@@ -647,81 +665,81 @@ class HRNet(fluid.dygraph.Layer):
return y
def HRNet_W18_C():
model = HRNet(width=18)
def HRNet_W18_C(**args):
    """Construct an HRNet-W18 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=18, **args)
def HRNet_W30_C():
model = HRNet(width=30)
def HRNet_W30_C(**args):
    """Construct an HRNet-W30 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=30, **args)
def HRNet_W32_C():
model = HRNet(width=32)
def HRNet_W32_C(**args):
    """Construct an HRNet-W32 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=32, **args)
def HRNet_W40_C():
model = HRNet(width=40)
def HRNet_W40_C(**args):
    """Construct an HRNet-W40 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=40, **args)
def HRNet_W44_C():
model = HRNet(width=44)
def HRNet_W44_C(**args):
    """Construct an HRNet-W44 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=44, **args)
def HRNet_W48_C():
model = HRNet(width=48)
def HRNet_W48_C(**args):
    """Construct an HRNet-W48 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=48, **args)
def HRNet_W60_C():
model = HRNet(width=60)
def HRNet_W60_C(**args):
    """Construct an HRNet-W60 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=60, **args)
def HRNet_W64_C():
model = HRNet(width=64)
def HRNet_W64_C(**args):
    """Construct an HRNet-W64 classifier; extra kwargs are forwarded to HRNet."""
    return HRNet(width=64, **args)
def SE_HRNet_W18_C():
model = HRNet(width=18, has_se=True)
def SE_HRNet_W18_C(**args):
    """Construct an HRNet-W18 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=18, has_se=True, **args)
def SE_HRNet_W30_C():
model = HRNet(width=30, has_se=True)
def SE_HRNet_W30_C(**args):
    """Construct an HRNet-W30 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=30, has_se=True, **args)
def SE_HRNet_W32_C():
model = HRNet(width=32, has_se=True)
def SE_HRNet_W32_C(**args):
    """Construct an HRNet-W32 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=32, has_se=True, **args)
def SE_HRNet_W40_C():
model = HRNet(width=40, has_se=True)
def SE_HRNet_W40_C(**args):
    """Construct an HRNet-W40 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=40, has_se=True, **args)
def SE_HRNet_W44_C():
model = HRNet(width=44, has_se=True)
def SE_HRNet_W44_C(**args):
    """Construct an HRNet-W44 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=44, has_se=True, **args)
def SE_HRNet_W48_C():
model = HRNet(width=48, has_se=True)
def SE_HRNet_W48_C(**args):
    """Construct an HRNet-W48 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=48, has_se=True, **args)
def SE_HRNet_W60_C():
model = HRNet(width=60, has_se=True)
def SE_HRNet_W60_C(**args):
    """Construct an HRNet-W60 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=60, has_se=True, **args)
def SE_HRNet_W64_C():
model = HRNet(width=64, has_se=True)
def SE_HRNet_W64_C(**args):
    """Construct an HRNet-W64 classifier with SE blocks enabled (has_se=True)."""
    return HRNet(width=64, has_se=True, **args)
......@@ -338,20 +338,29 @@ def run(dataloader, config, net, optimizer=None, epoch=0, mode='train'):
Returns:
"""
topk_name = 'top{}'.format(config.topk)
metric_list = OrderedDict([
("loss", AverageMeter('loss', '7.4f')),
("top1", AverageMeter('top1', '.4f')),
(topk_name, AverageMeter(topk_name, '.4f')),
("lr", AverageMeter(
'lr', 'f', need_avg=False)),
("batch_time", AverageMeter('elapse', '.3f')),
])
use_mix = config.get("use_mix", False) and mode == "train"
if use_mix:
metric_list = OrderedDict([
("loss", AverageMeter('loss', '7.4f')),
("lr", AverageMeter(
'lr', 'f', need_avg=False)),
("batch_time", AverageMeter('elapse', '.3f')),
])
else:
topk_name = 'top{}'.format(config.topk)
metric_list = OrderedDict([
("loss", AverageMeter('loss', '7.4f')),
("top1", AverageMeter('top1', '.4f')),
(topk_name, AverageMeter(topk_name, '.4f')),
("lr", AverageMeter(
'lr', 'f', need_avg=False)),
("batch_time", AverageMeter('elapse', '.3f')),
])
tic = time.time()
for idx, batch in enumerate(dataloader()):
bs = len(batch[0])
feeds = create_feeds(batch, config.get("use_mix", False))
feeds = create_feeds(batch, use_mix)
fetchs = compute(feeds, net, config, mode)
if mode == 'train':
avg_loss = net.scale_loss(fetchs['loss'])
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册