提交 d0b456c6 编写于 作者: M michaelowenliu

Merge remote-tracking branch 'upstream/develop' into develop

# 动态图执行
## 下载及添加路径
```
git clone https://github.com/PaddlePaddle/PaddleSeg
cd PaddleSeg
export PYTHONPATH=$PYTHONPATH:`pwd`
cd dygraph
```
## 训练
```
python3 train.py --model_name unet \
--dataset OpticDiscSeg \
--input_size 192 192 \
--iters 10 \
--save_interval_iters 1 \
--do_eval \
--save_dir output
```
## 评估
```
python3 val.py --model_name unet \
--dataset OpticDiscSeg \
--input_size 192 192 \
--model_dir output/best_model
```
## 预测
```
python3 infer.py --model_name unet \
--dataset OpticDiscSeg \
--model_dir output/best_model \
--input_size 192 192
```
......
...@@ -19,7 +19,8 @@ from paddle.fluid.dygraph.parallel import ParallelEnv ...@@ -19,7 +19,8 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from dygraph.datasets import DATASETS from dygraph.datasets import DATASETS
import dygraph.transforms as T import dygraph.transforms as T
from dygraph.models import MODELS #from dygraph.models import MODELS
from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info from dygraph.utils import get_environ_info
from dygraph.utils import logger from dygraph.utils import logger
from dygraph.core import train from dygraph.core import train
...@@ -33,7 +34,7 @@ def parse_args(): ...@@ -33,7 +34,7 @@ def parse_args():
'--model_name', '--model_name',
dest='model_name', dest='model_name',
help='Model type for training, which is one of {}'.format( help='Model type for training, which is one of {}'.format(
str(list(MODELS.keys()))), str(list(manager.MODELS.components_dict.keys()))),
type=str, type=str,
default='UNet') default='UNet')
...@@ -161,18 +162,15 @@ def main(args): ...@@ -161,18 +162,15 @@ def main(args):
eval_dataset = None eval_dataset = None
if args.do_eval: if args.do_eval:
eval_transforms = T.Compose( eval_transforms = T.Compose(
[T.Resize(args.input_size), [T.Padding((2049, 1025)),
T.Normalize()]) T.Normalize()])
eval_dataset = dataset( eval_dataset = dataset(
dataset_root=args.dataset_root, dataset_root=args.dataset_root,
transforms=eval_transforms, transforms=eval_transforms,
mode='val') mode='val')
if args.model_name not in MODELS: model = manager.MODELS[args.model_name](
raise Exception( num_classes=train_dataset.num_classes)
'`--model_name` is invalid. it should be one of {}'.format(
str(list(MODELS.keys()))))
model = MODELS[args.model_name](num_classes=train_dataset.num_classes)
# Creat optimizer # Creat optimizer
# todo, may less one than len(loader) # todo, may less one than len(loader)
...@@ -195,7 +193,6 @@ def main(args): ...@@ -195,7 +193,6 @@ def main(args):
save_dir=args.save_dir, save_dir=args.save_dir,
iters=args.iters, iters=args.iters,
batch_size=args.batch_size, batch_size=args.batch_size,
pretrained_model=args.pretrained_model,
resume_model=args.resume_model, resume_model=args.resume_model,
save_interval_iters=args.save_interval_iters, save_interval_iters=args.save_interval_iters,
log_iters=args.log_iters, log_iters=args.log_iters,
......
...@@ -19,7 +19,8 @@ from paddle.fluid.dygraph.parallel import ParallelEnv ...@@ -19,7 +19,8 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from dygraph.datasets import DATASETS from dygraph.datasets import DATASETS
import dygraph.transforms as T import dygraph.transforms as T
from dygraph.models import MODELS #from dygraph.models import MODELS
from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info from dygraph.utils import get_environ_info
from dygraph.utils import logger from dygraph.utils import logger
from dygraph.core import train from dygraph.core import train
...@@ -33,7 +34,7 @@ def parse_args(): ...@@ -33,7 +34,7 @@ def parse_args():
'--model_name', '--model_name',
dest='model_name', dest='model_name',
help='Model type for training, which is one of {}'.format( help='Model type for training, which is one of {}'.format(
str(list(MODELS.keys()))), str(list(manager.MODELS.components_dict.keys()))),
type=str, type=str,
default='UNet') default='UNet')
...@@ -166,11 +167,9 @@ def main(args): ...@@ -166,11 +167,9 @@ def main(args):
transforms=eval_transforms, transforms=eval_transforms,
mode='val') mode='val')
if args.model_name not in MODELS: model = manager.MODELS[args.model_name](
raise Exception( num_classes=train_dataset.num_classes,
'`--model_name` is invalid. it should be one of {}'.format( pretrained_model=args.pretrained_model)
str(list(MODELS.keys()))))
model = MODELS[args.model_name](num_classes=train_dataset.num_classes)
# Creat optimizer # Creat optimizer
# todo, may less one than len(loader) # todo, may less one than len(loader)
...@@ -193,7 +192,6 @@ def main(args): ...@@ -193,7 +192,6 @@ def main(args):
save_dir=args.save_dir, save_dir=args.save_dir,
iters=args.iters, iters=args.iters,
batch_size=args.batch_size, batch_size=args.batch_size,
pretrained_model=args.pretrained_model,
resume_model=args.resume_model, resume_model=args.resume_model,
save_interval_iters=args.save_interval_iters, save_interval_iters=args.save_interval_iters,
log_iters=args.log_iters, log_iters=args.log_iters,
......
...@@ -34,7 +34,6 @@ def train(model, ...@@ -34,7 +34,6 @@ def train(model,
save_dir='output', save_dir='output',
iters=10000, iters=10000,
batch_size=2, batch_size=2,
pretrained_model=None,
resume_model=None, resume_model=None,
save_interval_iters=1000, save_interval_iters=1000,
log_iters=10, log_iters=10,
...@@ -47,8 +46,6 @@ def train(model, ...@@ -47,8 +46,6 @@ def train(model,
start_iter = 0 start_iter = 0
if resume_model is not None: if resume_model is not None:
start_iter = resume(model, optimizer, resume_model) start_iter = resume(model, optimizer, resume_model)
elif pretrained_model is not None:
load_pretrained_model(model, pretrained_model)
if not os.path.isdir(save_dir): if not os.path.isdir(save_dir):
if os.path.exists(save_dir): if os.path.exists(save_dir):
...@@ -126,7 +123,6 @@ def train(model, ...@@ -126,7 +123,6 @@ def train(model,
log_writer.add_scalar('Train/reader_cost', log_writer.add_scalar('Train/reader_cost',
avg_train_reader_cost, iter) avg_train_reader_cost, iter)
avg_loss = 0.0 avg_loss = 0.0
timer.restart()
if (iter % save_interval_iters == 0 if (iter % save_interval_iters == 0
or iter == iters) and ParallelEnv().local_rank == 0: or iter == iters) and ParallelEnv().local_rank == 0:
...@@ -162,5 +158,6 @@ def train(model, ...@@ -162,5 +158,6 @@ def train(model,
log_writer.add_scalar('Evaluate/mIoU', mean_iou, iter) log_writer.add_scalar('Evaluate/mIoU', mean_iou, iter)
log_writer.add_scalar('Evaluate/aAcc', avg_acc, iter) log_writer.add_scalar('Evaluate/aAcc', avg_acc, iter)
model.train() model.train()
timer.restart()
if use_vdl: if use_vdl:
log_writer.close() log_writer.close()
...@@ -19,7 +19,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv ...@@ -19,7 +19,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from dygraph.datasets import DATASETS from dygraph.datasets import DATASETS
import dygraph.transforms as T import dygraph.transforms as T
from dygraph.models import MODELS from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info from dygraph.utils import get_environ_info
from dygraph.core import infer from dygraph.core import infer
...@@ -32,7 +32,7 @@ def parse_args(): ...@@ -32,7 +32,7 @@ def parse_args():
'--model_name', '--model_name',
dest='model_name', dest='model_name',
help='Model type for testing, which is one of {}'.format( help='Model type for testing, which is one of {}'.format(
str(list(MODELS.keys()))), str(list(manager.MODELS.components_dict.keys()))),
type=str, type=str,
default='UNet') default='UNet')
...@@ -99,11 +99,8 @@ def main(args): ...@@ -99,11 +99,8 @@ def main(args):
transforms=test_transforms, transforms=test_transforms,
mode='test') mode='test')
if args.model_name not in MODELS: model = manager.MODELS[args.model_name](
raise Exception( num_classes=test_dataset.num_classes)
'`--model_name` is invalid. it should be one of {}'.format(
str(list(MODELS.keys()))))
model = MODELS[args.model_name](num_classes=test_dataset.num_classes)
infer( infer(
model, model,
......
...@@ -14,37 +14,5 @@ ...@@ -14,37 +14,5 @@
from .architectures import * from .architectures import *
from .unet import UNet from .unet import UNet
from .hrnet import *
from .deeplab import * from .deeplab import *
from .fcn import *
# MODELS = {
# "UNet": UNet,
# "HRNet_W18_Small_V1": HRNet_W18_Small_V1,
# "HRNet_W18_Small_V2": HRNet_W18_Small_V2,
# "HRNet_W18": HRNet_W18,
# "HRNet_W30": HRNet_W30,
# "HRNet_W32": HRNet_W32,
# "HRNet_W40": HRNet_W40,
# "HRNet_W44": HRNet_W44,
# "HRNet_W48": HRNet_W48,
# "HRNet_W60": HRNet_W48,
# "HRNet_W64": HRNet_W64,
# "SE_HRNet_W18_Small_V1": SE_HRNet_W18_Small_V1,
# "SE_HRNet_W18_Small_V2": SE_HRNet_W18_Small_V2,
# "SE_HRNet_W18": SE_HRNet_W18,
# "SE_HRNet_W30": SE_HRNet_W30,
# "SE_HRNet_W32": SE_HRNet_W30,
# "SE_HRNet_W40": SE_HRNet_W40,
# "SE_HRNet_W44": SE_HRNet_W44,
# "SE_HRNet_W48": SE_HRNet_W48,
# "SE_HRNet_W60": SE_HRNet_W60,
# "SE_HRNet_W64": SE_HRNet_W64,
# "DeepLabV3P": DeepLabV3P,
# "deeplabv3p_resnet101_vd": deeplabv3p_resnet101_vd,
# "deeplabv3p_resnet101_vd_os8": deeplabv3p_resnet101_vd_os8,
# "deeplabv3p_resnet50_vd": deeplabv3p_resnet50_vd,
# "deeplabv3p_resnet50_vd_os8": deeplabv3p_resnet50_vd_os8,
# "deeplabv3p_xception65_deeplab": deeplabv3p_xception65_deeplab,
# "deeplabv3p_mobilenetv3_large": deeplabv3p_mobilenetv3_large,
# "deeplabv3p_mobilenetv3_small": deeplabv3p_mobilenetv3_small
# }
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import layer_utils
from .hrnet import *
from .resnet_vd import *
from .xception_deeplab import *
from .mobilenetv3 import *
...@@ -20,20 +20,38 @@ from paddle.fluid.param_attr import ParamAttr ...@@ -20,20 +20,38 @@ from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.initializer import Normal from paddle.fluid.initializer import Normal
from paddle.fluid.dygraph import SyncBatchNorm as BatchNorm from paddle.nn import SyncBatchNorm as BatchNorm
from dygraph.cvlibs import manager
__all__ = [ __all__ = [
"HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30", "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30",
"HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64"
"HRNet_W64", "SE_HRNet_W18_Small_V1", "SE_HRNet_W18_Small_V2",
"SE_HRNet_W18", "SE_HRNet_W30", "SE_HRNet_W32", "SE_HRNet_W40",
"SE_HRNet_W44", "SE_HRNet_W48", "SE_HRNet_W60", "SE_HRNet_W64"
] ]
class HRNet(fluid.dygraph.Layer): class HRNet(fluid.dygraph.Layer):
"""
HRNet:Deep High-Resolution Representation Learning for Visual Recognition
https://arxiv.org/pdf/1908.07919.pdf.
Args:
stage1_num_modules (int): number of modules for stage1. Default 1.
stage1_num_blocks (list): number of blocks per module for stage1. Default [4].
stage1_num_channels (list): number of channels per branch for stage1. Default [64].
stage2_num_modules (int): number of modules for stage2. Default 1.
stage2_num_blocks (list): number of blocks per module for stage2. Default [4, 4]
stage2_num_channels (list): number of channels per branch for stage2. Default [18, 36].
stage3_num_modules (int): number of modules for stage3. Default 4.
stage3_num_blocks (list): number of blocks per module for stage3. Default [4, 4, 4]
stage3_num_channels (list): number of channels per branch for stage3. Default [18, 36, 72].
stage4_num_modules (int): number of modules for stage4. Default 3.
stage4_num_blocks (list): number of blocks per module for stage4. Default [4, 4, 4, 4]
stage4_num_channels (list): number of channels per branch for stage4. Default [18, 36, 72. 144].
has_se (bool): whether to use Squeeze-and-Excitation module. Default False.
"""
def __init__(self, def __init__(self,
num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -46,11 +64,9 @@ class HRNet(fluid.dygraph.Layer): ...@@ -46,11 +64,9 @@ class HRNet(fluid.dygraph.Layer):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[18, 36, 72, 144], stage4_num_channels=[18, 36, 72, 144],
has_se=False, has_se=False):
ignore_index=255):
super(HRNet, self).__init__() super(HRNet, self).__init__()
self.num_classes = num_classes
self.stage1_num_modules = stage1_num_modules self.stage1_num_modules = stage1_num_modules
self.stage1_num_blocks = stage1_num_blocks self.stage1_num_blocks = stage1_num_blocks
self.stage1_num_channels = stage1_num_channels self.stage1_num_channels = stage1_num_channels
...@@ -64,8 +80,6 @@ class HRNet(fluid.dygraph.Layer): ...@@ -64,8 +80,6 @@ class HRNet(fluid.dygraph.Layer):
self.stage4_num_blocks = stage4_num_blocks self.stage4_num_blocks = stage4_num_blocks
self.stage4_num_channels = stage4_num_channels self.stage4_num_channels = stage4_num_channels
self.has_se = has_se self.has_se = has_se
self.ignore_index = ignore_index
self.EPS = 1e-5
self.conv_layer1_1 = ConvBNLayer( self.conv_layer1_1 = ConvBNLayer(
num_channels=3, num_channels=3,
...@@ -112,6 +126,7 @@ class HRNet(fluid.dygraph.Layer): ...@@ -112,6 +126,7 @@ class HRNet(fluid.dygraph.Layer):
num_modules=self.stage3_num_modules, num_modules=self.stage3_num_modules,
num_blocks=self.stage3_num_blocks, num_blocks=self.stage3_num_blocks,
num_filters=self.stage3_num_channels, num_filters=self.stage3_num_channels,
has_se=self.has_se,
name="st3") name="st3")
self.tr3 = TransitionLayer( self.tr3 = TransitionLayer(
...@@ -123,24 +138,9 @@ class HRNet(fluid.dygraph.Layer): ...@@ -123,24 +138,9 @@ class HRNet(fluid.dygraph.Layer):
num_modules=self.stage4_num_modules, num_modules=self.stage4_num_modules,
num_blocks=self.stage4_num_blocks, num_blocks=self.stage4_num_blocks,
num_filters=self.stage4_num_channels, num_filters=self.stage4_num_channels,
has_se=self.has_se,
name="st4") name="st4")
last_inp_channels = sum(self.stage4_num_channels)
self.conv_last_2 = ConvBNLayer(
num_channels=last_inp_channels,
num_filters=last_inp_channels,
filter_size=1,
stride=1,
name='conv-2')
self.conv_last_1 = Conv2D(
num_channels=last_inp_channels,
num_filters=self.num_classes,
filter_size=1,
stride=1,
padding=0,
param_attr=ParamAttr(
initializer=Normal(scale=0.001), name='conv-1_weights'))
def forward(self, x, label=None, mode='train'): def forward(self, x, label=None, mode='train'):
input_shape = x.shape[2:] input_shape = x.shape[2:]
conv1 = self.conv_layer1_1(x) conv1 = self.conv_layer1_1(x)
...@@ -162,40 +162,8 @@ class HRNet(fluid.dygraph.Layer): ...@@ -162,40 +162,8 @@ class HRNet(fluid.dygraph.Layer):
x2 = fluid.layers.resize_bilinear(st4[2], out_shape=(x0_h, x0_w)) x2 = fluid.layers.resize_bilinear(st4[2], out_shape=(x0_h, x0_w))
x3 = fluid.layers.resize_bilinear(st4[3], out_shape=(x0_h, x0_w)) x3 = fluid.layers.resize_bilinear(st4[3], out_shape=(x0_h, x0_w))
x = fluid.layers.concat([st4[0], x1, x2, x3], axis=1) x = fluid.layers.concat([st4[0], x1, x2, x3], axis=1)
x = self.conv_last_2(x)
logit = self.conv_last_1(x) return x
logit = fluid.layers.resize_bilinear(logit, input_shape)
if self.training:
if label is None:
raise Exception('Label is need during training')
return self._get_loss(logit, label)
else:
score_map = fluid.layers.softmax(logit, axis=1)
score_map = fluid.layers.transpose(score_map, [0, 2, 3, 1])
pred = fluid.layers.argmax(score_map, axis=3)
pred = fluid.layers.unsqueeze(pred, axes=[3])
return pred, score_map
def _get_loss(self, logit, label):
logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
label = fluid.layers.transpose(label, [0, 2, 3, 1])
mask = label != self.ignore_index
mask = fluid.layers.cast(mask, 'float32')
loss, probs = fluid.layers.softmax_with_cross_entropy(
logit,
label,
ignore_index=self.ignore_index,
return_softmax=True,
axis=-1)
loss = loss * mask
avg_loss = fluid.layers.mean(loss) / (
fluid.layers.mean(mask) + self.EPS)
label.stop_gradient = True
mask.stop_gradient = True
return avg_loss
class ConvBNLayer(fluid.dygraph.Layer): class ConvBNLayer(fluid.dygraph.Layer):
...@@ -698,189 +666,9 @@ class LastClsOut(fluid.dygraph.Layer): ...@@ -698,189 +666,9 @@ class LastClsOut(fluid.dygraph.Layer):
return outs return outs
def HRNet_W18_Small_V1(num_classes): @manager.BACKBONES.add_component
model = HRNet( def HRNet_W18_Small_V1(**kwargs):
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[1],
stage1_num_channels=[32],
stage2_num_modules=1,
stage2_num_blocks=[2, 2],
stage2_num_channels=[16, 32],
stage3_num_modules=1,
stage3_num_blocks=[2, 2, 2],
stage3_num_channels=[16, 32, 64],
stage4_num_modules=1,
stage4_num_blocks=[2, 2, 2, 2],
stage4_num_channels=[16, 32, 64, 128])
return model
def HRNet_W18_Small_V2(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[2],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[2, 2],
stage2_num_channels=[18, 36],
stage3_num_modules=1,
stage3_num_blocks=[2, 2, 2],
stage3_num_channels=[18, 36, 72],
stage4_num_modules=1,
stage4_num_blocks=[2, 2, 2, 2],
stage4_num_channels=[18, 36, 72, 144])
return model
def HRNet_W18(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[18, 36],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[18, 36, 72],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[18, 36, 72, 144])
return model
def HRNet_W30(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[30, 60],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[30, 60, 120],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[30, 60, 120, 240])
return model
def HRNet_W32(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[32, 64],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[32, 64, 128],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[32, 64, 128, 256])
return model
def HRNet_W40(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[40, 80],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[40, 80, 160],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[40, 80, 160, 320])
return model
def HRNet_W44(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[44, 88],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[44, 88, 176],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[44, 88, 176, 352])
return model
def HRNet_W48(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[48, 96],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[48, 96, 192],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[48, 96, 192, 384])
return model
def HRNet_W60(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[60, 120],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[60, 120, 240],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[60, 120, 240, 480])
return model
def HRNet_W64(num_classes):
model = HRNet(
num_classes=num_classes,
stage1_num_modules=1,
stage1_num_blocks=[4],
stage1_num_channels=[64],
stage2_num_modules=1,
stage2_num_blocks=[4, 4],
stage2_num_channels=[64, 128],
stage3_num_modules=4,
stage3_num_blocks=[4, 4, 4],
stage3_num_channels=[64, 128, 256],
stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[64, 128, 256, 512])
return model
def SE_HRNet_W18_Small_V1(num_classes):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[1], stage1_num_blocks=[1],
stage1_num_channels=[32], stage1_num_channels=[32],
...@@ -893,13 +681,13 @@ def SE_HRNet_W18_Small_V1(num_classes): ...@@ -893,13 +681,13 @@ def SE_HRNet_W18_Small_V1(num_classes):
stage4_num_modules=1, stage4_num_modules=1,
stage4_num_blocks=[2, 2, 2, 2], stage4_num_blocks=[2, 2, 2, 2],
stage4_num_channels=[16, 32, 64, 128], stage4_num_channels=[16, 32, 64, 128],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W18_Small_V2(num_classes): @manager.BACKBONES.add_component
def HRNet_W18_Small_V2(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[2], stage1_num_blocks=[2],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -912,13 +700,13 @@ def SE_HRNet_W18_Small_V2(num_classes): ...@@ -912,13 +700,13 @@ def SE_HRNet_W18_Small_V2(num_classes):
stage4_num_modules=1, stage4_num_modules=1,
stage4_num_blocks=[2, 2, 2, 2], stage4_num_blocks=[2, 2, 2, 2],
stage4_num_channels=[18, 36, 72, 144], stage4_num_channels=[18, 36, 72, 144],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W18(num_classes): @manager.BACKBONES.add_component
def HRNet_W18(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -931,13 +719,13 @@ def SE_HRNet_W18(num_classes): ...@@ -931,13 +719,13 @@ def SE_HRNet_W18(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[18, 36, 72, 144], stage4_num_channels=[18, 36, 72, 144],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W30(num_classes): @manager.BACKBONES.add_component
def HRNet_W30(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -950,13 +738,13 @@ def SE_HRNet_W30(num_classes): ...@@ -950,13 +738,13 @@ def SE_HRNet_W30(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[30, 60, 120, 240], stage4_num_channels=[30, 60, 120, 240],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W32(num_classes): @manager.BACKBONES.add_component
def HRNet_W32(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -969,13 +757,13 @@ def SE_HRNet_W32(num_classes): ...@@ -969,13 +757,13 @@ def SE_HRNet_W32(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[32, 64, 128, 256], stage4_num_channels=[32, 64, 128, 256],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W40(num_classes): @manager.BACKBONES.add_component
def HRNet_W40(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -988,13 +776,13 @@ def SE_HRNet_W40(num_classes): ...@@ -988,13 +776,13 @@ def SE_HRNet_W40(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[40, 80, 160, 320], stage4_num_channels=[40, 80, 160, 320],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W44(num_classes): @manager.BACKBONES.add_component
def HRNet_W44(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -1007,13 +795,13 @@ def SE_HRNet_W44(num_classes): ...@@ -1007,13 +795,13 @@ def SE_HRNet_W44(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[44, 88, 176, 352], stage4_num_channels=[44, 88, 176, 352],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W48(num_classes): @manager.BACKBONES.add_component
def HRNet_W48(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -1026,13 +814,13 @@ def SE_HRNet_W48(num_classes): ...@@ -1026,13 +814,13 @@ def SE_HRNet_W48(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[48, 96, 192, 384], stage4_num_channels=[48, 96, 192, 384],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W60(num_classes): @manager.BACKBONES.add_component
def HRNet_W60(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -1045,13 +833,13 @@ def SE_HRNet_W60(num_classes): ...@@ -1045,13 +833,13 @@ def SE_HRNet_W60(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[60, 120, 240, 480], stage4_num_channels=[60, 120, 240, 480],
has_se=True) **kwargs)
return model return model
def SE_HRNet_W64(num_classes): @manager.BACKBONES.add_component
def HRNet_W64(**kwargs):
model = HRNet( model = HRNet(
num_classes=num_classes,
stage1_num_modules=1, stage1_num_modules=1,
stage1_num_blocks=[4], stage1_num_blocks=[4],
stage1_num_channels=[64], stage1_num_channels=[64],
...@@ -1064,5 +852,5 @@ def SE_HRNet_W64(num_classes): ...@@ -1064,5 +852,5 @@ def SE_HRNet_W64(num_classes):
stage4_num_modules=3, stage4_num_modules=3,
stage4_num_blocks=[4, 4, 4, 4], stage4_num_blocks=[4, 4, 4, 4],
stage4_num_channels=[64, 128, 256, 512], stage4_num_channels=[64, 128, 256, 512],
has_se=True) **kwargs)
return model return model
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.initializer import Normal
from paddle.nn import SyncBatchNorm as BatchNorm
from dygraph.cvlibs import manager
from dygraph import utils
__all__ = [
"fcn_hrnet_w18_small_v1", "fcn_hrnet_w18_small_v2", "fcn_hrnet_w18",
"fcn_hrnet_w30", "fcn_hrnet_w32", "fcn_hrnet_w40", "fcn_hrnet_w44",
"fcn_hrnet_w48", "fcn_hrnet_w60", "fcn_hrnet_w64"
]
class FCN(fluid.dygraph.Layer):
    """
    Fully Convolutional Network for semantic segmentation
    (https://arxiv.org/abs/1411.4038), used here as a generic
    "backbone features + 1x1 classification head" wrapper.

    Args:
        backbone (str): key of a backbone registered in ``manager.BACKBONES``
            (e.g. one of the HRNet variants).
        num_classes (int): the unique number of target classes.
        in_channels (int): number of channels of the feature map produced
            by the backbone.
        channels (int, optional): channels of the conv layer before the
            classifier. Defaults to ``in_channels``.
        pretrained_model (str, optional): path of a pretrained model loaded
            by ``init_weight``. Defaults to None.
        ignore_index (int): ground-truth value ignored while computing the
            loss or doing evaluation. Default 255.
        **kwargs: forwarded verbatim to the backbone constructor.
    """
    def __init__(self,
                 backbone,
                 num_classes,
                 in_channels,
                 channels=None,
                 pretrained_model=None,
                 ignore_index=255,
                 **kwargs):
        super(FCN, self).__init__()
        self.num_classes = num_classes
        self.ignore_index = ignore_index
        # Guards against division by zero in _get_loss when every pixel
        # is masked out.
        self.EPS = 1e-5
        if channels is None:
            channels = in_channels
        # Backbone is looked up by name in the component registry; extra
        # kwargs pass straight through to its constructor.
        self.backbone = manager.BACKBONES[backbone](**kwargs)
        # 1x1 conv + BN refining the backbone feature map.
        self.conv_last_2 = ConvBNLayer(
            num_channels=in_channels,
            num_filters=channels,
            filter_size=1,
            stride=1,
            name='conv-2')
        # 1x1 classifier producing per-class logits.
        self.conv_last_1 = Conv2D(
            num_channels=channels,
            num_filters=self.num_classes,
            filter_size=1,
            stride=1,
            padding=0,
            param_attr=ParamAttr(
                initializer=Normal(scale=0.001), name='conv-1_weights'))
        self.init_weight(pretrained_model)

    def forward(self, x, label=None, mode='train'):
        """Run the network.

        When ``self.training`` is true, ``label`` is required and the scalar
        loss is returned; otherwise returns ``(pred, score_map)`` in NHWC
        layout.  NOTE(review): the ``mode`` argument is unused here.
        """
        input_shape = x.shape[2:]
        x = self.backbone(x)
        x = self.conv_last_2(x)
        logit = self.conv_last_1(x)
        # Upsample logits back to the input spatial size.
        logit = fluid.layers.resize_bilinear(logit, input_shape)
        if self.training:
            if label is None:
                raise Exception('Label is need during training')
            return self._get_loss(logit, label)
        else:
            score_map = fluid.layers.softmax(logit, axis=1)
            # NCHW -> NHWC before the per-pixel argmax.
            score_map = fluid.layers.transpose(score_map, [0, 2, 3, 1])
            pred = fluid.layers.argmax(score_map, axis=3)
            pred = fluid.layers.unsqueeze(pred, axes=[3])
            return pred, score_map

    def init_weight(self, pretrained_model=None):
        """
        Initialize the parameters of model parts.

        Args:
            pretrained_model (str, optional): the pretrained_model path of
                backbone. Defaults to None (no loading).

        Raises:
            Exception: if ``pretrained_model`` is given but the path does
                not exist.
        """
        if pretrained_model is not None:
            if os.path.exists(pretrained_model):
                # NOTE(review): the same checkpoint is loaded twice --
                # first into the backbone alone, then into the full model;
                # confirm the double load is intentional.
                utils.load_pretrained_model(self.backbone, pretrained_model)
                utils.load_pretrained_model(self, pretrained_model)
            else:
                raise Exception('Pretrained model is not found: {}'.format(
                    pretrained_model))

    def _get_loss(self, logit, label):
        """
        Compute the masked softmax cross-entropy forward loss.

        Args:
            logit (tensor): model output logits, NCHW.
            label (tensor): ground truth, NCHW -- presumably a single
                channel of class ids; TODO confirm against the dataset
                pipeline.

        Returns:
            avg_loss (tensor): mean loss over non-ignored pixels.
        """
        logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
        label = fluid.layers.transpose(label, [0, 2, 3, 1])
        # Mask out pixels labelled with ignore_index.
        mask = label != self.ignore_index
        mask = fluid.layers.cast(mask, 'float32')
        # NOTE(review): probs is returned but unused -- return_softmax
        # could likely be False; left as-is.
        loss, probs = fluid.layers.softmax_with_cross_entropy(
            logit,
            label,
            ignore_index=self.ignore_index,
            return_softmax=True,
            axis=-1)
        loss = loss * mask
        # Normalize by the fraction of valid pixels; EPS avoids
        # division by zero when all pixels are ignored.
        avg_loss = fluid.layers.mean(loss) / (
            fluid.layers.mean(mask) + self.EPS)
        label.stop_gradient = True
        mask.stop_gradient = True
        return avg_loss
class ConvBNLayer(fluid.dygraph.Layer):
    """
    A Conv2D followed by batch normalization and an optional ReLU.

    Args:
        num_channels (int): number of input channels.
        num_filters (int): number of output channels.
        filter_size (int): size of the (square) convolution kernel.
        stride (int, optional): convolution stride. Default: 1.
        groups (int, optional): groups for grouped convolution. Default: 1.
        act (str, optional): activation name; only 'relu' is applied, any
            other value means no activation. Default: "relu".
        name (str, optional): prefix used to name the layer parameters.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act="relu",
                 name=None):
        super(ConvBNLayer, self).__init__()
        self.act = act
        # (filter_size - 1) // 2 gives 'same'-style padding for odd kernels.
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            param_attr=ParamAttr(
                initializer=Normal(scale=0.001), name=name + "_weights"),
            bias_attr=False)
        prefix = name + '_bn'
        self._batch_norm = BatchNorm(
            num_filters,
            weight_attr=ParamAttr(
                name=prefix + '_scale',
                initializer=fluid.initializer.Constant(1.0)),
            bias_attr=ParamAttr(
                prefix + '_offset',
                initializer=fluid.initializer.Constant(0.0)))

    def forward(self, input):
        out = self._batch_norm(self._conv(input))
        return fluid.layers.relu(out) if self.act == 'relu' else out
@manager.MODELS.add_component
def fcn_hrnet_w18_small_v1(*args, **kwargs):
    """Build an FCN with an HRNet_W18_Small_V1 backbone (in_channels=240)."""
    # Forward positional args too; they were previously silently dropped,
    # unlike the other registered model factories (e.g. ``unet``).
    return FCN(*args, backbone='HRNet_W18_Small_V1', in_channels=240, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w18_small_v2(*args, **kwargs):
    """Build an FCN with an HRNet_W18_Small_V2 backbone (in_channels=270)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W18_Small_V2', in_channels=270, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w18(*args, **kwargs):
    """Build an FCN with an HRNet_W18 backbone (in_channels=270)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W18', in_channels=270, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w30(*args, **kwargs):
    """Build an FCN with an HRNet_W30 backbone (in_channels=450)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W30', in_channels=450, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w32(*args, **kwargs):
    """Build an FCN with an HRNet_W32 backbone (in_channels=480)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W32', in_channels=480, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w40(*args, **kwargs):
    """Build an FCN with an HRNet_W40 backbone (in_channels=600)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W40', in_channels=600, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w44(*args, **kwargs):
    """Build an FCN with an HRNet_W44 backbone (in_channels=660)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W44', in_channels=660, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w48(*args, **kwargs):
    """Build an FCN with an HRNet_W48 backbone (in_channels=720)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W48', in_channels=720, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w60(*args, **kwargs):
    """Build an FCN with an HRNet_W60 backbone (in_channels=900)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W60', in_channels=900, **kwargs)
@manager.MODELS.add_component
def fcn_hrnet_w64(*args, **kwargs):
    """Build an FCN with an HRNet_W64 backbone (in_channels=960)."""
    # Forward positional args too; they were previously silently dropped.
    return FCN(*args, backbone='HRNet_W64', in_channels=960, **kwargs)
...@@ -12,13 +12,28 @@ ...@@ -12,13 +12,28 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import os
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D, Pool2D from paddle.fluid.dygraph import Conv2D, Pool2D
from paddle.fluid.dygraph import SyncBatchNorm as BatchNorm from paddle.nn import SyncBatchNorm as BatchNorm
from dygraph.cvlibs import manager
from dygraph import utils
class UNet(fluid.dygraph.Layer): class UNet(fluid.dygraph.Layer):
def __init__(self, num_classes, ignore_index=255): """
U-Net: Convolutional Networks for Biomedical Image Segmentation.
https://arxiv.org/abs/1505.04597
Args:
num_classes (int): the unique number of target classes.
pretrained_model (str): the path of pretrained model.
ignore_index (int): the value of ground-truth mask would be ignored while computing loss or doing evaluation. Default 255.
"""
def __init__(self, num_classes, pretrained_model=None, ignore_index=255):
super(UNet, self).__init__() super(UNet, self).__init__()
self.encode = UnetEncoder() self.encode = UnetEncoder()
self.decode = UnetDecode() self.decode = UnetDecode()
...@@ -26,6 +41,8 @@ class UNet(fluid.dygraph.Layer): ...@@ -26,6 +41,8 @@ class UNet(fluid.dygraph.Layer):
self.ignore_index = ignore_index self.ignore_index = ignore_index
self.EPS = 1e-5 self.EPS = 1e-5
self.init_weight(pretrained_model)
def forward(self, x, label=None): def forward(self, x, label=None):
encode_data, short_cuts = self.encode(x) encode_data, short_cuts = self.encode(x)
decode_data = self.decode(encode_data, short_cuts) decode_data = self.decode(encode_data, short_cuts)
...@@ -39,6 +56,20 @@ class UNet(fluid.dygraph.Layer): ...@@ -39,6 +56,20 @@ class UNet(fluid.dygraph.Layer):
pred = fluid.layers.unsqueeze(pred, axes=[3]) pred = fluid.layers.unsqueeze(pred, axes=[3])
return pred, score_map return pred, score_map
def init_weight(self, pretrained_model=None):
"""
Initialize the parameters of model parts.
Args:
pretrained_model ([str], optional): the pretrained_model path of backbone. Defaults to None.
"""
if pretrained_model is not None:
if os.path.exists(pretrained_model):
utils.load_pretrained_model(self.backbone, pretrained_model)
utils.load_pretrained_model(self, pretrained_model)
else:
raise Exception('Pretrained model is not found: {}'.format(
pretrained_model))
def _get_loss(self, logit, label): def _get_loss(self, logit, label):
logit = fluid.layers.transpose(logit, [0, 2, 3, 1]) logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
label = fluid.layers.transpose(label, [0, 2, 3, 1]) label = fluid.layers.transpose(label, [0, 2, 3, 1])
...@@ -108,14 +139,14 @@ class DoubleConv(fluid.dygraph.Layer): ...@@ -108,14 +139,14 @@ class DoubleConv(fluid.dygraph.Layer):
filter_size=3, filter_size=3,
stride=1, stride=1,
padding=1) padding=1)
self.bn0 = BatchNorm(num_channels=num_filters) self.bn0 = BatchNorm(num_filters)
self.conv1 = Conv2D( self.conv1 = Conv2D(
num_channels=num_filters, num_channels=num_filters,
num_filters=num_filters, num_filters=num_filters,
filter_size=3, filter_size=3,
stride=1, stride=1,
padding=1) padding=1)
self.bn1 = BatchNorm(num_channels=num_filters) self.bn1 = BatchNorm(num_filters)
def forward(self, x): def forward(self, x):
x = self.conv0(x) x = self.conv0(x)
...@@ -166,3 +197,8 @@ class GetLogit(fluid.dygraph.Layer): ...@@ -166,3 +197,8 @@ class GetLogit(fluid.dygraph.Layer):
def forward(self, x): def forward(self, x):
x = self.conv(x) x = self.conv(x)
return x return x
@manager.MODELS.add_component
def unet(*args, **kwargs):
return UNet(*args, **kwargs)
...@@ -19,7 +19,6 @@ from paddle.fluid.dygraph.parallel import ParallelEnv ...@@ -19,7 +19,6 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from dygraph.datasets import DATASETS from dygraph.datasets import DATASETS
import dygraph.transforms as T import dygraph.transforms as T
#from dygraph.models import MODELS
from dygraph.cvlibs import manager from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info from dygraph.utils import get_environ_info
from dygraph.utils import logger from dygraph.utils import logger
...@@ -167,8 +166,9 @@ def main(args): ...@@ -167,8 +166,9 @@ def main(args):
transforms=eval_transforms, transforms=eval_transforms,
mode='val') mode='val')
model = manager.MODELS[args.model_name](
model = manager.MODELS[args.model_name](num_classes=train_dataset.num_classes) num_classes=train_dataset.num_classes,
pretrained_model=args.pretrained_model)
	# Create optimizer # Create optimizer
# todo, may less one than len(loader) # todo, may less one than len(loader)
...@@ -191,7 +191,6 @@ def main(args): ...@@ -191,7 +191,6 @@ def main(args):
save_dir=args.save_dir, save_dir=args.save_dir,
iters=args.iters, iters=args.iters,
batch_size=args.batch_size, batch_size=args.batch_size,
pretrained_model=args.pretrained_model,
resume_model=args.resume_model, resume_model=args.resume_model,
save_interval_iters=args.save_interval_iters, save_interval_iters=args.save_interval_iters,
log_iters=args.log_iters, log_iters=args.log_iters,
......
...@@ -19,7 +19,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv ...@@ -19,7 +19,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
from dygraph.datasets import DATASETS from dygraph.datasets import DATASETS
import dygraph.transforms as T import dygraph.transforms as T
from dygraph.models import MODELS from dygraph.cvlibs import manager
from dygraph.utils import get_environ_info from dygraph.utils import get_environ_info
from dygraph.core import evaluate from dygraph.core import evaluate
...@@ -32,7 +32,7 @@ def parse_args(): ...@@ -32,7 +32,7 @@ def parse_args():
'--model_name', '--model_name',
dest='model_name', dest='model_name',
help='Model type for evaluation, which is one of {}'.format( help='Model type for evaluation, which is one of {}'.format(
str(list(MODELS.keys()))), str(list(manager.MODELS.components_dict.keys()))),
type=str, type=str,
default='UNet') default='UNet')
...@@ -87,11 +87,8 @@ def main(args): ...@@ -87,11 +87,8 @@ def main(args):
transforms=eval_transforms, transforms=eval_transforms,
mode='val') mode='val')
if args.model_name not in MODELS: model = manager.MODELS[args.model_name](
raise Exception( num_classes=eval_dataset.num_classes)
'`--model_name` is invalid. it should be one of {}'.format(
str(list(MODELS.keys()))))
model = MODELS[args.model_name](num_classes=eval_dataset.num_classes)
evaluate( evaluate(
model, model,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册