提交 ec0eba2b 编写于 作者: L LielinJiang

refine docs

上级 c3d166ab
...@@ -798,7 +798,7 @@ class Model(fluid.dygraph.Layer): ...@@ -798,7 +798,7 @@ class Model(fluid.dygraph.Layer):
"{} receives a shape {}, but the expected shape is {}.". "{} receives a shape {}, but the expected shape is {}.".
format(key, list(state.shape), list(param.shape))) format(key, list(state.shape), list(param.shape)))
return param, state return param, state
def _strip_postfix(path): def _strip_postfix(path):
path, ext = os.path.splitext(path) path, ext = os.path.splitext(path)
assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \ assert ext in ['', '.pdparams', '.pdopt', '.pdmodel'], \
...@@ -936,35 +936,35 @@ class Model(fluid.dygraph.Layer): ...@@ -936,35 +936,35 @@ class Model(fluid.dygraph.Layer):
Args: Args:
train_data (Dataset|DataLoader): An iterable data loader is used for train_data (Dataset|DataLoader): An iterable data loader is used for
train. An instance of paddle paddle.io.Dataset or train. An instance of paddle paddle.io.Dataset or
paddle.io.Dataloader is recommended. paddle.io.Dataloader is recommended. Default: None.
eval_data (Dataset|DataLoader): An iterable data loader is used for eval_data (Dataset|DataLoader): An iterable data loader is used for
evaluation at the end of epoch. If None, will not do evaluation. evaluation at the end of epoch. If None, will not do evaluation.
An instance of paddle.io.Dataset or paddle.io.Dataloader An instance of paddle.io.Dataset or paddle.io.Dataloader
is recommended. is recommended. Default: None.
batch_size (int): Integer number. The batch size of train_data and eval_data. batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this When train_data and eval_data are both the instance of Dataloader, this
parameter will be ignored. parameter will be ignored. Default: 1.
epochs (int): Integer number. The number of epochs to train the model. epochs (int): Integer number. The number of epochs to train the model. Default: 1.
eval_freq (int): The frequency, in number of epochs, an evaluation eval_freq (int): The frequency, in number of epochs, an evaluation
is performed. is performed. Default: 1.
log_freq (int): The frequency, in number of steps, the training logs log_freq (int): The frequency, in number of steps, the training logs
are printed. are printed. Default: 10.
save_dir(str|None): The directory to save checkpoint during training. save_dir(str|None): The directory to save checkpoint during training.
If None, will not save checkpoint. If None, will not save checkpoint. Default: None.
save_freq (int): The frequency, in number of epochs, to save checkpoint. save_freq (int): The frequency, in number of epochs, to save checkpoint. Default: 1.
verbose (int): The verbosity mode, should be 0, 1, or 2. verbose (int): The verbosity mode, should be 0, 1, or 2.
0 = silent, 1 = progress bar, 2 = one line per epoch. 0 = silent, 1 = progress bar, 2 = one line per epoch. Default: 2.
drop_last (bool): whether drop the last incomplete batch of train_data drop_last (bool): whether drop the last incomplete batch of train_data
when dataset size is not divisible by the batch size. When train_data when dataset size is not divisible by the batch size. When train_data
is an instance of Dataloader, this parameter will be ignored. is an instance of Dataloader, this parameter will be ignored. Default: False.
shuffle (bool): whether to shuffle train_data. When train_data is an instance shuffle (bool): whether to shuffle train_data. When train_data is an instance
of Dataloader, this parameter will be ignored. of Dataloader, this parameter will be ignored. Default: True.
num_workers (int): the number of subprocess to load data, 0 for no subprocess num_workers (int): the number of subprocess to load data, 0 for no subprocess
used and loading data in main process. When train_data and eval_data are used and loading data in main process. When train_data and eval_data are
both the instance of Dataloader, this parameter will be ignored. both the instance of Dataloader, this parameter will be ignored. Default: 0.
callbacks (Callback|None): A list of `Callback` instances to apply callbacks (Callback|None): A list of `Callback` instances to apply
during training. If None, `ProgBarLogger` and `ModelCheckpoint` during training. If None, `ProgBarLogger` and `ModelCheckpoint`
are automatically inserted. are automatically inserted. Default: None.
""" """
assert train_data is not None, \ assert train_data is not None, \
...@@ -1066,18 +1066,20 @@ class Model(fluid.dygraph.Layer): ...@@ -1066,18 +1066,20 @@ class Model(fluid.dygraph.Layer):
evaluation. An instance of paddle.io.Dataset or evaluation. An instance of paddle.io.Dataset or
paddle.io.Dataloader is recomended. paddle.io.Dataloader is recomended.
batch_size (int): Integer number. The batch size of train_data and eval_data. batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this When eval_data is the instance of Dataloader, this argument will be ignored.
parameter will be ignored. Default: 1.
log_freq (int): The frequency, in number of steps, the eval logs log_freq (int): The frequency, in number of steps, the eval logs
are printed. are printed. Default: 10.
verbose (int): The verbosity mode, should be 0, 1, or 2. verbose (int): The verbosity mode, should be 0, 1, or 2.
0 = silent, 1 = progress bar, 2 = one line per epoch. 0 = silent, 1 = progress bar, 2 = one line per epoch. Default: 2.
num_workers (int): The number of subprocess to load data, 0 for no subprocess num_workers (int): The number of subprocess to load data, 0 for no subprocess
used and loading data in main process. When train_data and eval_data are used and loading data in main process. When train_data and eval_data are
both the instance of Dataloader, this parameter will be ignored. both the instance of Dataloader, this parameter will be ignored. Default: 0.
callbacks (Callback|None): A list of `Callback` instances to apply callbacks (Callback|None): A list of `Callback` instances to apply
during training. If None, `ProgBarLogger` and `ModelCheckpoint` during training. If None, `ProgBarLogger` and `ModelCheckpoint`
are automatically inserted. are automatically inserted. Default: None.
Returns:
dict: Result of metric.
""" """
if fluid.in_dygraph_mode(): if fluid.in_dygraph_mode():
...@@ -1142,16 +1144,18 @@ class Model(fluid.dygraph.Layer): ...@@ -1142,16 +1144,18 @@ class Model(fluid.dygraph.Layer):
is recomended. is recomended.
batch_size (int): Integer number. The batch size of train_data and eval_data. batch_size (int): Integer number. The batch size of train_data and eval_data.
When train_data and eval_data are both the instance of Dataloader, this When train_data and eval_data are both the instance of Dataloader, this
parameter will be ignored. argument will be ignored. Default: 1.
num_workers (int): the number of subprocess to load data, 0 for no subprocess num_workers (int): the number of subprocess to load data, 0 for no subprocess
used and loading data in main process. When train_data and eval_data are used and loading data in main process. When train_data and eval_data are
both the instance of Dataloader, this parameter will be ignored. both the instance of Dataloader, this argument will be ignored. Default: 0.
stack_output (bool): whether stack output field like a batch, as for an output stack_output (bool): whether stack output field like a batch, as for an output
field of a sample is in shape [X, Y], test_data contains N samples, predict field of a sample is in shape [X, Y], test_data contains N samples, predict
output field will be in shape [N, X, Y] if stack_output is True, and will output field will be in shape [N, X, Y] if stack_output is True, and will
be a length N list in shape [[X, Y], [X, Y], ....[X, Y]] if stack_outputs be a length N list in shape [[X, Y], [X, Y], ....[X, Y]] if stack_outputs
is False. stack_outputs as False is used for LoDTensor output situation, is False. stack_outputs as False is used for LoDTensor output situation,
it is recommended set as True if outputs contains no LoDTensor. Default False it is recommended set as True if outputs contains no LoDTensor. Default: False.
Returns:
list: output of models.
""" """
if fluid.in_dygraph_mode(): if fluid.in_dygraph_mode():
......
...@@ -263,7 +263,7 @@ class MobileNetV1(Model): ...@@ -263,7 +263,7 @@ class MobileNetV1(Model):
def _mobilenet(arch, pretrained=False, **kwargs): def _mobilenet(arch, pretrained=False, **kwargs):
model = MobileNetV1(num_classes=1000, with_pool=True, **kwargs) model = MobileNetV1(**kwargs)
if pretrained: if pretrained:
assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format(
arch) arch)
...@@ -276,12 +276,13 @@ def _mobilenet(arch, pretrained=False, **kwargs): ...@@ -276,12 +276,13 @@ def _mobilenet(arch, pretrained=False, **kwargs):
return model return model
def mobilenet_v1(pretrained=False, scale=1.0, **kwargs):
    """Build a MobileNetV1 model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        scale (float): scale of channels in each layer. Default: 1.0.
        **kwargs: extra keyword arguments forwarded to the MobileNetV1
            constructor (e.g. num_classes, with_pool).

    Returns:
        MobileNetV1: the constructed model, with pretrained weights loaded
        when ``pretrained`` is True.
    """
    model = _mobilenet(
        'mobilenetv1_' + str(scale), pretrained, scale=scale, **kwargs)
    return model
...@@ -237,7 +237,7 @@ class MobileNetV2(Model): ...@@ -237,7 +237,7 @@ class MobileNetV2(Model):
def _mobilenet(arch, pretrained=False, **kwargs): def _mobilenet(arch, pretrained=False, **kwargs):
model = MobileNetV2(num_classes=1000, with_pool=True, **kwargs) model = MobileNetV2(**kwargs)
if pretrained: if pretrained:
assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format(
arch) arch)
...@@ -250,12 +250,13 @@ def _mobilenet(arch, pretrained=False, **kwargs): ...@@ -250,12 +250,13 @@ def _mobilenet(arch, pretrained=False, **kwargs):
return model return model
def mobilenet_v2(pretrained=False, scale=1.0, **kwargs):
    """Build a MobileNetV2 model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        scale (float): scale of channels in each layer. Default: 1.0.
        **kwargs: extra keyword arguments forwarded to the MobileNetV2
            constructor (e.g. num_classes, with_pool).

    Returns:
        MobileNetV2: the constructed model, with pretrained weights loaded
        when ``pretrained`` is True.
    """
    model = _mobilenet(
        'mobilenetv2_' + str(scale), pretrained, scale=scale, **kwargs)
    return model
...@@ -30,8 +30,18 @@ __all__ = [ ...@@ -30,8 +30,18 @@ __all__ = [
] ]
# Mapping of architecture name -> (pretrained-weights URL, md5 checksum).
# The checksum is used to validate the downloaded .pdparams file.
model_urls = {
    'resnet18': ('https://paddle-hapi.bj.bcebos.com/models/resnet18.pdparams',
                 '0ba53eea9bc970962d0ef96f7b94057e'),
    'resnet34': ('https://paddle-hapi.bj.bcebos.com/models/resnet34.pdparams',
                 '46bc9f7c3dd2e55b7866285bee91eff3'),
    'resnet50': ('https://paddle-hapi.bj.bcebos.com/models/resnet50.pdparams',
                 '0884c9087266496c41c60d14a96f8530'),
    'resnet101': ('https://paddle-hapi.bj.bcebos.com/models/resnet101.pdparams',
                  'fb07a451df331e4b0bb861ed97c3a9b9'),
    'resnet152': ('https://paddle-hapi.bj.bcebos.com/models/resnet152.pdparams',
                  'f9c700f26d3644bb76ad2226ed5f5713'),
}
...@@ -252,8 +262,8 @@ class ResNet(Model): ...@@ -252,8 +262,8 @@ class ResNet(Model):
return x return x
def _resnet(arch, Block, depth, pretrained): def _resnet(arch, Block, depth, pretrained, **kwargs):
model = ResNet(Block, depth, num_classes=1000, with_pool=True) model = ResNet(Block, depth, **kwargs)
if pretrained: if pretrained:
assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format( assert arch in model_urls, "{} model do not have a pretrained model now, you should set pretrained=False".format(
arch) arch)
...@@ -265,46 +275,46 @@ def _resnet(arch, Block, depth, pretrained): ...@@ -265,46 +275,46 @@ def _resnet(arch, Block, depth, pretrained):
return model return model
def resnet18(pretrained=False, **kwargs):
    """Build a ResNet 18-layer model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        **kwargs: extra keyword arguments forwarded to the ResNet constructor
            (e.g. num_classes, with_pool).

    Returns:
        ResNet: an 18-layer ResNet built from BasicBlock.
    """
    return _resnet('resnet18', BasicBlock, 18, pretrained, **kwargs)
def resnet34(pretrained=False, **kwargs):
    """Build a ResNet 34-layer model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        **kwargs: extra keyword arguments forwarded to the ResNet constructor
            (e.g. num_classes, with_pool).

    Returns:
        ResNet: a 34-layer ResNet built from BasicBlock.
    """
    return _resnet('resnet34', BasicBlock, 34, pretrained, **kwargs)
def resnet50(pretrained=False, **kwargs):
    """Build a ResNet 50-layer model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        **kwargs: extra keyword arguments forwarded to the ResNet constructor
            (e.g. num_classes, with_pool).

    Returns:
        ResNet: a 50-layer ResNet built from BottleneckBlock.
    """
    return _resnet('resnet50', BottleneckBlock, 50, pretrained, **kwargs)
def resnet101(pretrained=False, **kwargs):
    """Build a ResNet 101-layer model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        **kwargs: extra keyword arguments forwarded to the ResNet constructor
            (e.g. num_classes, with_pool).

    Returns:
        ResNet: a 101-layer ResNet built from BottleneckBlock.
    """
    return _resnet('resnet101', BottleneckBlock, 101, pretrained, **kwargs)
def resnet152(pretrained=False, **kwargs):
    """Build a ResNet 152-layer model.

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        **kwargs: extra keyword arguments forwarded to the ResNet constructor
            (e.g. num_classes, with_pool).

    Returns:
        ResNet: a 152-layer ResNet built from BottleneckBlock.
    """
    return _resnet('resnet152', BottleneckBlock, 152, pretrained, **kwargs)
...@@ -137,7 +137,7 @@ def _vgg(arch, cfg, batch_norm, pretrained, **kwargs): ...@@ -137,7 +137,7 @@ def _vgg(arch, cfg, batch_norm, pretrained, **kwargs):
return model return model
def vgg11(pretrained=False, batch_norm=False, **kwargs):
    """Build a VGG 11-layer model (configuration "A").

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        batch_norm (bool): If True, use the batch-normalization variant
            (model name suffixed with '_bn'). Default: False.
        **kwargs: extra keyword arguments forwarded to the VGG constructor.
    """
    model_name = 'vgg11'
    if batch_norm:
        model_name += '_bn'
    return _vgg(model_name, 'A', batch_norm, pretrained, **kwargs)
def vgg13(pretrained=False, batch_norm=False, **kwargs):
    """Build a VGG 13-layer model (configuration "B").

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        batch_norm (bool): If True, use the batch-normalization variant
            (model name suffixed with '_bn'). Default: False.
        **kwargs: extra keyword arguments forwarded to the VGG constructor.
    """
    model_name = 'vgg13'
    if batch_norm:
        model_name += '_bn'
    return _vgg(model_name, 'B', batch_norm, pretrained, **kwargs)
def vgg16(pretrained=False, batch_norm=False, **kwargs):
    """Build a VGG 16-layer model (configuration "D").

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        batch_norm (bool): If True, use the batch-normalization variant
            (model name suffixed with '_bn'). Default: False.
        **kwargs: extra keyword arguments forwarded to the VGG constructor.
    """
    model_name = 'vgg16'
    if batch_norm:
        model_name += '_bn'
    return _vgg(model_name, 'D', batch_norm, pretrained, **kwargs)
def vgg19(pretrained=False, batch_norm=False, **kwargs):
    """Build a VGG 19-layer model (configuration "E").

    Args:
        pretrained (bool): If True, loads ImageNet pre-trained weights. Default: False.
        batch_norm (bool): If True, use the batch-normalization variant
            (model name suffixed with '_bn'). Default: False.
        **kwargs: extra keyword arguments forwarded to the VGG constructor.
    """
    model_name = 'vgg19'
    if batch_norm:
        model_name += '_bn'
    return _vgg(model_name, 'E', batch_norm, pretrained, **kwargs)
...@@ -71,7 +71,7 @@ class Compose(object): ...@@ -71,7 +71,7 @@ class Compose(object):
except Exception as e: except Exception as e:
stack_info = traceback.format_exc() stack_info = traceback.format_exc()
print("fail to perform transform [{}] with error: " print("fail to perform transform [{}] with error: "
"{} and stack:\n{}".format(f, e, str(stack_info))) "{} and stack:\n{}".format(f, e, str(stack_info)))
raise e raise e
return data return data
...@@ -92,6 +92,7 @@ class BatchCompose(object): ...@@ -92,6 +92,7 @@ class BatchCompose(object):
these transforms perform on batch data. these transforms perform on batch data.
""" """
def __init__(self, transforms=None):
    """Store the batch-level transforms to apply.

    Args:
        transforms (list|None): list of callables applied to batch data.
            Default: None (treated as an empty list).
    """
    # None sentinel instead of a mutable default: `transforms=[]` would
    # share one list object across every BatchCompose instance.
    self.transforms = [] if transforms is None else transforms
...@@ -102,7 +103,7 @@ class BatchCompose(object): ...@@ -102,7 +103,7 @@ class BatchCompose(object):
except Exception as e: except Exception as e:
stack_info = traceback.format_exc() stack_info = traceback.format_exc()
print("fail to perform batch transform [{}] with error: " print("fail to perform batch transform [{}] with error: "
"{} and stack:\n{}".format(f, e, str(stack_info))) "{} and stack:\n{}".format(f, e, str(stack_info)))
raise e raise e
# sample list to batch data # sample list to batch data
...@@ -112,7 +113,7 @@ class BatchCompose(object): ...@@ -112,7 +113,7 @@ class BatchCompose(object):
class Resize(object): class Resize(object):
"""Resize the input PIL Image to the given size. """Resize the input Image to the given size.
Args: Args:
size (int|list|tuple): Desired output size. If size is a sequence like size (int|list|tuple): Desired output size. If size is a sequence like
...@@ -130,13 +131,6 @@ class Resize(object): ...@@ -130,13 +131,6 @@ class Resize(object):
self.interpolation = interpolation self.interpolation = interpolation
def __call__(self, img, lbl):
    """Resize the image to ``self.size``; the label passes through unchanged.

    Args:
        img: image to be scaled.
        lbl: label associated with the image, returned as-is.

    Returns:
        tuple: (resized image, unchanged label).
    """
    return F.resize(img, self.size, self.interpolation), lbl
...@@ -328,18 +322,22 @@ class Permute(object): ...@@ -328,18 +322,22 @@ class Permute(object):
Input image should be HWC mode and an instance of numpy.ndarray. Input image should be HWC mode and an instance of numpy.ndarray.
Args: Args:
mode: Output mode of input. Use "CHW" mode by default. mode: Output mode of input. Default: "CHW".
to_rgb: convert 'bgr' image to 'rgb'. Default: True.
""" """
def __init__(self, mode="CHW", to_rgb=True):
    """Configure the permutation.

    Args:
        mode (str): output layout of the image; only "CHW" is supported.
            Default: "CHW".
        to_rgb (bool): convert a 'bgr' image to 'rgb' by reversing the
            channel axis. Default: True.
    """
    assert mode in [
        "CHW"
    ], "Only support 'CHW' mode, but received mode: {}".format(mode)
    self.mode = mode
    self.to_rgb = to_rgb
def __call__(self, img, lbl):
    """Permute an HWC image to the configured layout.

    Args:
        img (numpy.ndarray): image in HWC layout; assumed BGR channel order
            when ``self.to_rgb`` is True — TODO confirm with callers.
        lbl: label associated with the image, returned as-is.

    Returns:
        tuple: (permuted image, unchanged label).
    """
    if self.to_rgb:
        # Reverse the last (channel) axis: BGR -> RGB.
        img = img[..., ::-1]
    if self.mode == "CHW":
        return img.transpose((2, 0, 1)), lbl
    return img, lbl
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册