Commit 19872341 authored by M michaelowenliu

fix a typo

Parent 0e666bfe
......@@ -35,30 +35,20 @@ class ANN(nn.Layer):
It mainly consists of AFNB and APNB modules.
Args:
num_classes (int): the unique number of target classes.
backbone (paddle.nn.Layer): backbone network, currently supports Resnet50/101.
model_pretrained (str): the path of the pretrained model. Default to None.
backbone_indices (tuple): two values in the tuple indicate the indices of the backbone outputs.
the first index will be taken as low-level features; the second one will be
taken as high-level features in the AFNB module. Usually the backbone consists of four
downsampling stages and returns an output of each stage, so we set the default to (2, 3),
which means taking the feature maps of the third and the fourth stages of the backbone.
backbone_channels (tuple): the same length as "backbone_indices". It indicates the channels of the corresponding indices.
key_value_channels (int): the key and value channels of the self-attention map in both AFNB and APNB modules.
Default to 256.
inter_channels (int): both input and output channels of APNB modules.
psp_size (tuple): the out size of pooled feature maps. Default to (1, 3, 6, 8).
enable_auxiliary_loss (bool): whether to add an auxiliary loss. Default to True.
"""
def __init__(self,
......@@ -156,21 +146,13 @@ class AFNB(nn.Layer):
Args:
low_in_channels (int): low-level-feature channels.
high_in_channels (int): high-level-feature channels.
out_channels (int): out channels of AFNB module.
key_channels (int): the key channels in self-attention block.
value_channels (int): the value channels in self-attention block.
dropout_prob (float): the dropout rate of output.
sizes (tuple): the pooling scales of the attention blocks inside AFNB; one block is created per scale. Default to ([1]).
psp_size (tuple): the out size of pooled feature maps. Default to (1, 3, 6, 8).
"""
def __init__(self,
......@@ -214,19 +196,12 @@ class APNB(nn.Layer):
Args:
in_channels (int): the input channels of APNB module.
out_channels (int): out channels of APNB module.
key_channels (int): the key channels in self-attention block.
value_channels (int): the value channels in self-attention block.
dropout_prob (float): the dropout rate of output.
sizes (tuple): the pooling scales of the attention blocks inside APNB; one block is created per scale. Default to ([1]).
psp_size (tuple): the out size of pooled feature maps. Default to (1, 3, 6, 8).
"""
def __init__(self,
......@@ -279,17 +254,11 @@ class SelfAttentionBlock_AFNB(nn.Layer):
Args:
low_in_channels (int): low-level-feature channels.
high_in_channels (int): high-level-feature channels.
key_channels (int): the key channels in self-attention block.
value_channels (int): the value channels in self-attention block.
out_channels (int): out channels of AFNB module.
scale (int): pooling size. Default to 1.
psp_size (tuple): the out size of pooled feature maps. Default to (1, 3, 6, 8).
"""
......@@ -366,15 +335,10 @@ class SelfAttentionBlock_APNB(nn.Layer):
Args:
in_channels (int): the input channels of APNB module.
out_channels (int): out channels of APNB module.
key_channels (int): the key channels in self-attention block.
value_channels (int): the value channels in self-attention block.
scale (int): pooling size. Default to 1.
psp_size (tuple): the out size of pooled feature maps. Default to (1, 3, 6, 8).
"""
......
......@@ -18,7 +18,6 @@ from paddle import nn
import paddle.nn.functional as F
from paddle.nn import Conv2d
from paddle.nn import SyncBatchNorm as BatchNorm
from paddle.nn.layer import activation
class ConvBnRelu(nn.Layer):
......@@ -94,11 +93,8 @@ class AuxLayer(nn.Layer):
Args:
in_channels (int): the number of input channels.
inter_channels (int): intermediate channels.
out_channels (int): the number of output channels, which is usually num_classes.
dropout_prob (float): the dropout rate. Default to 0.1.
"""
......
......@@ -28,15 +28,10 @@ class ASPPModule(nn.Layer):
Args:
aspp_ratios (tuple): the dilation rates used in the ASPP module.
in_channels (int): the number of input channels.
out_channels (int): the number of output channels.
sep_conv (bool): whether to use separable convolutions in the ASPP module.
image_pooling (bool): whether to augment the features with image-level pooling.
"""
def __init__(self,
......@@ -106,11 +101,8 @@ class PPModule(nn.Layer):
Args:
in_channels (int): the number of input channels to the pyramid pooling module.
out_channels (int): the number of output channels after pyramid pooling module.
bin_sizes (tuple): the out size of pooled feature maps. Default to (1,2,3,6).
dim_reduction (bool): a bool value that indicates whether to reduce dimension after pooling. Default to True.
"""
......@@ -152,7 +144,6 @@ class PPModule(nn.Layer):
Args:
in_channels (int): the number of input channels to the pyramid pooling module.
size (int): the out size of the pooled layer.
Returns:
......
......@@ -38,25 +38,19 @@ class DeepLabV3P(nn.Layer):
Args:
num_classes (int): the unique number of target classes.
backbone (paddle.nn.Layer): backbone network, currently supports Xception65 and Resnet101_vd.
model_pretrained (str): the path of the pretrained model.
aspp_ratios (tuple): the dilation rates used in the ASPP module.
if output_stride=16, aspp_ratios should be set as (1, 6, 12, 18).
if output_stride=8, aspp_ratios is (1, 12, 24, 36).
backbone_indices (tuple): two values in the tuple indicate the indices of the backbone outputs.
the first index will be taken as a low-level feature in the Decoder component;
the second one will be taken as the input of the ASPP component.
Usually the backbone consists of four downsampling stages and returns an output of
each stage, so we set the default to (0, 3), which means taking the feature map of the first
stage in the backbone as the low-level feature used in the Decoder, and the feature map of the fourth
stage as the input of ASPP.
backbone_channels (tuple): the same length as "backbone_indices". It indicates the channels of the corresponding indices.
"""
def __init__(self,
......@@ -118,7 +112,6 @@ class DeepLabV3(nn.Layer):
Args:
Refer to DeepLabV3P above
"""
def __init__(self,
......@@ -178,7 +171,6 @@ class Decoder(nn.Layer):
Args:
num_classes (int): the number of classes.
in_channels (int): the number of input channels in decoder module.
"""
......
......@@ -15,7 +15,7 @@
import paddle.nn.functional as F
from paddle import nn
from paddleseg.cvlibs import manager
from paddleseg.models.common import layer_libs
from paddleseg.models.common import layer_libs, pyramid_pool
@manager.MODELS.add_component
......@@ -33,12 +33,9 @@ class FastSCNN(nn.Layer):
Args:
num_classes (int): the unique number of target classes. Default to 2.
model_pretrained (str): the path of the pretrained model. Default to None.
enable_auxiliary_loss (bool): whether to add an auxiliary loss.
if True, an auxiliary loss will be added after the LearningToDownsample module, where the weight is 0.4. Default to False.
"""
def __init__(self,
......@@ -55,7 +52,7 @@ class FastSCNN(nn.Layer):
self.classifier = Classifier(128, num_classes)
if enable_auxiliary_loss:
self.auxlayer = model_utils.AuxLayer(64, 32, num_classes)
self.auxlayer = layer_libs.AuxLayer(64, 32, num_classes)
self.enable_auxiliary_loss = enable_auxiliary_loss
......@@ -101,9 +98,7 @@ class LearningToDownsample(nn.Layer):
Args:
dw_channels1 (int): the input channels of the first sep conv. Default to 32.
dw_channels2 (int): the input channels of the second sep conv. Default to 48.
out_channels (int): the output channels of LearningToDownsample module. Default to 64.
"""
......@@ -141,13 +136,9 @@ class GlobalFeatureExtractor(nn.Layer):
Args:
in_channels (int): the number of input channels to the module. Default to 64.
block_channels (tuple): a tuple that represents the output channels of each bottleneck block. Default to (64, 96, 128).
out_channels (int): the number of output channels of the module. Default to 128.
expansion (int): the expansion factor in bottleneck. Default to 6.
num_blocks (tuple): the number of times each bottleneck block is repeated. Default to (3, 3, 3).
"""
......@@ -169,7 +160,7 @@ class GlobalFeatureExtractor(nn.Layer):
block_channels[2], num_blocks[2],
expansion, 1)
self.ppm = model_utils.PPModule(
self.ppm = pyramid_pool.PPModule(
block_channels[2], out_channels, dim_reduction=True)
def _make_layer(self,
......@@ -199,11 +190,8 @@ class LinearBottleneck(nn.Layer):
Args:
in_channels (int): the number of input channels to bottleneck block.
out_channels (int): the number of output channels of bottleneck block.
expansion (int): the expansion factor in bottleneck. Default to 6.
stride (int): the stride used in the depth-wise conv.
"""
......@@ -257,9 +245,7 @@ class FeatureFusionModule(nn.Layer):
Args:
high_in_channels (int): the channels of high-resolution feature (output of LearningToDownsample).
low_in_channels (int): the channels of the low-resolution feature (output of GlobalFeatureExtractor).
out_channels (int): the output channels of this module.
"""
......@@ -309,9 +295,7 @@ class Classifier(nn.Layer):
Args:
input_channels (int): the input channels to this module.
num_classes (int): the unique number of target classes.
"""
def __init__(self, input_channels, num_classes):
......
......@@ -32,28 +32,19 @@ class GCNet(nn.Layer):
(https://arxiv.org/pdf/1904.11492.pdf)
Args:
num_classes (int): the unique number of target classes.
backbone (paddle.nn.Layer): backbone network, currently supports Resnet50/101.
model_pretrained (str): the path of the pretrained model. Default to None.
backbone_indices (tuple): two values in the tuple indicate the indices of the backbone outputs.
the first index will be taken as a deep-supervision feature in the auxiliary layer;
the second one will be taken as the input of GlobalContextBlock. Usually the backbone
consists of four downsampling stages and returns an output of each stage, so we
set the default to (2, 3), which means taking the feature map of the third stage (res4b22)
and the fourth stage (res5c) in the backbone.
backbone_channels (tuple): the same length as "backbone_indices". It indicates the channels of the corresponding indices.
gc_channels (int): input channels to Global Context Block. Default to 512.
ratio (float): the ratio of attention channels to gc_channels. Default to 1/4.
enable_auxiliary_loss (bool): whether to add an auxiliary loss. Default to True.
"""
def __init__(self,
......
......@@ -33,26 +33,18 @@ class PSPNet(nn.Layer):
Args:
num_classes (int): the unique number of target classes.
backbone (paddle.nn.Layer): backbone network, currently supports Resnet50/101.
model_pretrained (str): the path of the pretrained model. Default to None.
backbone_indices (tuple): two values in the tuple indicate the indices of the backbone outputs.
the first index will be taken as a deep-supervision feature in the auxiliary layer;
the second one will be taken as the input of the Pyramid Pooling Module (PPModule).
Usually the backbone consists of four downsampling stages and returns an output of
each stage, so we set the default to (2, 3), which means taking the feature map of the third
stage (res4b22) in the backbone, and the feature map of the fourth stage (res5c) as the input of PPModule.
backbone_channels (tuple): the same length as "backbone_indices". It indicates the channels of the corresponding indices.
pp_out_channels (int): output channels after Pyramid Pooling Module. Default to 1024.
bin_sizes (tuple): the out size of pooled feature maps. Default to (1,2,3,6).
enable_auxiliary_loss (bool): whether to add an auxiliary loss. Default to True.
"""
def __init__(self,
......