Unverified commit 5b20aa32, authored by Feng Ni, committed by GitHub

remove name and norm_name in head and fpn (#2649)

* fix name of ConvNormLayer for fcos solov2

* fix name in other head convbn

* fix yolo name, fix ttf_head name, test=document_fix
Parent 5aafa1b6
......@@ -35,8 +35,8 @@ class ConvBNLayer(nn.Layer):
norm_type='bn',
norm_decay=0.,
act="leaky",
name=None,
data_format='NCHW'):
data_format='NCHW',
name=''):
"""
conv + bn + activation layer
......@@ -50,7 +50,6 @@ class ConvBNLayer(nn.Layer):
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
act (str): activation function type, default 'leaky', which means leaky_relu
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
super(ConvBNLayer, self).__init__()
......@@ -68,7 +67,6 @@ class ConvBNLayer(nn.Layer):
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
name=name,
data_format=data_format)
self.act = act
......@@ -91,7 +89,6 @@ class DownSample(nn.Layer):
padding=1,
norm_type='bn',
norm_decay=0.,
name=None,
data_format='NCHW'):
"""
downsample layer
......@@ -104,7 +101,6 @@ class DownSample(nn.Layer):
padding (int): padding size, default 1
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
......@@ -118,8 +114,7 @@ class DownSample(nn.Layer):
padding=padding,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=name)
data_format=data_format)
self.ch_out = ch_out
def forward(self, inputs):
......@@ -133,7 +128,6 @@ class BasicBlock(nn.Layer):
ch_out,
norm_type='bn',
norm_decay=0.,
name=None,
data_format='NCHW'):
"""
BasicBlock layer of DarkNet
......@@ -143,7 +137,6 @@ class BasicBlock(nn.Layer):
ch_out (int): output channel
norm_type (str): batch norm type, default bn
norm_decay (str): decay for weight and bias of batch norm layer, default 0.
name (str): layer name
data_format (str): data format, NCHW or NHWC
"""
......@@ -157,8 +150,7 @@ class BasicBlock(nn.Layer):
padding=0,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=name + '.0')
data_format=data_format)
self.conv2 = ConvBNLayer(
ch_in=ch_out,
ch_out=ch_out * 2,
......@@ -167,8 +159,7 @@ class BasicBlock(nn.Layer):
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=name + '.1')
data_format=data_format)
def forward(self, inputs):
conv1 = self.conv1(inputs)
......@@ -205,8 +196,7 @@ class Blocks(nn.Layer):
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=name + '.0')
data_format=data_format)
self.res_out_list = []
for i in range(1, count):
block_name = '{}.{}'.format(name, i)
......@@ -217,8 +207,7 @@ class Blocks(nn.Layer):
ch_out,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=block_name))
data_format=data_format))
self.res_out_list.append(res_out)
self.ch_out = ch_out
......@@ -272,16 +261,14 @@ class DarkNet(nn.Layer):
padding=1,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name='yolo_input')
data_format=data_format)
self.downsample0 = DownSample(
ch_in=32,
ch_out=32 * 2,
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name='yolo_input.downsample')
data_format=data_format)
self._out_channels = []
self.darknet_conv_block_list = []
......@@ -311,8 +298,7 @@ class DarkNet(nn.Layer):
ch_out=32 * (2**(i + 2)),
norm_type=norm_type,
norm_decay=norm_decay,
data_format=data_format,
name=down_name))
data_format=data_format))
self.downsample_list.append(downsample)
def forward(self, inputs):
......
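After the darknet hunks above, `ConvBNLayer`, `DownSample`, `BasicBlock`, `Blocks`, and `DarkNet` no longer take a `name` argument. A minimal construction sketch, assuming the post-change signature shown in the diff; channel and shape values are illustrative:

```python
# Minimal sketch, assuming the post-change ConvBNLayer signature from the
# diff above; channel/shape values are illustrative only.
import paddle
from ppdet.modeling.backbones.darknet import ConvBNLayer

conv = ConvBNLayer(
    ch_in=3,
    ch_out=32,
    filter_size=3,
    stride=1,
    padding=1,
    norm_type='bn',
    norm_decay=0.,
    act='leaky',
    data_format='NCHW')  # no `name=...` argument needed any more

x = paddle.rand([1, 3, 416, 416])
y = conv(x)  # conv -> batch norm -> leaky_relu
```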
......@@ -128,10 +128,8 @@ class XConvNormHead(nn.Layer):
filter_size=3,
stride=1,
norm_type=self.norm_type,
norm_name=head_conv_name + '_norm',
freeze_norm=self.freeze_norm,
initializer=initializer,
name=head_conv_name))
initializer=initializer))
self.bbox_head_convs.append(head_conv)
fan = conv_dim * resolution * resolution
......
......@@ -81,10 +81,8 @@ class FCOSFeat(nn.Layer):
stride=1,
norm_type=norm_type,
use_dcn=use_dcn,
norm_name=cls_conv_name + '_norm',
bias_on=True,
lr_scale=2.,
name=cls_conv_name))
lr_scale=2.))
self.cls_subnet_convs.append(cls_conv)
reg_conv_name = 'fcos_head_reg_tower_conv_{}'.format(i)
......@@ -97,10 +95,8 @@ class FCOSFeat(nn.Layer):
stride=1,
norm_type=norm_type,
use_dcn=use_dcn,
norm_name=reg_conv_name + '_norm',
bias_on=True,
lr_scale=2.,
name=reg_conv_name))
lr_scale=2.))
self.reg_subnet_convs.append(reg_conv)
def forward(self, fpn_feat):
......
......@@ -63,10 +63,8 @@ class MaskFeat(nn.Layer):
filter_size=3,
stride=1,
norm_type=self.norm_type,
norm_name=conv_name + '_norm',
initializer=KaimingNormal(fan_in=fan_conv),
skip_quant=True,
name=conv_name))
skip_quant=True))
mask_conv.add_sublayer(conv_name + 'act', nn.ReLU())
else:
for i in range(self.num_convs):
......
......@@ -75,9 +75,7 @@ class SOLOv2MaskHead(nn.Layer):
ch_out=self.mid_channels,
filter_size=3,
stride=1,
norm_type='gn',
norm_name=conv_feat_name + '.conv' + str(i) + '.gn',
name=conv_feat_name + '.conv' + str(i)))
norm_type='gn'))
self.add_sublayer('conv_pre_feat' + str(i), conv_pre_feat)
self.convs_all_levels.append(conv_pre_feat)
else:
......@@ -94,9 +92,7 @@ class SOLOv2MaskHead(nn.Layer):
ch_out=self.mid_channels,
filter_size=3,
stride=1,
norm_type='gn',
norm_name=conv_feat_name + '.conv' + str(j) + '.gn',
name=conv_feat_name + '.conv' + str(j)))
norm_type='gn'))
conv_pre_feat.add_sublayer(
conv_feat_name + '.conv' + str(j) + 'act', nn.ReLU())
conv_pre_feat.add_sublayer(
......@@ -114,9 +110,7 @@ class SOLOv2MaskHead(nn.Layer):
ch_out=self.out_channels,
filter_size=1,
stride=1,
norm_type='gn',
norm_name=conv_pred_name + '.gn',
name=conv_pred_name))
norm_type='gn'))
def forward(self, inputs):
"""
......@@ -216,9 +210,7 @@ class SOLOv2Head(nn.Layer):
ch_out=self.seg_feat_channels,
filter_size=3,
stride=1,
norm_type='gn',
norm_name='bbox_head.kernel_convs.{}.gn'.format(i),
name='bbox_head.kernel_convs.{}'.format(i)))
norm_type='gn'))
self.kernel_pred_convs.append(kernel_conv)
ch_in = self.in_channels if i == 0 else self.seg_feat_channels
cate_conv = self.add_sublayer(
......@@ -228,9 +220,7 @@ class SOLOv2Head(nn.Layer):
ch_out=self.seg_feat_channels,
filter_size=3,
stride=1,
norm_type='gn',
norm_name='bbox_head.cate_convs.{}.gn'.format(i),
name='bbox_head.cate_convs.{}'.format(i)))
norm_type='gn'))
self.cate_pred_convs.append(cate_conv)
self.solo_kernel = self.add_sublayer(
......@@ -241,11 +231,9 @@ class SOLOv2Head(nn.Layer):
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(
name="bbox_head.solo_kernel.weight",
initializer=Normal(
weight_attr=ParamAttr(initializer=Normal(
mean=0., std=0.01)),
bias_attr=ParamAttr(name="bbox_head.solo_kernel.bias")))
bias_attr=True))
self.solo_cate = self.add_sublayer(
'bbox_head.solo_cate',
nn.Conv2D(
......@@ -254,13 +242,9 @@ class SOLOv2Head(nn.Layer):
kernel_size=3,
stride=1,
padding=1,
weight_attr=ParamAttr(
name="bbox_head.solo_cate.weight",
initializer=Normal(
weight_attr=ParamAttr(initializer=Normal(
mean=0., std=0.01)),
bias_attr=ParamAttr(
name="bbox_head.solo_cate.bias",
initializer=Constant(
bias_attr=ParamAttr(initializer=Constant(
value=float(-np.log((1 - 0.01) / 0.01))))))
def _points_nms(self, heat, kernel_size=2):
......
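In the SOLOv2 hunks, `ParamAttr` entries lose their explicit parameter names and keep only their initializers. A small standalone sketch of the new pattern; channel counts are illustrative:

```python
# Anonymous ParamAttr pattern used above; the bias prior follows the
# focal-loss style initialization of SOLOv2Head. Channel counts are
# illustrative only.
import numpy as np
import paddle.nn as nn
from paddle import ParamAttr
from paddle.nn.initializer import Normal, Constant

cate_conv = nn.Conv2D(
    in_channels=512,
    out_channels=80,
    kernel_size=3,
    stride=1,
    padding=1,
    weight_attr=ParamAttr(initializer=Normal(mean=0., std=0.01)),
    bias_attr=ParamAttr(
        initializer=Constant(value=float(-np.log((1 - 0.01) / 0.01)))))
```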
......@@ -28,8 +28,7 @@ class SepConvLayer(nn.Layer):
out_channels,
kernel_size=3,
padding=1,
conv_decay=0,
name=None):
conv_decay=0):
super(SepConvLayer, self).__init__()
self.dw_conv = nn.Conv2D(
in_channels=in_channels,
......@@ -38,16 +37,13 @@ class SepConvLayer(nn.Layer):
stride=1,
padding=padding,
groups=in_channels,
weight_attr=ParamAttr(
name=name + "_dw_weights", regularizer=L2Decay(conv_decay)),
weight_attr=ParamAttr(regularizer=L2Decay(conv_decay)),
bias_attr=False)
self.bn = nn.BatchNorm2D(
in_channels,
weight_attr=ParamAttr(
name=name + "_bn_scale", regularizer=L2Decay(0.)),
bias_attr=ParamAttr(
name=name + "_bn_offset", regularizer=L2Decay(0.)))
weight_attr=ParamAttr(regularizer=L2Decay(0.)),
bias_attr=ParamAttr(regularizer=L2Decay(0.)))
self.pw_conv = nn.Conv2D(
in_channels=in_channels,
......@@ -55,8 +51,7 @@ class SepConvLayer(nn.Layer):
kernel_size=1,
stride=1,
padding=0,
weight_attr=ParamAttr(
name=name + "_pw_weights", regularizer=L2Decay(conv_decay)),
weight_attr=ParamAttr(regularizer=L2Decay(conv_decay)),
bias_attr=False)
def forward(self, x):
......@@ -125,8 +120,7 @@ class SSDHead(nn.Layer):
out_channels=num_prior * 4,
kernel_size=kernel_size,
padding=padding,
conv_decay=conv_decay,
name=box_conv_name))
conv_decay=conv_decay))
self.box_convs.append(box_conv)
score_conv_name = "scores{}".format(i)
......@@ -146,8 +140,7 @@ class SSDHead(nn.Layer):
out_channels=num_prior * self.num_classes,
kernel_size=kernel_size,
padding=padding,
conv_decay=conv_decay,
name=score_conv_name))
conv_decay=conv_decay))
self.score_convs.append(score_conv)
@classmethod
......
......@@ -61,8 +61,7 @@ class HMHead(nn.Layer):
LiteConv(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
norm_type=norm_type,
name=lite_name))
norm_type=norm_type))
head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())
else:
if dcn_head:
......@@ -85,11 +84,9 @@ class HMHead(nn.Layer):
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
head_conv.add_sublayer(name + '.act', nn.ReLU())
self.feat = self.add_sublayer('hm_feat', head_conv)
self.feat = head_conv
bias_init = float(-np.log((1 - 0.01) / 0.01))
self.head = self.add_sublayer(
'hm_head',
nn.Conv2D(
self.head = nn.Conv2D(
in_channels=ch_out,
out_channels=num_classes,
kernel_size=1,
......@@ -97,7 +94,7 @@ class HMHead(nn.Layer):
bias_attr=ParamAttr(
learning_rate=2.,
regularizer=L2Decay(0.),
initializer=Constant(bias_init))))
initializer=Constant(bias_init)))
def forward(self, feat):
out = self.feat(feat)
......@@ -139,8 +136,7 @@ class WHHead(nn.Layer):
LiteConv(
in_channels=ch_in if i == 0 else ch_out,
out_channels=ch_out,
norm_type=norm_type,
name=lite_name))
norm_type=norm_type))
head_conv.add_sublayer(lite_name + '.act', nn.ReLU6())
else:
if dcn_head:
......@@ -164,16 +160,14 @@ class WHHead(nn.Layer):
learning_rate=2., regularizer=L2Decay(0.))))
head_conv.add_sublayer(name + '.act', nn.ReLU())
self.feat = self.add_sublayer('wh_feat', head_conv)
self.head = self.add_sublayer(
'wh_head',
nn.Conv2D(
self.feat = head_conv
self.head = nn.Conv2D(
in_channels=ch_out,
out_channels=4,
kernel_size=1,
weight_attr=ParamAttr(initializer=Normal(0, 0.001)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
learning_rate=2., regularizer=L2Decay(0.)))
def forward(self, feat):
out = self.feat(feat)
......
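The TTF head hunks replace `add_sublayer('hm_head', ...)` and `add_sublayer('wh_head', ...)` with plain attribute assignments. In paddle, assigning an `nn.Layer` to an attribute already registers it as a sub-layer, so the explicit name adds nothing. A minimal sketch of the pattern, with illustrative channel counts:

```python
# Attribute assignment registers the sub-layer automatically; no explicit
# add_sublayer name is needed. Channel counts are illustrative.
import paddle.nn as nn

class TinyHead(nn.Layer):
    def __init__(self, ch_out=64, num_classes=80):
        super(TinyHead, self).__init__()
        self.head = nn.Conv2D(
            in_channels=ch_out, out_channels=num_classes, kernel_size=1)

    def forward(self, feat):
        return self.head(feat)

m = TinyHead()
print([name for name, _ in m.named_sublayers()])  # ['head']
```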
......@@ -4,7 +4,6 @@ import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register
from ..backbones.darknet import ConvBNLayer
def _de_sigmoid(x, eps=1e-7):
......
......@@ -117,22 +117,18 @@ class ConvNormLayer(nn.Layer):
norm_decay=0.,
norm_groups=32,
use_dcn=False,
norm_name=None,
bias_on=False,
lr_scale=1.,
freeze_norm=False,
initializer=Normal(
mean=0., std=0.01),
skip_quant=False,
name=None):
skip_quant=False):
super(ConvNormLayer, self).__init__()
assert norm_type in ['bn', 'sync_bn', 'gn']
if bias_on:
bias_attr = ParamAttr(
name=name + "_bias",
initializer=Constant(value=0.),
learning_rate=lr_scale)
initializer=Constant(value=0.), learning_rate=lr_scale)
else:
bias_attr = False
......@@ -145,9 +141,7 @@ class ConvNormLayer(nn.Layer):
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
name=name + "_weight",
initializer=initializer,
learning_rate=1.),
initializer=initializer, learning_rate=1.),
bias_attr=bias_attr)
if skip_quant:
self.conv.skip_quant = True
......@@ -161,24 +155,17 @@ class ConvNormLayer(nn.Layer):
padding=(filter_size - 1) // 2,
groups=groups,
weight_attr=ParamAttr(
name=name + "_weight",
initializer=initializer,
learning_rate=1.),
initializer=initializer, learning_rate=1.),
bias_attr=True,
lr_scale=2.,
regularizer=L2Decay(norm_decay),
skip_quant=skip_quant,
name=name)
skip_quant=skip_quant)
norm_lr = 0. if freeze_norm else 1.
param_attr = ParamAttr(
name=norm_name + "_scale",
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay))
learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
bias_attr = ParamAttr(
name=norm_name + "_offset",
learning_rate=norm_lr,
regularizer=L2Decay(norm_decay))
learning_rate=norm_lr, regularizer=L2Decay(norm_decay))
if norm_type == 'bn':
self.norm = nn.BatchNorm2D(
ch_out, weight_attr=param_attr, bias_attr=bias_attr)
......@@ -215,27 +202,21 @@ class LiteConv(nn.Layer):
stride=stride,
groups=in_channels,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv1.norm',
name=name + '.conv1')
initializer=XavierUniform())
conv2 = ConvNormLayer(
in_channels,
out_channels,
filter_size=1,
stride=stride,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv2.norm',
name=name + '.conv2')
initializer=XavierUniform())
conv3 = ConvNormLayer(
out_channels,
out_channels,
filter_size=1,
stride=stride,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv3.norm',
name=name + '.conv3')
initializer=XavierUniform())
conv4 = ConvNormLayer(
out_channels,
out_channels,
......@@ -243,9 +224,7 @@ class LiteConv(nn.Layer):
stride=stride,
groups=out_channels,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv4.norm',
name=name + '.conv4')
initializer=XavierUniform())
conv_list = [conv1, conv2, conv3, conv4]
self.lite_conv.add_sublayer('conv1', conv1)
self.lite_conv.add_sublayer('relu6_1', nn.ReLU6())
......
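With the `ConvNormLayer` and `LiteConv` changes above, neither `name` nor `norm_name` is accepted any longer; the conv and norm parameter attrs are built anonymously. A trimmed-down construction sketch, assuming `ConvNormLayer` is importable from `ppdet.modeling.layers` and using illustrative channel counts:

```python
# Minimal sketch, assuming ConvNormLayer is importable from
# ppdet.modeling.layers; channel counts are illustrative.
from ppdet.modeling.layers import ConvNormLayer

conv = ConvNormLayer(
    ch_in=256,
    ch_out=256,
    filter_size=3,
    stride=1,
    norm_type='gn',
    bias_on=True,
    lr_scale=2.)  # no `name` or `norm_name` arguments any more
```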
......@@ -105,10 +105,8 @@ class FPN(nn.Layer):
stride=1,
norm_type=self.norm_type,
norm_decay=self.norm_decay,
norm_name=lateral_name + '_norm',
freeze_norm=self.freeze_norm,
initializer=XavierUniform(fan_out=in_c),
name=lateral_name))
initializer=XavierUniform(fan_out=in_c)))
else:
lateral = self.add_sublayer(
lateral_name,
......@@ -131,10 +129,8 @@ class FPN(nn.Layer):
stride=1,
norm_type=self.norm_type,
norm_decay=self.norm_decay,
norm_name=fpn_name + '_norm',
freeze_norm=self.freeze_norm,
initializer=XavierUniform(fan_out=fan),
name=fpn_name))
initializer=XavierUniform(fan_out=fan)))
else:
fpn_conv = self.add_sublayer(
fpn_name,
......@@ -166,10 +162,8 @@ class FPN(nn.Layer):
stride=2,
norm_type=self.norm_type,
norm_decay=self.norm_decay,
norm_name=extra_fpn_name + '_norm',
freeze_norm=self.freeze_norm,
initializer=XavierUniform(fan_out=fan),
name=extra_fpn_name))
initializer=XavierUniform(fan_out=fan)))
else:
extra_fpn_conv = self.add_sublayer(
extra_fpn_name,
......
......@@ -29,7 +29,7 @@ __all__ = ['TTFFPN']
class Upsample(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn', name=None):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(Upsample, self).__init__()
fan_in = ch_in * 3 * 3
stdv = 1. / math.sqrt(fan_in)
......@@ -46,7 +46,7 @@ class Upsample(nn.Layer):
regularizer=L2Decay(0.))
self.bn = batch_norm(
ch_out, norm_type=norm_type, initializer=Constant(1.), name=name)
ch_out, norm_type=norm_type, initializer=Constant(1.))
def forward(self, feat):
dcn = self.dcn(feat)
......@@ -57,7 +57,7 @@ class Upsample(nn.Layer):
class DeConv(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn', name=None):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(DeConv, self).__init__()
self.deconv = nn.Sequential()
conv1 = ConvNormLayer(
......@@ -66,9 +66,7 @@ class DeConv(nn.Layer):
stride=1,
filter_size=1,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv1.norm',
name=name + '.conv1')
initializer=XavierUniform())
conv2 = nn.Conv2DTranspose(
in_channels=ch_out,
out_channels=ch_out,
......@@ -78,17 +76,14 @@ class DeConv(nn.Layer):
groups=ch_out,
weight_attr=ParamAttr(initializer=XavierUniform()),
bias_attr=False)
bn = batch_norm(
ch_out, norm_type=norm_type, norm_decay=0., name=name + '.bn')
bn = batch_norm(ch_out, norm_type=norm_type, norm_decay=0.)
conv3 = ConvNormLayer(
ch_in=ch_out,
ch_out=ch_out,
stride=1,
filter_size=1,
norm_type=norm_type,
initializer=XavierUniform(),
norm_name=name + '.conv3.norm',
name=name + '.conv3')
initializer=XavierUniform())
self.deconv.add_sublayer('conv1', conv1)
self.deconv.add_sublayer('relu6_1', nn.ReLU6())
......@@ -103,12 +98,10 @@ class DeConv(nn.Layer):
class LiteUpsample(nn.Layer):
def __init__(self, ch_in, ch_out, norm_type='bn', name=None):
def __init__(self, ch_in, ch_out, norm_type='bn'):
super(LiteUpsample, self).__init__()
self.deconv = DeConv(
ch_in, ch_out, norm_type=norm_type, name=name + '.deconv')
self.conv = LiteConv(
ch_in, ch_out, norm_type=norm_type, name=name + '.liteconv')
self.deconv = DeConv(ch_in, ch_out, norm_type=norm_type)
self.conv = LiteConv(ch_in, ch_out, norm_type=norm_type)
def forward(self, inputs):
deconv_up = self.deconv(inputs)
......@@ -139,8 +132,7 @@ class ShortCut(nn.Layer):
in_channels=in_channels,
out_channels=ch_out,
with_act=i < layer_num - 1,
norm_type=norm_type,
name=shortcut_name))
norm_type=norm_type))
else:
shortcut_conv.add_sublayer(
shortcut_name,
......@@ -155,7 +147,7 @@ class ShortCut(nn.Layer):
if i < layer_num - 1:
shortcut_conv.add_sublayer(shortcut_name + '.act',
nn.ReLU())
self.shortcut = self.add_sublayer('short', shortcut_conv)
self.shortcut = self.add_sublayer('shortcut', shortcut_conv)
def forward(self, feat):
out = self.shortcut(feat)
......@@ -208,10 +200,7 @@ class TTFFPN(nn.Layer):
upsample = self.add_sublayer(
'upsample.' + str(i),
upsample_module(
in_c,
out_c,
norm_type=norm_type,
name='deconv_layers.' + str(i)))
in_c, out_c, norm_type=norm_type))
self.upsample_list.append(upsample)
if i < self.shortcut_len:
shortcut = self.add_sublayer(
......
......@@ -25,9 +25,9 @@ from ..shape_spec import ShapeSpec
__all__ = ['YOLOv3FPN', 'PPYOLOFPN']
def add_coord(x):
def add_coord(x, data_format):
b = x.shape[0]
if self.data_format == 'NCHW':
if data_format == 'NCHW':
h = x.shape[2]
w = x.shape[3]
else:
......@@ -35,14 +35,14 @@ def add_coord(x):
w = x.shape[2]
gx = paddle.arange(w, dtype='float32') / (w - 1.) * 2.0 - 1.
if self.data_format == 'NCHW':
if data_format == 'NCHW':
gx = gx.reshape([1, 1, 1, w]).expand([b, 1, h, w])
else:
gx = gx.reshape([1, 1, w, 1]).expand([b, h, w, 1])
gx.stop_gradient = True
gy = paddle.arange(h, dtype='float32') / (h - 1.) * 2.0 - 1.
if self.data_format == 'NCHW':
if data_format == 'NCHW':
gy = gy.reshape([1, 1, h, 1]).expand([b, 1, h, w])
else:
gy = gy.reshape([1, h, 1, 1]).expand([b, h, w, 1])
......@@ -237,7 +237,7 @@ class CoordConv(nn.Layer):
self.data_format = data_format
def forward(self, x):
gx, gy = add_coord(x)
gx, gy = add_coord(x, self.data_format)
if self.data_format == 'NCHW':
y = paddle.concat([x, gx, gy], axis=1)
else:
......@@ -509,7 +509,7 @@ class PPYOLOFPN(nn.Layer):
norm_type='bn',
data_format='NCHW',
coord_conv=False,
conv_block_num=3,
conv_block_num=2,
drop_block=False,
block_size=3,
keep_prob=0.9,
......
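The yolo_fpn hunk also fixes a latent bug: `add_coord` is a module-level function but referenced `self.data_format`, which does not exist there; it now receives `data_format` explicitly and `CoordConv.forward` passes it in. A minimal sketch of the corrected call, with an assumed import path:

```python
# Minimal sketch; the import path is assumed. This mirrors what
# CoordConv.forward does after the fix.
import paddle
from ppdet.modeling.necks.yolo_fpn import add_coord  # path assumed

x = paddle.rand([2, 64, 13, 13])            # NCHW feature map
gx, gy = add_coord(x, data_format='NCHW')   # normalized coordinate grids
y = paddle.concat([x, gx, gy], axis=1)      # 64 + 2 channels
```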
......@@ -53,9 +53,7 @@ def batch_norm(ch,
norm_type='bn',
norm_decay=0.,
initializer=None,
name=None,
data_format='NCHW'):
bn_name = name + '.bn'
if norm_type == 'sync_bn':
batch_norm = nn.SyncBatchNorm
else:
......
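Lastly, the `batch_norm` helper drops its `name` argument and the derived `bn_name` prefix. A sketch of the trimmed call, assuming the remaining signature shown in the hunk and an assumed import path:

```python
# Minimal sketch; the import path and the remaining signature
# (ch, norm_type, norm_decay, initializer, data_format) are assumed from
# the hunk above.
from ppdet.modeling.ops import batch_norm  # path assumed

bn = batch_norm(64, norm_type='bn', norm_decay=0., data_format='NCHW')
```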