Commit 6ca975c1 authored by M michaelowenliu

remove extra line break

Parent 5db7e067
@@ -73,7 +73,6 @@ class ANN(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -154,7 +153,6 @@ class ANNHead(nn.Layer):
         self.init_weight()

-
     def forward(self, feat_list):
         logit_list = []
         low_level_x = feat_list[self.backbone_indices[0]]
         high_level_x = feat_list[self.backbone_indices[1]]
......
@@ -53,7 +53,6 @@ class Activation(nn.Layer):
                     act, act_dict.keys()))

-
     def forward(self, x):
         if self._act is not None:
             return self.act_func(x)
         else:
......
@@ -44,7 +44,6 @@ class ASPPModule(nn.Layer):
         self.aspp_blocks = []

-
         for ratio in aspp_ratios:
             if sep_conv and ratio > 1:
                 conv_func = layer_libs.SeparableConvBNReLU
             else:
@@ -76,7 +75,6 @@ class ASPPModule(nn.Layer):
         self.dropout = nn.Dropout(p=0.1)  # drop rate

-
     def forward(self, x):
         outputs = []
         for block in self.aspp_blocks:
             y = block(x)
@@ -141,7 +139,6 @@ class PPModule(nn.Layer):
     After pooling, the channels are reduced to 1/len(bin_sizes) immediately, while some other implementations
     keep the channels the same.

-
     Args:
         in_channels (int): the number of input channels to the pyramid pooling module.
         size (int): the out size of the pooled layer.
......
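A note on the docstring in the hunk above: a minimal sketch in plain Python of the branch-width arithmetic it describes (the helper name is hypothetical, not part of PPModule), for implementations that divide the channels evenly across pyramid levels rather than keeping each branch at the full input width.

# Hypothetical helper illustrating the 1/len(bin_sizes) channel reduction
# described in the PPModule docstring; not PaddleSeg code.
def branch_channels(in_channels, bin_sizes):
    # Each pooled pyramid level is immediately projected down to an equal
    # share of the input channels.
    return in_channels // len(bin_sizes)

# Example: a 2048-channel backbone feature with PSPNet-style bin sizes
# (1, 2, 3, 6) yields 512 channels per branch, so concatenating the input
# with the four upsampled branches gives 2048 + 4 * 512 = 4096 channels.
assert branch_channels(2048, (1, 2, 3, 6)) == 512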
@@ -73,7 +73,6 @@ class DeepLabV3P(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -122,7 +121,6 @@ class DeepLabV3PHead(nn.Layer):
         self.init_weight()

-
     def forward(self, feat_list):
         logit_list = []
         low_level_feat = feat_list[self.backbone_indices[0]]
         x = feat_list[self.backbone_indices[1]]
@@ -171,7 +169,6 @@ class DeepLabV3(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -205,9 +202,7 @@ class DeepLabV3Head(nn.Layer):
         self.init_weight()

-
-
     def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[0]]
         x = self.aspp(x)
         logit = self.cls(x)
......
@@ -61,7 +61,6 @@ class FastSCNN(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input, label=None):
         logit_list = []
         higher_res_features = self.learning_to_downsample(input)
         x = self.global_feature_extractor(higher_res_features)
@@ -274,9 +273,7 @@ class FeatureFusionModule(nn.Layer):
         low_res_input = F.resize_bilinear(input=low_res_input, scale=4)
         low_res_input = self.dwconv(low_res_input)
         low_res_input = self.conv_low_res(low_res_input)

-
-
         high_res_input = self.conv_high_res(high_res_input)
         x = high_res_input + low_res_input
         return self.relu(x)
......
@@ -70,7 +70,6 @@ class GCNet(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -142,7 +141,6 @@ class GCNetHead(nn.Layer):
         self.init_weight()

-
     def forward(self, feat_list):
         logit_list = []

         x = feat_list[self.backbone_indices[1]]
......
@@ -70,7 +70,6 @@ class PSPNet(nn.Layer):
             utils.load_entire_model(self, pretrained)

-
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -130,9 +129,7 @@ class PSPNetHead(nn.Layer):
         self.init_weight()

-
-
     def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[1]]
         x = self.psp_module(x)
         x = F.dropout(x, p=0.1)  # dropout_prob
......
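Taken together, the model-level hunks above (ANN, DeepLabV3P, DeepLabV3, GCNet, PSPNet) share one forward pattern: the backbone produces a list of feature maps, the head maps selected features to a list of logits, and the truncated `return [` presumably assembles the final outputs. A minimal standalone sketch of that pattern follows (plain Python; `Segmenter`, `backbone`, and `head` are hypothetical stand-ins, and the elided return body is not reproduced).

# Hypothetical sketch of the backbone -> head pipeline shared by the
# forward() methods above; not a PaddleSeg class.
class Segmenter:
    def __init__(self, backbone, head):
        self.backbone = backbone  # callable: image -> list of feature maps
        self.head = head          # callable: feature list -> list of logits

    def forward(self, input):
        feat_list = self.backbone(input)
        logit_list = self.head(feat_list)
        return logit_list  # the diffs' elided `return [` post-processes these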