Commit 6ca975c1 authored by: M michaelowenliu

remove extra line break

Parent 5db7e067
@@ -73,7 +73,6 @@ class ANN(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -154,7 +153,6 @@ class ANNHead(nn.Layer):
         self.init_weight()
     def forward(self, feat_list):
         logit_list = []
         low_level_x = feat_list[self.backbone_indices[0]]
         high_level_x = feat_list[self.backbone_indices[1]]
......
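For reference, the forward pattern these models share (backbone produces a feature list, the head turns it into logits, the model resizes each logit back to the input size) can be sketched as below. This is a minimal illustration assuming Paddle 2.x; DummySegModel and its constructor arguments are hypothetical stand-ins, not the repository's exact classes.

import paddle.nn as nn
import paddle.nn.functional as F

class DummySegModel(nn.Layer):
    def __init__(self, backbone, head):
        super().__init__()
        self.backbone = backbone
        self.head = head

    def forward(self, x):
        feat_list = self.backbone(x)       # list of multi-scale feature maps
        logit_list = self.head(feat_list)  # list of raw logits at reduced stride
        # Resize every logit back to the input resolution, as the models in
        # this diff do before returning.
        return [
            F.interpolate(logit, x.shape[2:], mode='bilinear')
            for logit in logit_list
        ]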
@@ -53,7 +53,6 @@ class Activation(nn.Layer):
                     act, act_dict.keys()))
     def forward(self, x):
         if self._act is not None:
             return self.act_func(x)
         else:
......
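A hedged sketch of how an Activation wrapper like the one above can dispatch by name: the act_dict contents and error message below are illustrative assumptions, not the library's exact table, but the conditional forward matches the diffed code.

import paddle.nn as nn

class Activation(nn.Layer):
    def __init__(self, act=None):
        super().__init__()
        # Assumed subset of the real name-to-layer mapping.
        act_dict = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid}
        self._act = act
        if act is not None:
            if act not in act_dict:
                raise KeyError("{} not in supported activations {}".format(
                    act, act_dict.keys()))
            self.act_func = act_dict[act]()

    def forward(self, x):
        if self._act is not None:
            return self.act_func(x)
        else:
            return x  # identity when no activation is configured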
@@ -44,7 +44,6 @@ class ASPPModule(nn.Layer):
         self.aspp_blocks = []
         for ratio in aspp_ratios:
             if sep_conv and ratio > 1:
                 conv_func = layer_libs.SeparableConvBNReLU
             else:
@@ -76,7 +75,6 @@ class ASPPModule(nn.Layer):
         self.dropout = nn.Dropout(p=0.1)  # drop rate
     def forward(self, x):
         outputs = []
         for block in self.aspp_blocks:
             y = block(x)
@@ -141,7 +139,6 @@ class PPModule(nn.Layer):
     After pooling, the channels are reduced to 1/len(bin_sizes) immediately, while some other implementations
     keep the channels the same.
     Args:
         in_channels (int): the number of input channels to the pyramid pooling module.
         size (int): the out size of the pooled layer.
......
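The PPModule docstring above notes that each pooled branch immediately reduces channels to 1/len(bin_sizes). A minimal sketch of that pool-then-reduce design, assuming Paddle 2.x; the real module uses ConvBNReLU blocks, which a plain Conv2D + ReLU stands in for here, and TinyPPModule is a hypothetical name.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class TinyPPModule(nn.Layer):
    def __init__(self, in_channels, bin_sizes=(1, 2, 3, 6)):
        super().__init__()
        # Reduce channels to 1/len(bin_sizes) right after each pooling.
        reduced = in_channels // len(bin_sizes)
        self.stages = nn.LayerList([
            nn.Sequential(
                nn.AdaptiveAvgPool2D(output_size=size),
                nn.Conv2D(in_channels, reduced, kernel_size=1),
                nn.ReLU())
            for size in bin_sizes
        ])

    def forward(self, x):
        h, w = x.shape[2:]
        # Upsample every pooled branch back to the input size and concatenate
        # with the input, as PSP-style modules do.
        outs = [x] + [
            F.interpolate(stage(x), (h, w), mode='bilinear')
            for stage in self.stages
        ]
        return paddle.concat(outs, axis=1)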
@@ -73,7 +73,6 @@ class DeepLabV3P(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -122,7 +121,6 @@ class DeepLabV3PHead(nn.Layer):
         self.init_weight()
     def forward(self, feat_list):
         logit_list = []
         low_level_feat = feat_list[self.backbone_indices[0]]
         x = feat_list[self.backbone_indices[1]]
@@ -171,7 +169,6 @@ class DeepLabV3(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -205,9 +202,7 @@ class DeepLabV3Head(nn.Layer):
         self.init_weight()
     def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[0]]
         x = self.aspp(x)
         logit = self.cls(x)
......
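The DeepLabV3PHead hunk above selects a low-level and a high-level feature by backbone_indices and fuses them decoder-style. A hedged sketch of that flow, assuming Paddle 2.x: a 1x1 conv stands in for the real ASPPModule, the 256/48 channel widths follow the common DeepLabV3+ convention rather than this repository's exact values, and TinyV3PHead is a hypothetical name.

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class TinyV3PHead(nn.Layer):
    def __init__(self, low_channels, high_channels, num_classes,
                 backbone_indices=(0, 3)):
        super().__init__()
        self.backbone_indices = backbone_indices
        self.aspp = nn.Conv2D(high_channels, 256, kernel_size=1)  # stand-in
        self.low_proj = nn.Conv2D(low_channels, 48, kernel_size=1)
        self.cls = nn.Conv2D(256 + 48, num_classes, kernel_size=1)

    def forward(self, feat_list):
        low_level_feat = feat_list[self.backbone_indices[0]]
        x = feat_list[self.backbone_indices[1]]
        x = self.aspp(x)
        # Upsample to the low-level resolution, concatenate, then classify.
        x = F.interpolate(x, low_level_feat.shape[2:], mode='bilinear')
        x = paddle.concat([x, self.low_proj(low_level_feat)], axis=1)
        return [self.cls(x)]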
@@ -61,7 +61,6 @@ class FastSCNN(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input, label=None):
         logit_list = []
         higher_res_features = self.learning_to_downsample(input)
         x = self.global_feature_extractor(higher_res_features)
@@ -274,9 +273,7 @@ class FeatureFusionModule(nn.Layer):
         low_res_input = F.resize_bilinear(input=low_res_input, scale=4)
         low_res_input = self.dwconv(low_res_input)
         low_res_input = self.conv_low_res(low_res_input)
         high_res_input = self.conv_high_res(high_res_input)
         x = high_res_input + low_res_input
         return self.relu(x)
......
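A minimal sketch of the FeatureFusionModule step above, assuming Paddle 2.x, where F.interpolate(scale_factor=4) replaces the legacy F.resize_bilinear call shown in the diff; the conv blocks are simplified stand-ins for the real ConvBN layers and TinyFusion is a hypothetical name.

import paddle.nn as nn
import paddle.nn.functional as F

class TinyFusion(nn.Layer):
    def __init__(self, low_channels, high_channels, out_channels):
        super().__init__()
        # Depthwise conv applied to the upsampled low-resolution branch.
        self.dwconv = nn.Conv2D(low_channels, low_channels, kernel_size=3,
                                padding=1, groups=low_channels)
        self.conv_low_res = nn.Conv2D(low_channels, out_channels, kernel_size=1)
        self.conv_high_res = nn.Conv2D(high_channels, out_channels, kernel_size=1)
        self.relu = nn.ReLU()

    def forward(self, high_res_input, low_res_input):
        # Upsample the low-res branch x4 to match the high-res branch.
        low_res_input = F.interpolate(low_res_input, scale_factor=4,
                                      mode='bilinear')
        low_res_input = self.dwconv(low_res_input)
        low_res_input = self.conv_low_res(low_res_input)
        high_res_input = self.conv_high_res(high_res_input)
        x = high_res_input + low_res_input  # element-wise fusion
        return self.relu(x)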
@@ -70,7 +70,6 @@ class GCNet(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -142,7 +141,6 @@ class GCNetHead(nn.Layer):
         self.init_weight()
     def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[1]]
......
@@ -70,7 +70,6 @@ class PSPNet(nn.Layer):
         utils.load_entire_model(self, pretrained)
     def forward(self, input):
         feat_list = self.backbone(input)
         logit_list = self.head(feat_list)
         return [
@@ -130,9 +129,7 @@ class PSPNetHead(nn.Layer):
         self.init_weight()
     def forward(self, feat_list):
         logit_list = []
         x = feat_list[self.backbone_indices[1]]
         x = self.psp_module(x)
         x = F.dropout(x, p=0.1)  # dropout_prob
......
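A hedged sketch of the PSPNetHead forward shown above, assuming Paddle 2.x: a 3x3 conv stands in for the real pyramid pooling module (see the PPModule sketch earlier), the 1x1 classifier is an assumption, and TinyPSPHead is a hypothetical name; only the feature selection and the p=0.1 dropout mirror the diffed code directly.

import paddle.nn as nn
import paddle.nn.functional as F

class TinyPSPHead(nn.Layer):
    def __init__(self, in_channels, num_classes, backbone_index=1):
        super().__init__()
        self.backbone_index = backbone_index
        # Stand-in for the pyramid pooling module.
        self.psp_module = nn.Conv2D(in_channels, in_channels,
                                    kernel_size=3, padding=1)
        self.conv = nn.Conv2D(in_channels, num_classes, kernel_size=1)

    def forward(self, feat_list):
        x = feat_list[self.backbone_index]
        x = self.psp_module(x)
        x = F.dropout(x, p=0.1)  # same drop rate as in the diff above
        return [self.conv(x)]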