Commit 7a4b2b1f authored by littletomatodonkey

fix layer helper

Parent 32dc1c1c
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import MSRA
from paddle.fluid.contrib.model_stat import summary
__all__ = ["GhostNet", "GhostNet_0_5", "GhostNet_1_0", "GhostNet_1_3"]
class GhostNet():
def __init__(self, width_mult):
cfgs = [
# k, t, c, SE, s
[3, 16, 16, 0, 1],
[3, 48, 24, 0, 2],
[3, 72, 24, 0, 1],
[5, 72, 40, 1, 2],
[5, 120, 40, 1, 1],
[3, 240, 80, 0, 2],
[3, 200, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 184, 80, 0, 1],
[3, 480, 112, 1, 1],
[3, 672, 112, 1, 1],
[5, 672, 160, 1, 2],
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1],
[5, 960, 160, 0, 1],
[5, 960, 160, 1, 1]
]
self.cfgs = cfgs
self.width_mult = width_mult
def _make_divisible(self, v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
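# Illustrative values (editor's note, not in the original source):
# _make_divisible(16 * 0.5, 4) == 8 and _make_divisible(120 * 1.3, 4) == 156.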
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None,
data_format="NCHW"):
print("conv bn, num_filters: {}, filter_size: {}, stride: {}".format(
num_filters, filter_size, stride))
x = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(
initializer=fluid.initializer.MSRA(), name=name + "_weights"),
bias_attr=False,
name=name + "_conv_op",
data_format=data_format)
x = fluid.layers.batch_norm(
input=x,
act=act,
name=name + "_bn",
param_attr=ParamAttr(name=name + "_bn_scale"),
bias_attr=ParamAttr(name=name + "_bn_offset"),
moving_mean_name=name + "_bn_mean",
moving_variance_name=name + "_bn_variance",
data_layout=data_format)
return x
def SElayer(self, input, num_channels, reduction_ratio=4, name=None):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(
input=pool,
size=num_channels // reduction_ratio,
act='relu',
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + '_sqz_weights'),
bias_attr=ParamAttr(name=name + '_sqz_offset'))
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(
input=squeeze,
size=num_channels,
act=None,
param_attr=fluid.param_attr.ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + '_exc_weights'),
bias_attr=ParamAttr(name=name + '_exc_offset'))
excitation = fluid.layers.clip(
x=excitation, min=0, max=1, name=name + '_clip')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def depthwise_conv(self,
inp,
oup,
kernel_size,
stride=1,
relu=False,
name=None,
data_format="NCHW"):
return self.conv_bn_layer(
input=inp,
num_filters=oup,
filter_size=kernel_size,
stride=stride,
groups=inp.shape[1] if data_format == "NCHW" else inp.shape[-1],
act="relu" if relu else None,
name=name + "_dw",
data_format=data_format)
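# GhostModule: a "primary" conv produces init_channels feature maps, a cheap
# depthwise conv generates the remaining "ghost" maps, and the concatenation
# is sliced back to the requested oup channels.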
def GhostModule(self,
inp,
oup,
kernel_size=1,
ratio=2,
dw_size=3,
stride=1,
relu=True,
name=None,
data_format="NCHW"):
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels * (ratio - 1)
primary_conv = self.conv_bn_layer(
input=inp,
num_filters=init_channels,
filter_size=kernel_size,
stride=stride,
groups=1,
act="relu" if relu else None,
name=name + "_primary_conv",
data_format="NCHW")
cheap_operation = self.conv_bn_layer(
input=primary_conv,
num_filters=new_channels,
filter_size=dw_size,
stride=1,
groups=init_channels,
act="relu" if relu else None,
name=name + "_cheap_operation",
data_format=data_format)
out = fluid.layers.concat(
[primary_conv, cheap_operation], axis=1, name=name + "_concat")
return out[:, :self.oup, :, :]
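# GhostBottleneck: expand with a GhostModule, optionally apply a stride-2
# depthwise conv and an SE block, project with a second GhostModule, and add
# a shortcut (depthwise conv + 1x1 conv-bn when stride or channels differ).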
def GhostBottleneck(self,
inp,
hidden_dim,
oup,
kernel_size,
stride,
use_se,
name=None,
data_format="NCHW"):
inp_channels = inp.shape[1]
x = self.GhostModule(
inp=inp,
oup=hidden_dim,
kernel_size=1,
stride=1,
relu=True,
name=name + "GhostBottle_1",
data_format="NCHW")
if stride == 2:
x = self.depthwise_conv(
inp=x,
oup=hidden_dim,
kernel_size=kernel_size,
stride=stride,
relu=False,
name=name + "_dw2",
data_format="NCHW")
if use_se:
x = self.SElayer(
input=x, num_channels=hidden_dim, name=name + "SElayer")
x = self.GhostModule(
inp=x,
oup=oup,
kernel_size=1,
relu=False,
name=name + "GhostModule_2")
if stride == 1 and inp_channels == oup:
shortcut = inp
else:
shortcut = self.depthwise_conv(
inp=inp,
oup=inp_channels,
kernel_size=kernel_size,
stride=stride,
relu=False,
name=name + "shortcut_depthwise_conv",
data_format="NCHW")
shortcut = self.conv_bn_layer(
input=shortcut,
num_filters=oup,
filter_size=1,
stride=1,
groups=1,
act=None,
name=name + "shortcut_conv_bn",
data_format="NCHW")
return fluid.layers.elementwise_add(
x=x, y=shortcut, axis=-1, act=None, name=name + "elementwise_add")
def net(self, input, class_dim=1000):
# build first layer:
output_channel = self._make_divisible(16 * self.width_mult, 4)
x = self.conv_bn_layer(
input=input,
num_filters=output_channel,
filter_size=3,
stride=2,
groups=1,
act="relu",
name="firstlayer",
data_format="NCHW")
input_channel = output_channel
# build inverted residual blocks
idx = 0
for k, exp_size, c, use_se, s in self.cfgs:
output_channel = self._make_divisible(c * self.width_mult, 4)
hidden_channel = self._make_divisible(exp_size * self.width_mult,
4)
x = self.GhostBottleneck(
inp=x,
hidden_dim=hidden_channel,
oup=output_channel,
kernel_size=k,
stride=s,
use_se=use_se,
name="GhostBottle_" + str(idx),
data_format="NCHW")
input_channel = output_channel
idx += 1
# build last several layers
output_channel = self._make_divisible(exp_size * self.width_mult, 4)
x = self.conv_bn_layer(
input=x,
num_filters=output_channel,
filter_size=1,
stride=1,
groups=1,
act="relu",
name="lastlayer",
data_format="NCHW")
x = fluid.layers.pool2d(
input=x, pool_type='avg', global_pooling=True, data_format="NCHW")
input_channel = output_channel
output_channel = 1280
stdv = 1.0 / math.sqrt(x.shape[1] * 1.0)
out = fluid.layers.conv2d(
input=x,
num_filters=output_channel,
filter_size=1,
groups=1,
param_attr=ParamAttr(
name="fc_0_w",
initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=False,
name="fc_0")
out = fluid.layers.batch_norm(
input=out,
act="relu",
name="fc_0_bn",
param_attr=ParamAttr(name="fc_0_bn_scale"),
bias_attr=ParamAttr(name="fc_0_bn_offset"),
moving_mean_name="fc_0_bn_mean",
moving_variance_name="fc_0_bn_variance",
data_layout="NCHW")
out = fluid.layers.dropout(x=out, dropout_prob=0.2)
stdv = 1.0 / math.sqrt(out.shape[1] * 1.0)
out = fluid.layers.fc(
input=out,
size=class_dim,
param_attr=ParamAttr(
name="fc_1_w",
initializer=fluid.initializer.Uniform(-stdv, stdv)),
bias_attr=ParamAttr(name="fc_1_bias"))
return out
def GhostNet_0_5():
model = GhostNet(width_mult=0.5)
return model
def GhostNet_1_0():
model = GhostNet(width_mult=1.0)
return model
def GhostNet_1_3():
model = GhostNet(width_mult=1.3)
return model
if __name__ == "__main__":
# from calc_flops import summary
image = fluid.data(name='image', shape=[-1, 3, 224, 224], dtype='float32')
model = GhostNet_1_3()
out = model.net(input=image, class_dim=1000)
test_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
total_flops_params, is_quantize = summary(test_program)
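For reference, a minimal standalone sketch (editor's illustration, not part of the commit) of the channel arithmetic inside GhostModule above: the primary conv produces ceil(oup / ratio) channels, the cheap depthwise conv adds init_channels * (ratio - 1) more, and the concatenation is sliced back to oup.

import math

def ghost_channel_split(oup, ratio=2):
    # Mirrors GhostModule: (primary conv width, cheap depthwise conv width).
    init_channels = int(math.ceil(oup / ratio))
    new_channels = init_channels * (ratio - 1)
    return init_channels, new_channels

print(ghost_channel_split(16))   # (8, 8)   -> concat gives 16, sliced to oup=16
print(ghost_channel_split(960))  # (480, 480)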
@@ -24,9 +24,11 @@ from .se_resnext_vd import SE_ResNeXt50_vd_32x4d, SE_ResNeXt50_vd_32x4d, SENet15
from .dpn import DPN68
from .densenet import DenseNet121
from .hrnet import HRNet_W18_C
+from .efficientnet import EfficientNetB0
+from .googlenet import GoogLeNet
from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75, MobileNetV1
from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0
from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
from .distillation_models import ResNet50_vd_distill_MobileNetV3_large_x1_0
\ No newline at end of file
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
...
@@ -21,7 +21,6 @@ import sys
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math
...
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
import collections
@@ -491,6 +490,7 @@ class SEBlock(fluid.dygraph.Layer):
num_squeezed_channels,
oup,
1,
+act="sigmoid",
use_bias=True,
padding_type=padding_type,
name=name + "_se_expand")
@@ -499,8 +499,6 @@ class SEBlock(fluid.dygraph.Layer):
x = self._pool(inputs)
x = self._conv1(x)
x = self._conv2(x)
-layer_helper = LayerHelper(self.full_name(), act='sigmoid')
-x = layer_helper.append_activation(x)
return fluid.layers.elementwise_mul(inputs, x)
@@ -565,18 +563,17 @@ class MbConvBlock(fluid.dygraph.Layer):
def forward(self, inputs):
x = inputs
-layer_helper = LayerHelper(self.full_name(), act='swish')
if self.expand_ratio != 1:
x = self._ecn(x)
-x = layer_helper.append_activation(x)
+x = fluid.layers.swish(x)
x = self._dcn(x)
-x = layer_helper.append_activation(x)
+x = fluid.layers.swish(x)
if self.has_se:
x = self._se(x)
x = self._pcn(x)
if self.id_skip and \
self.block_args.stride == 1 and \
self.block_args.input_filters == self.block_args.output_filters:
if self.drop_connect_rate:
x = _drop_connect(x, self.drop_connect_rate, self.is_test)
x = fluid.layers.elementwise_add(x, inputs)
@@ -697,8 +694,7 @@ class ExtractFeatures(fluid.dygraph.Layer):
def forward(self, inputs):
x = self._conv_stem(inputs)
-layer_helper = LayerHelper(self.full_name(), act='swish')
-x = layer_helper.append_activation(x)
+x = fluid.layers.swish(x)
for _mc_block in self.conv_seq:
x = _mc_block(x)
return x
@@ -914,4 +910,4 @@ def EfficientNetB7(is_test=False,
override_params=override_params,
use_se=use_se,
**args)
return model
\ No newline at end of file
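The change across these dygraph models follows a single pattern: drop the LayerHelper indirection and call the activation op directly. A minimal dygraph sketch of the replacement (editor's illustration with made-up tensor shapes, not code from the commit):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    x = fluid.dygraph.to_variable(
        np.random.uniform(-1, 1, [1, 8, 16, 16]).astype('float32'))
    # Before: layer_helper = LayerHelper(self.full_name(), act='swish')
    #         x = layer_helper.append_activation(x)
    # After: call the activation op directly on the tensor.
    y = fluid.layers.swish(x)
    print(y.shape)  # [1, 8, 16, 16]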
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math
-__all__ = ['GoogLeNet_DY']
+__all__ = ['GoogLeNet']
def xavier(channels, filter_size, name):
stdv = (3.0 / (filter_size**2 * channels))**0.5
param_attr = ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv),
name=name + "_weights")
return param_attr
@@ -90,8 +89,8 @@ class Inception(fluid.dygraph.Layer):
convprj = self._convprj(pool)
cat = fluid.layers.concat([conv1, conv3, conv5, convprj], axis=1)
-layer_helper = LayerHelper(self.full_name(), act="relu")
-return layer_helper.append_activation(cat)
+cat = fluid.layers.relu(cat)
+return cat
class GoogleNetDY(fluid.dygraph.Layer):
@@ -205,4 +204,4 @@ class GoogleNetDY(fluid.dygraph.Layer):
def GoogLeNet(**args):
model = GoogleNetDY(**args)
return model
\ No newline at end of file
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math
@@ -495,8 +494,7 @@ class FuseLayers(fluid.dygraph.Layer):
residual = fluid.layers.elementwise_add(
x=residual, y=y, act=None)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-residual = layer_helper.append_activation(residual)
+residual = fluid.layers.relu(residual)
outs.append(residual)
return outs
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
from paddle.fluid.initializer import MSRA
import math
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -143,9 +142,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class Res2Net(fluid.dygraph.Layer):
...
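In the residual blocks the same cleanup also fuses the ReLU into elementwise_add through its act argument. A small dygraph check of that fused form (editor's illustration, values made up):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    short = fluid.dygraph.to_variable(np.full([1, 4, 2, 2], 1.0, dtype='float32'))
    conv2 = fluid.dygraph.to_variable(np.full([1, 4, 2, 2], -2.0, dtype='float32'))
    # elementwise_add(..., act='relu') applies ReLU to the sum in one op.
    y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
    print(float(y.numpy().max()))  # 0.0, since ReLU clamps the negative sum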
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -47,7 +46,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D(
-pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg', ceil_mode=True)
+pool_size=2,
+pool_stride=2,
+pool_padding=0,
+pool_type='avg',
+ceil_mode=True)
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
@@ -150,9 +153,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class Res2Net_vd(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -118,10 +117,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act="relu")
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act="relu")
+return y
class BasicBlock(fluid.dygraph.Layer):
@@ -165,10 +162,8 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv1)
-layer_helper = LayerHelper(self.full_name(), act="relu")
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv1, act="relu")
+return y
class ResNet(fluid.dygraph.Layer):
...
import numpy as np
import argparse
import ast
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework
import math
import sys
import time
class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
filter_size,
stride=1,
groups=1,
act=None,
name=None):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
if name == "conv1":
bn_name = "bn_" + name
else:
bn_name = "bn" + name[3:]
self._batch_norm = BatchNorm(num_filters,
act=act,
param_attr=ParamAttr(name=bn_name + '_scale'),
bias_attr=ParamAttr(bn_name + '_offset'),
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance')
def forward(self, inputs):
y = self._conv(inputs)
y = self._batch_norm(y)
return y
class BottleneckBlock(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters,
stride,
shortcut=True,
name=None):
super(BottleneckBlock, self).__init__()
self.conv0 = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters,
filter_size=1,
act='relu',
name=name+"_branch2a")
self.conv1 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters,
filter_size=3,
stride=stride,
act='relu',
name=name+"_branch2b")
self.conv2 = ConvBNLayer(
num_channels=num_filters,
num_filters=num_filters * 4,
filter_size=1,
act=None,
name=name+"_branch2c")
if not shortcut:
self.short = ConvBNLayer(
num_channels=num_channels,
num_filters=num_filters * 4,
filter_size=1,
stride=stride,
name=name + "_branch1")
self.shortcut = shortcut
self._num_channels_out = num_filters * 4
def forward(self, inputs):
y = self.conv0(inputs)
conv1 = self.conv1(y)
conv2 = self.conv2(conv1)
if self.shortcut:
short = inputs
else:
short = self.short(inputs)
y = fluid.layers.elementwise_add(x=short, y=conv2)
layer_helper = LayerHelper(self.full_name(), act='relu')
return layer_helper.append_activation(y)
class ResNet(fluid.dygraph.Layer):
def __init__(self, layers=50, class_dim=1000):
super(ResNet, self).__init__()
self.layers = layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
depth = [3, 4, 6, 3]
elif layers == 101:
depth = [3, 4, 23, 3]
elif layers == 152:
depth = [3, 8, 36, 3]
num_channels = [64, 256, 512, 1024]
num_filters = [64, 128, 256, 512]
self.conv = ConvBNLayer(
num_channels=3,
num_filters=64,
filter_size=7,
stride=2,
act='relu',
name="conv1")
self.pool2d_max = Pool2D(
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
self.bottleneck_block_list = []
for block in range(len(depth)):
shortcut = False
for i in range(depth[block]):
if layers in [101, 152] and block == 2:
if i == 0:
conv_name="res"+str(block+2)+"a"
else:
conv_name="res"+str(block+2)+"b"+str(i)
else:
conv_name="res"+str(block+2)+chr(97+i)
bottleneck_block = self.add_sublayer(
'bb_%d_%d' % (block, i),
BottleneckBlock(
num_channels=num_channels[block]
if i == 0 else num_filters[block] * 4,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
shortcut=shortcut,
name=conv_name))
self.bottleneck_block_list.append(bottleneck_block)
shortcut = True
self.pool2d_avg = Pool2D(
pool_size=7, pool_type='avg', global_pooling=True)
self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1
stdv = 1.0 / math.sqrt(2048 * 1.0)
self.out = Linear(self.pool2d_avg_output,
class_dim,
param_attr=ParamAttr(
initializer=fluid.initializer.Uniform(-stdv, stdv), name="fc_0.w_0"),
bias_attr=ParamAttr(name="fc_0.b_0"))
def forward(self, inputs):
y = self.conv(inputs)
y = self.pool2d_max(y)
for bottleneck_block in self.bottleneck_block_list:
y = bottleneck_block(y)
y = self.pool2d_avg(y)
y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
y = self.out(y)
return y
def ResNet50(**args):
model = ResNet(layers=50, **args)
return model
def ResNet101(**args):
model = ResNet(layers=101, **args)
return model
def ResNet152(**args):
model = ResNet(layers=152, **args)
return model
if __name__ == "__main__":
import numpy as np
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
model = ResNet50()
img = np.random.uniform(0, 255, [1, 3, 224, 224]).astype('float32')
img = fluid.dygraph.to_variable(img)
res = model(img)
print(res.shape)
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -120,10 +119,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class BasicBlock(fluid.dygraph.Layer):
@@ -167,10 +164,8 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv1)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
+return y
class ResNet_vc(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -130,10 +129,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class BasicBlock(fluid.dygraph.Layer):
@@ -179,10 +176,8 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv1)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv1, act='relu')
+return y
class ResNet_vd(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -122,10 +121,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class ResNeXt(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -46,7 +45,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
self.is_vd_mode = is_vd_mode
self._pool2d_avg = Pool2D(
-pool_size=2, pool_stride=2, pool_padding=0, pool_type='avg', ceil_mode=True)
+pool_size=2,
+pool_stride=2,
+pool_padding=0,
+pool_type='avg',
+ceil_mode=True)
self._conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
@@ -131,10 +134,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=conv2)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
+return y
class ResNeXt(fluid.dygraph.Layer):
...
@@ -19,7 +19,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -137,10 +136,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=scale)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+return y
class BasicBlock(fluid.dygraph.Layer):
@@ -194,10 +191,8 @@ class BasicBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=scale)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+return y
class SELayer(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
import math
@@ -131,10 +130,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
short = inputs
else:
short = self.short(inputs)
-y = fluid.layers.elementwise_add(x=short, y=scale)
-layer_helper = LayerHelper(self.full_name(), act='relu')
-return layer_helper.append_activation(y)
+y = fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+return y
class SELayer(fluid.dygraph.Layer):
...
@@ -20,7 +20,6 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
from paddle.fluid.initializer import MSRA
import math
...
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
import math
@@ -99,11 +98,10 @@ class EntryFlowBottleneckBlock(fluid.dygraph.Layer):
def forward(self, inputs):
conv0 = inputs
short = self._short(inputs)
-layer_helper = LayerHelper(self.full_name(), act="relu")
if self.relu_first:
-conv0 = layer_helper.append_activation(conv0)
+conv0 = fluid.layers.relu(conv0)
conv1 = self._conv1(conv0)
-conv2 = layer_helper.append_activation(conv1)
+conv2 = fluid.layers.relu(conv1)
conv2 = self._conv2(conv2)
pool = self._pool(conv2)
return fluid.layers.elementwise_add(x=short, y=pool)
@@ -177,12 +175,11 @@ class MiddleFlowBottleneckBlock(fluid.dygraph.Layer):
name=name + "_branch2c_weights")
def forward(self, inputs):
-layer_helper = LayerHelper(self.full_name(), act="relu")
-conv0 = layer_helper.append_activation(inputs)
+conv0 = fluid.layers.relu(inputs)
conv0 = self._conv_0(conv0)
-conv1 = layer_helper.append_activation(conv0)
+conv1 = fluid.layers.relu(conv0)
conv1 = self._conv_1(conv1)
-conv2 = layer_helper.append_activation(conv1)
+conv2 = fluid.layers.relu(conv1)
conv2 = self._conv_2(conv2)
return fluid.layers.elementwise_add(x=inputs, y=conv2)
@@ -276,10 +273,9 @@ class ExitFlowBottleneckBlock(fluid.dygraph.Layer):
def forward(self, inputs):
short = self._short(inputs)
-layer_helper = LayerHelper(self.full_name(), act="relu")
-conv0 = layer_helper.append_activation(inputs)
+conv0 = fluid.layers.relu(inputs)
conv1 = self._conv_1(conv0)
-conv2 = layer_helper.append_activation(conv1)
+conv2 = fluid.layers.relu(conv1)
conv2 = self._conv_2(conv2)
pool = self._pool(conv2)
return fluid.layers.elementwise_add(x=short, y=pool)
@@ -306,12 +302,11 @@ class ExitFlow(fluid.dygraph.Layer):
bias_attr=ParamAttr(name="fc_offset"))
def forward(self, inputs):
-layer_helper = LayerHelper(self.full_name(), act="relu")
conv0 = self._conv_0(inputs)
conv1 = self._conv_1(conv0)
-conv1 = layer_helper.append_activation(conv1)
+conv1 = fluid.layers.relu(conv1)
conv2 = self._conv_2(conv1)
-conv2 = layer_helper.append_activation(conv2)
+conv2 = fluid.layers.relu(conv2)
pool = self._pool(conv2)
pool = fluid.layers.reshape(pool, [0, -1])
out = self._out(pool)
...
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
__all__ = ["Xception41_deeplab", "Xception65_deeplab", "Xception71_deeplab"]
@@ -226,13 +225,12 @@ class Xception_Block(fluid.dygraph.Layer):
name=name + "/shortcut")
def forward(self, inputs):
-layer_helper = LayerHelper(self.full_name(), act='relu')
if not self.activation_fn_in_separable_conv:
-x = layer_helper.append_activation(inputs)
+x = fluid.layers.relu(inputs)
x = self._conv1(x)
-x = layer_helper.append_activation(x)
+x = fluid.layers.relu(x)
x = self._conv2(x)
-x = layer_helper.append_activation(x)
+x = fluid.layers.relu(x)
x = self._conv3(x)
else:
x = self._conv1(inputs)
...
@@ -5,5 +5,5 @@ export PYTHONPATH=$PWD:$PYTHONPATH
python -m paddle.distributed.launch \
--selected_gpus="0,1,2,3" \
tools/train.py \
- -c ./configs/ResNet/ResNet50_vd.yaml \
+ -c ./configs/ResNet/ResNet50.yaml \
-o print_interval=10