提交 edd7382d 编写于 作者: littletomatodonkey's avatar littletomatodonkey

fix googlenet

上级 8200c7ba
import paddle import paddle
import paddle.fluid as fluid from paddle import ParamAttr
from paddle.fluid.param_attr import ParamAttr import paddle.nn as nn
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear import paddle.nn.functional as F
from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
from paddle.nn.initializer import Uniform
import math import math
__all__ = ['GoogLeNet'] __all__ = ['GoogLeNet']
...@@ -10,12 +13,11 @@ __all__ = ['GoogLeNet'] ...@@ -10,12 +13,11 @@ __all__ = ['GoogLeNet']
def xavier(channels, filter_size, name):
    """Build a ParamAttr carrying Xavier-style uniform initialization.

    Args:
        channels: number of input channels feeding the layer.
        filter_size: spatial kernel size (assumed square).
        name: parameter name prefix; "_weights" is appended.

    Returns:
        ParamAttr whose initializer samples uniformly from [-stdv, stdv].
    """
    # Xavier/Glorot bound derived from fan-in = filter_size^2 * channels.
    stdv = (3.0 / (filter_size**2 * channels))**0.5
    param_attr = ParamAttr(
        initializer=Uniform(-stdv, stdv), name=name + "_weights")
    return param_attr
class ConvLayer(fluid.dygraph.Layer): class ConvLayer(nn.Layer):
def __init__(self, def __init__(self,
num_channels, num_channels,
num_filters, num_filters,
...@@ -26,15 +28,14 @@ class ConvLayer(fluid.dygraph.Layer): ...@@ -26,15 +28,14 @@ class ConvLayer(fluid.dygraph.Layer):
name=None): name=None):
super(ConvLayer, self).__init__() super(ConvLayer, self).__init__()
self._conv = Conv2D( self._conv = Conv2d(
num_channels=num_channels, in_channels=num_channels,
num_filters=num_filters, out_channels=num_filters,
filter_size=filter_size, kernel_size=filter_size,
stride=stride, stride=stride,
padding=(filter_size - 1) // 2, padding=(filter_size - 1) // 2,
groups=groups, groups=groups,
act=None, weight_attr=ParamAttr(name=name + "_weights"),
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=False) bias_attr=False)
def forward(self, inputs): def forward(self, inputs):
...@@ -42,7 +43,7 @@ class ConvLayer(fluid.dygraph.Layer): ...@@ -42,7 +43,7 @@ class ConvLayer(fluid.dygraph.Layer):
return y return y
class Inception(fluid.dygraph.Layer): class Inception(nn.Layer):
def __init__(self, def __init__(self,
input_channels, input_channels,
output_channels, output_channels,
...@@ -88,12 +89,12 @@ class Inception(fluid.dygraph.Layer): ...@@ -88,12 +89,12 @@ class Inception(fluid.dygraph.Layer):
pool = self._pool(inputs) pool = self._pool(inputs)
convprj = self._convprj(pool) convprj = self._convprj(pool)
cat = fluid.layers.concat([conv1, conv3, conv5, convprj], axis=1) cat = paddle.concat([conv1, conv3, conv5, convprj], axis=1)
cat = fluid.layers.relu(cat) cat = F.relu(cat)
return cat return cat
class GoogleNetDY(fluid.dygraph.Layer): class GoogleNetDY(nn.Layer):
def __init__(self, class_dim=1000): def __init__(self, class_dim=1000):
super(GoogleNetDY, self).__init__() super(GoogleNetDY, self).__init__()
self._conv = ConvLayer(3, 64, 7, 2, name="conv1") self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
...@@ -124,40 +125,37 @@ class GoogleNetDY(fluid.dygraph.Layer): ...@@ -124,40 +125,37 @@ class GoogleNetDY(fluid.dygraph.Layer):
self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7) self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7)
self._drop = fluid.dygraph.Dropout(p=0.4) self._drop = Dropout(p=0.4)
self._fc_out = Linear( self._fc_out = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out"), weight_attr=xavier(1024, 1, "out"),
bias_attr=ParamAttr(name="out_offset"), bias_attr=ParamAttr(name="out_offset"))
act="softmax")
self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg") self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1") self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
self._fc_o1 = Linear( self._fc_o1 = Linear(
1152, 1152,
1024, 1024,
param_attr=xavier(2048, 1, "fc_o1"), weight_attr=xavier(2048, 1, "fc_o1"),
bias_attr=ParamAttr(name="fc_o1_offset"), bias_attr=ParamAttr(name="fc_o1_offset"))
act="relu") self._drop_o1 = Dropout(p=0.7)
self._drop_o1 = fluid.dygraph.Dropout(p=0.7)
self._out1 = Linear( self._out1 = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out1"), weight_attr=xavier(1024, 1, "out1"),
bias_attr=ParamAttr(name="out1_offset"), bias_attr=ParamAttr(name="out1_offset"))
act="softmax")
self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg') self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2") self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
self._fc_o2 = Linear( self._fc_o2 = Linear(
1152, 1152,
1024, 1024,
param_attr=xavier(2048, 1, "fc_o2"), weight_attr=xavier(2048, 1, "fc_o2"),
bias_attr=ParamAttr(name="fc_o2_offset")) bias_attr=ParamAttr(name="fc_o2_offset"))
self._drop_o2 = fluid.dygraph.Dropout(p=0.7) self._drop_o2 = Dropout(p=0.7)
self._out2 = Linear( self._out2 = Linear(
1024, 1024,
class_dim, class_dim,
param_attr=xavier(1024, 1, "out2"), weight_attr=xavier(1024, 1, "out2"),
bias_attr=ParamAttr(name="out2_offset")) bias_attr=ParamAttr(name="out2_offset"))
def forward(self, inputs): def forward(self, inputs):
...@@ -183,19 +181,22 @@ class GoogleNetDY(fluid.dygraph.Layer): ...@@ -183,19 +181,22 @@ class GoogleNetDY(fluid.dygraph.Layer):
x = self._pool_5(ince5b) x = self._pool_5(ince5b)
x = self._drop(x) x = self._drop(x)
x = fluid.layers.squeeze(x, axes=[2, 3]) x = paddle.squeeze(x, axis=[2, 3])
out = self._fc_out(x) out = self._fc_out(x)
out = F.softmax(out)
x = self._pool_o1(ince4a) x = self._pool_o1(ince4a)
x = self._conv_o1(x) x = self._conv_o1(x)
x = fluid.layers.flatten(x) x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._fc_o1(x) x = self._fc_o1(x)
x = F.relu(x)
x = self._drop_o1(x) x = self._drop_o1(x)
out1 = self._out1(x) out1 = self._out1(x)
out1 = F.softmax(out1)
x = self._pool_o2(ince4d) x = self._pool_o2(ince4d)
x = self._conv_o2(x) x = self._conv_o2(x)
x = fluid.layers.flatten(x) x = paddle.flatten(x, start_axis=1, stop_axis=-1)
x = self._fc_o2(x) x = self._fc_o2(x)
x = self._drop_o2(x) x = self._drop_o2(x)
out2 = self._out2(x) out2 = self._out2(x)
......
...@@ -12,8 +12,6 @@ ...@@ -12,8 +12,6 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from ppcls.utils.save_load import load_dygraph_pretrain
from ppcls.modeling import architectures
import paddle.fluid as fluid import paddle.fluid as fluid
import numpy as np import numpy as np
import argparse import argparse
...@@ -24,6 +22,9 @@ __dir__ = os.path.dirname(os.path.abspath(__file__)) ...@@ -24,6 +22,9 @@ __dir__ = os.path.dirname(os.path.abspath(__file__))
sys.path.append(__dir__) sys.path.append(__dir__)
sys.path.append(os.path.abspath(os.path.join(__dir__, '../..'))) sys.path.append(os.path.abspath(os.path.join(__dir__, '../..')))
from ppcls.modeling import architectures
from ppcls.utils.save_load import load_dygraph_pretrain
def parse_args(): def parse_args():
def str2bool(v): def str2bool(v):
...@@ -108,6 +109,9 @@ def main(): ...@@ -108,6 +109,9 @@ def main():
data = fluid.dygraph.to_variable(data) data = fluid.dygraph.to_variable(data)
net.eval() net.eval()
outputs = net(data) outputs = net(data)
if args.model == "GoogLeNet":
outputs = outputs[0]
else:
outputs = fluid.layers.softmax(outputs) outputs = fluid.layers.softmax(outputs)
outputs = outputs.numpy() outputs = outputs.numpy()
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册