#  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python

from paddle.trainer_config_helpers import *

# Input geometry and runtime-tunable hyper-parameters.
# All get_config_arg() values can be overridden from the trainer command line.
height = 227
width = 227
num_class = 1000
batch_size = get_config_arg('batch_size', int, 128)
# Group count for the grouped convolutions (AlexNet's original two-GPU split);
# the benchmark exposes it as 'layer_num' to vary the amount of grouping.
gp = get_config_arg('layer_num', int, 1)
is_infer = get_config_arg("is_infer", bool, False)
num_samples = get_config_arg('num_samples', int, 2560)

# Arguments forwarded to the Python data provider (provider.process).
args = {
    'height': height,
    'width': width,
    'color': True,
    'num_class': num_class,
    'is_infer': is_infer,
    'num_samples': num_samples
}
# Training reads train.list; inference reads test.list as the "test" source.
define_py_data_sources2(
    "train.list" if not is_infer else None,
    "test.list" if is_infer else None,
    module="provider",
    obj="process",
    args=args)
# Optimizer: SGD with momentum 0.9. The learning rate and L2 weight decay
# are divided/multiplied by batch_size so the effective per-sample rates
# stay constant when the batch size changes.
settings(
    batch_size=batch_size,
    learning_rate=0.01 / batch_size,
    learning_method=MomentumOptimizer(0.9),
    regularization=L2Regularization(0.0005 * batch_size))

# conv1: 11x11 stride-4 convolution over the flattened RGB input,
# local response normalization, then 3x3 stride-2 max pooling.
net = data_layer('data', size=height * width * 3)
net = img_conv_layer(
    input=net,
    filter_size=11,
    num_channels=3,
    num_filters=96,
    stride=4,
    padding=1)
net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75)
net = img_pool_layer(input=net, pool_size=3, stride=2)

# conv2: 5x5 grouped convolution (gp groups), LRN, then 3x3/2 max pooling.
net = img_conv_layer(
    input=net, filter_size=5, num_filters=256, stride=1, padding=2, groups=gp)
net = img_cmrnorm_layer(input=net, size=5, scale=0.0001, power=0.75)
net = img_pool_layer(input=net, pool_size=3, stride=2)

# conv3: 3x3 convolution, 384 filters (no grouping).
net = img_conv_layer(
    input=net, filter_size=3, num_filters=384, stride=1, padding=1)
# conv4: 3x3 grouped convolution, 384 filters.
net = img_conv_layer(
    input=net, filter_size=3, num_filters=384, stride=1, padding=1, groups=gp)

# conv5: 3x3 grouped convolution, 256 filters, then 3x3/2 max pooling.
net = img_conv_layer(
    input=net, filter_size=3, num_filters=256, stride=1, padding=1, groups=gp)
net = img_pool_layer(input=net, pool_size=3, stride=2)

# Classifier head: two dropout-regularized 4096-unit ReLU layers followed
# by a softmax over the output classes. The final size uses num_class
# (same value as the original literal 1000) so it tracks the constant above.
net = fc_layer(
    input=net,
    size=4096,
    act=ReluActivation(),
    layer_attr=ExtraAttr(drop_rate=0.5))
net = fc_layer(
    input=net,
    size=4096,
    act=ReluActivation(),
    layer_attr=ExtraAttr(drop_rate=0.5))
net = fc_layer(input=net, size=num_class, act=SoftmaxActivation())

# Inference emits the softmax probabilities directly; training attaches a
# cross-entropy loss against the integer class label.
if is_infer:
    outputs(net)
else:
    lab = data_layer('label', num_class)
    loss = cross_entropy(input=net, label=lab)
    outputs(loss)