# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
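"""Train an image classifier on the CIFAR-10 dataset with the PaddlePaddle v2 API.

The network is either a CIFAR-10 ResNet (resnet_cifar10) or a VGG-style model
with batch normalization and dropout (vgg_bn_drop); after training, the script
runs inference on image/dog.png.
"""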

import os
import sys

import paddle.v2 as paddle

from vgg import vgg_bn_drop
from resnet import resnet_cifar10

with_gpu = os.getenv('WITH_GPU', '0') != '0'
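# Set the WITH_GPU environment variable to a non-zero value to train on the GPU.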

def main():
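    # CIFAR-10 images are 32x32 pixels with 3 (RGB) channels, labeled with
    # one of 10 classes.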
    datadim = 3 * 32 * 32
    classdim = 10

    # PaddlePaddle init
    paddle.init(use_gpu=with_gpu, trainer_count=1)

    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(datadim))

    # Add neural network config
    # option 1. resnet
    # net = resnet_cifar10(image, depth=32)
    # option 2. vgg
    net = vgg_bn_drop(image)

    out = paddle.layer.fc(
        input=net, size=classdim, act=paddle.activation.Softmax())
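    # The fully connected softmax layer turns the extracted features into a
    # probability distribution over the 10 classes.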

    lbl = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(classdim))
    cost = paddle.layer.classification_cost(input=out, label=lbl)

    # Create parameters
    parameters = paddle.parameters.create(cost)

    # Create optimizer
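    # The learning rate is divided and the L2 rate multiplied by the batch
    # size (128) because updates are accumulated over each batch of 128
    # samples.  With the 'discexp' (discrete exponential) schedule the
    # learning rate decays by a factor of learning_rate_decay_a roughly every
    # learning_rate_decay_b training samples (here, about every 100 passes
    # over the 50000 training images).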
    momentum_optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0002 * 128),
        learning_rate=0.1 / 128.0,
        learning_rate_decay_a=0.1,
        learning_rate_decay_b=50000 * 100,
        learning_rate_schedule='discexp')

    # End batch and end pass event handler
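    # Every 100 batches it prints the current cost and metrics; at the end of
    # each pass it saves the parameters and evaluates on the CIFAR-10 test set.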
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 100 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
            else:
                sys.stdout.write('.')
                sys.stdout.flush()
        if isinstance(event, paddle.event.EndPass):
            # save parameters
            with open('params_pass_%d.tar' % event.pass_id, 'w') as f:
                parameters.to_tar(f)

            result = trainer.test(
                reader=paddle.batch(
                    paddle.dataset.cifar.test10(), batch_size=128),
                feeding={'image': 0,
                         'label': 1})
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    # Create trainer
    trainer = paddle.trainer.SGD(
        cost=cost, parameters=parameters, update_equation=momentum_optimizer)
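    # Each sample from the CIFAR-10 readers is an (image, label) tuple; the
    # feeding dict maps the data layer names to those tuple positions.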
    trainer.train(
        reader=paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.cifar.train10(), buf_size=50000),
            batch_size=128),
        num_passes=200,
        event_handler=event_handler,
        feeding={'image': 0,
                 'label': 1})

    # inference
    from PIL import Image
    import numpy as np
    import os

    def load_image(file):
        im = Image.open(file)
        im = im.resize((32, 32), Image.ANTIALIAS)
        im = np.array(im).astype(np.float32)
        # The loaded image array is in H(height), W(width), C(channel)
        # order, but PaddlePaddle requires the CHW order, so transpose it.
        im = im.transpose((2, 0, 1))  # CHW
        # In the training phase, the channel order of the CIFAR
        # images is B(blue), G(green), R(red), but PIL opens images
        # in RGB mode, so the channel order must be swapped.
        im = im[(2, 1, 0), :, :]  # BGR
        im = im.flatten()
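        # Scale pixel values to [0, 1], matching how the CIFAR-10 reader
        # normalizes the training images.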
        im = im / 255.0
        return im

    test_data = []
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    test_data.append((load_image(cur_dir + '/image/dog.png'), ))

    # To run inference with a previously saved model, uncomment the following
    # lines and change the file name accordingly.
    # with open('params_pass_50.tar', 'r') as f:
    #    parameters = paddle.parameters.Parameters.from_tar(f)

    probs = paddle.infer(
        output_layer=out, parameters=parameters, input=test_data)
    lab = np.argsort(-probs)  # probs and lab are the results of one batch data
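    # Each row of probs holds the class probabilities for one input sample;
    # argsort in descending order puts the most likely class id first, so
    # lab[0][0] is the predicted class for image/dog.png.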
    print "Label of image/dog.png is: %d" % lab[0][0]


if __name__ == '__main__':
    main()