"""
A very basic example for how to use current Raw SWIG API to train mnist network.

Current implementation uses Raw SWIG, which means the API call is directly \
passed to C++ side of Paddle.

The user api could be simpler and carefully designed.
"""
import py_paddle.swig_paddle as api
from py_paddle import DataProviderConverter
import paddle.trainer.PyDataProvider2 as dp
import numpy as np
import random
from mnist_util import read_from_mnist
import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
from paddle.trainer_config_helpers import *


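# Note: optimizer_config and network_config are not called directly; they are
# handed to the config parser in main(), which executes each one to build the
# corresponding protobuf configuration.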
def optimizer_config():
    settings(
        learning_rate=1e-4,
        learning_method=AdamOptimizer(),
        batch_size=1000,
        model_average=ModelAverage(average_window=0.5),
        regularization=L2Regularization(rate=0.5))


def network_config():
    imgs = data_layer(name='pixel', size=784)
    hidden1 = fc_layer(input=imgs, size=200)
    hidden2 = fc_layer(input=hidden1, size=200)
    inference = fc_layer(input=hidden2, size=10, act=SoftmaxActivation())
    cost = classification_cost(
        input=inference, label=data_layer(
            name='label', size=10))
    outputs(cost)


def init_parameter(network):
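    """Randomly initialize every parameter of the network in place.

    Values are drawn uniformly from [-1, 1) and copied into Paddle's C++
    parameter buffers through the SWIG API.
    """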
    assert isinstance(network, api.GradientMachine)
    for each_param in network.getParameters():
        assert isinstance(each_param, api.Parameter)
        array_size = len(each_param)
        array = np.random.uniform(-1.0, 1.0, array_size).astype('float32')
        each_param.getBuf(api.PARAMETER_VALUE).copyFromNumpyArray(array)


def generator_to_batch(generator, batch_size):
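    """Group items from a generator into lists of at most batch_size.

    The final batch may be smaller if the generator is exhausted, e.g. five
    items with batch_size=2 yield batches of sizes 2, 2 and 1.
    """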
    ret_val = list()
    for each_item in generator:
        ret_val.append(each_item)
        if len(ret_val) == batch_size:
            yield ret_val
            ret_val = list()
    if len(ret_val) != 0:
        yield ret_val


class BatchPool(object):
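    """Materialize a generator in memory and serve shuffled batches.

    Unlike generator_to_batch, the data is loaded up front so that it can be
    reshuffled on every pass; calling the instance yields one pass of batches.
    """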
    def __init__(self, generator, batch_size):
        self.data = list(generator)
        self.batch_size = batch_size

    def __call__(self):
        random.shuffle(self.data)
        for offset in xrange(0, len(self.data), self.batch_size):
            limit = min(offset + self.batch_size, len(self.data))
            yield self.data[offset:limit]


def input_order_converter(generator):
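    """Flatten the dicts yielded by read_from_mnist into (pixel, label)
    tuples, matching the input_types order given to DataProviderConverter
    in main()."""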
    for each_item in generator:
        yield each_item['pixel'], each_item['label']


def main():
    api.initPaddle("-use_gpu=false", "-trainer_count=4")  # use 4 cpu cores

    # Get enable_types for the optimizer.
    # enable_types = [value, gradient, momentum, etc.]
    # Each optimizer (SGD, Adam, ...) requires the GradientMachine to enable
    # a different set of parameter buffers.
    opt_config_proto = config_parser_utils.parse_optimizer_config(
        optimizer_config)
    opt_config = api.OptimizationConfig.createFromProto(opt_config_proto)
    _temp_optimizer_ = api.ParameterOptimizer.create(opt_config)
    enable_types = _temp_optimizer_.getParameterTypes()
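    # For example, plain SGD typically needs only the value and gradient
    # buffers, while Adam additionally needs its moment buffers.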

    # Create a simple GradientMachine.
    model_config = config_parser_utils.parse_network_config(network_config)
    m = api.GradientMachine.createFromConfigProto(
        model_config, api.CREATE_MODE_NORMAL, enable_types)

    # This type check is not strictly necessary; it only enables type hints
    # in IDEs such as PyCharm.
    assert isinstance(m, api.GradientMachine)

    # Initialize the parameters with numpy arrays.
    init_parameter(network=m)

    # Create a local updater. "Local" means the training does not run in a
    # cluster. For cluster training, this could be changed to
    # createRemoteUpdater in the future.
    updater = api.ParameterUpdater.createLocalUpdater(opt_config)
    assert isinstance(updater, api.ParameterUpdater)

    # Initialize ParameterUpdater.
    updater.init(m)

    # DataProviderConverter is a utility that converts Python objects into
    # Paddle's C++ input. The input format is the same as Paddle's
    # DataProvider.
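    # Calling converter(data_batch) returns the C++-side Arguments object
    # that forward/forwardBackward expect.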
    converter = DataProviderConverter(
        input_types=[dp.dense_vector(784), dp.integer_value(10)])

    train_file = './data/raw_data/train'
    test_file = './data/raw_data/t10k'

    # Start the gradient machine.
    # The gradient machine must be started before invoking forward/backward,
    # not just for training but also for inference.
    m.start()

    # An evaluator can print the error rate, etc. It is a C++ class.
    batch_evaluator = m.makeEvaluator()
    test_evaluator = m.makeEvaluator()

    # Get the training data.
    # The training data is stored in a data pool. The current implementation
    # does not care about memory or speed; it is just a very naive
    # implementation.
    train_data_generator = input_order_converter(read_from_mnist(train_file))
    train_data = BatchPool(train_data_generator, 512)

    # outArgs holds the neural network's forward results. They are not used
    # here; outArgs is just passed to gradient_machine.forward.
    outArgs = api.Arguments.createArguments(0)

    for pass_id in xrange(2):  # we train for 2 passes.
        updater.startPass()

        for batch_id, data_batch in enumerate(train_data()):
            # data_batch contains the input images.
            # For online learning, data_batch could instead be fetched from
            # the network.

            # Start updating one batch.
            pass_type = updater.startBatch(len(data_batch))
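            # The returned pass type is passed on to forwardBackward below.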

            # Start the batch evaluator.
            # batch_evaluator can be used between start() and finish().
            batch_evaluator.start()

            # forwardBackward is a shortcut for forward and backward.
            # It is sometimes faster than invoking forward and backward
            # separately, because the GradientMachine may run them
            # asynchronously.
            m.forwardBackward(converter(data_batch), outArgs, pass_type)

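            # Apply the optimizer's update rule to each parameter, consuming
            # the gradients accumulated by forwardBackward.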
            for each_param in m.getParameters():
                updater.update(each_param)

            # Get the cost. We use numpy to compute the average cost over
            # this batch.
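            # getSlotValue(0) returns the first network output, i.e. the
            # per-sample cost declared via outputs(cost) in network_config.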
            cost_vec = outArgs.getSlotValue(0)
            cost_vec = cost_vec.copyToNumpyMat()
            cost = cost_vec.sum() / len(data_batch)

            # Run the evaluator on this batch's forward results.
            m.eval(batch_evaluator)

            # Print logs.
            print 'Pass id', pass_id, 'Batch id', batch_id, 'with cost=', \
                cost, batch_evaluator

            batch_evaluator.finish()
            # Finish the batch.
            #  * This clears the gradients.
            #  * It ensures all parameter values are updated.
            updater.finishBatch(cost)

        # Testing stage: use the test data set to evaluate the current network.
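        # apply() temporarily swaps in the averaged parameters maintained by
        # ModelAverage (see optimizer_config), so testing runs on the
        # averaged model; updater.restore() below switches back.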
        updater.apply()
        test_evaluator.start()
        test_data_generator = input_order_converter(read_from_mnist(test_file))
        for data_batch in generator_to_batch(test_data_generator, 512):
            # In the testing stage, only the forward pass is needed.
            m.forward(converter(data_batch), outArgs, api.PASS_TEST)
            m.eval(test_evaluator)

        # Print the error rate on the test data set.
        print 'Pass', pass_id, ' test evaluator: ', test_evaluator
        test_evaluator.finish()
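        # restore() switches back from the averaged parameters applied above;
        # catchUpWith() then flushes any lazily applied updates (such as
        # regularization) so the parameter values read below are current.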
        updater.restore()

        updater.catchUpWith()
        params = m.getParameters()
        for each_param in params:
            assert isinstance(each_param, api.Parameter)
            value = each_param.getBuf(api.PARAMETER_VALUE)
            value = value.copyToNumpyArray()

            # Here, the parameters could be saved to wherever you want.
            print each_param.getName(), value

        updater.finishPass()

    m.finish()


if __name__ == '__main__':
    main()