#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import reduce

import paddle
import paddle.distributed.fleet as fleet
import paddle.fluid as fluid

from test_dist_base import TestDistRunnerBase, runtime_main

# Pipeline parallelism requires static-graph mode.
paddle.enable_static()

# Data type used for all model tensors.
DTYPE = "float32"
# Pre-download the MNIST dataset so readers below don't fetch lazily.
paddle.dataset.mnist.fetch()

# Fix seeds so the distributed test is reproducible.
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1


def cnn_model(data):
    """Build a small CNN whose final layers sit on pipeline stage ``gpu:1``.

    Args:
        data: input image variable (MNIST-shaped, ``[N, 1, 28, 28]`` per the
            feed declared in ``TestDistMnist2x2.get_model``).

    Returns:
        The softmax prediction variable: the sum of an fc head over the
        second conv block and an fc head over the first conv block (the
        second head exists only to exercise the ``@RENAMED@GRADIENT`` path).
    """
    # Two conv+pool blocks; constant init keeps the test deterministic.
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=data,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu",
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.01)
        ),
    )
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu",
        param_attr=fluid.ParamAttr(
            initializer=fluid.initializer.Constant(value=0.01)
        ),
    )

    SIZE = 10  # number of MNIST classes
    input_shape = conv_pool_2.shape
    param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
    # NOTE(review): `scale` is computed but never used — presumably kept for
    # parity with sibling dist tests that use it for weight init; confirm
    # before deleting.
    scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5

    # Everything from here runs on the second pipeline stage.
    with fluid.device_guard("gpu:1"):
        predict = fluid.layers.fc(
            input=conv_pool_2,
            size=SIZE,
            act="softmax",
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.01)
            ),
        )
        # To cover @RENAMED@GRADIENT
        predict2 = fluid.layers.fc(
            input=conv_pool_1,
            size=SIZE,
            act="softmax",
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Constant(value=0.01)
            ),
        )
        predict += predict2
    return predict


class TestDistMnist2x2(TestDistRunnerBase):
    """Pipeline-parallel MNIST model definition for the dist test harness."""

    def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None):
        """Build programs, readers, and metrics for the pipeline test.

        Args:
            batch_size: micro batch size fed per step.
            use_dgc: unused here; kept to match the harness signature.
            dist_strategy: when truthy, build with a fleet pipeline + AMP
                strategy and a DataLoader; otherwise build a plain
                single-process program.

        Returns:
            ``(inference_program, avg_cost, train_reader, test_reader,
            batch_acc, predict)``, with ``data_loader`` appended as a
            seventh element in the distributed case.
        """
        # Input data lives on the first pipeline stage.
        with fluid.device_guard("gpu:0"):
            images = fluid.layers.data(
                name='pixel', shape=[1, 28, 28], dtype=DTYPE
            )
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')

            if dist_strategy:
                # Non-iterable loader: pipeline training drives it via
                # start()/reset() rather than Python iteration.
                data_loader = fluid.io.DataLoader.from_generator(
                    feed_list=[images, label],
                    capacity=64,
                    use_double_buffer=False,
                    iterable=False,
                )
            # Train program (conv blocks on gpu:0, heads on gpu:1).
            predict = cnn_model(images)

        # Loss lives on the second pipeline stage, with the fc heads.
        with fluid.device_guard("gpu:1"):
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = paddle.mean(x=cost)

        # Evaluator
        with fluid.device_guard("gpu:1"):
            batch_size_tensor = fluid.layers.create_tensor(dtype='int64')
            batch_acc = fluid.layers.accuracy(
                input=predict, label=label, total=batch_size_tensor
            )

        # Clone for inference before the optimizer mutates the main program.
        inference_program = fluid.default_main_program().clone()

        # Piecewise LR schedule: decay by 10x at each pass boundary.
        base_lr = self.lr
        passes = [30, 60, 80, 90]
        steps_per_pass = 10
        bd = [steps_per_pass * p for p in passes]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
        lr_val = paddle.optimizer.lr.PiecewiseDecay(boundaries=bd, values=lr)

        opt = paddle.optimizer.AdamW(
            learning_rate=lr_val,
            grad_clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0),
        )

        acc_steps = 2  # accumulated steps for pipeline

        if dist_strategy:
            # Reader — the test split is reused for training to keep the
            # harness fast and deterministic.
            train_reader = paddle.batch(
                paddle.dataset.mnist.test(), batch_size=batch_size
            )
            test_reader = paddle.batch(
                paddle.dataset.mnist.test(), batch_size=batch_size
            )
            fleet.init(is_collective=True)
            strategy = fleet.DistributedStrategy()
            strategy.pipeline = True
            strategy.amp = True
            strategy.pipeline_configs = {
                'micro_batch_size': batch_size,
                'schedule_mode': '1F1B',
                'accumulate_steps': acc_steps,
            }
            dist_opt = fleet.distributed_optimizer(
                optimizer=opt, strategy=strategy
            )
            dist_opt.minimize(avg_cost)
        else:
            opt.minimize(avg_cost)
            # Reader — single-process runs consume acc_steps micro-batches
            # per optimizer step, so scale the batch size to match.
            train_reader = paddle.batch(
                paddle.dataset.mnist.test(), batch_size=batch_size * acc_steps
            )
            test_reader = paddle.batch(
                paddle.dataset.mnist.test(), batch_size=batch_size * acc_steps
            )

        if dist_strategy:
            return (
                inference_program,
                avg_cost,
                train_reader,
                test_reader,
                batch_acc,
                predict,
                data_loader,
            )
        else:
            return (
                inference_program,
                avg_cost,
                train_reader,
                test_reader,
                batch_acc,
                predict,
            )


if __name__ == "__main__":
    # Hand the model class to the distributed-test harness entry point.
    runtime_main(TestDistMnist2x2)