#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import six
import sys
from .. import compat as cpt
from . import framework
from .framework import cuda_places, cpu_places, xpu_places
from . import core

__all__ = ['CompiledProgram', 'ExecutionStrategy', 'BuildStrategy']

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
DeviceType = core.DeviceType


def _place_obj(place):
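    # Wrap a raw place (e.g. CUDAPlace or CPUPlace) into a core.Place object
    # that can be passed to the C++ ParallelExecutor.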
    p = core.Place()
    p.set_place(place)
    return p


def _is_pserver_mode(main_program):
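    # A program is considered to run in parameter-server (pserver) mode if its
    # global block contains distributed "send" or "recv" ops.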
    main = main_program if main_program \
        else framework.default_main_program()
    for op in main.global_block().ops:
        if op.type in ["send", "recv"]:
            return True
    return False


def _has_backward_op(graph):
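    # Return True if the graph contains any gradient op, i.e. an op whose type
    # ends with "_grad", which indicates a backward pass.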
    for node in graph.nodes():
        if node.is_op() and node.op() is not None and \
                node.op().type().endswith("_grad"):
            return True
    return False


def _prune_feed_ops(program):
    # prune the feed ops in the program.
    pop_idx = []
    for i, op in enumerate(program.global_block().ops):
        if op.type == "feed": pop_idx.append(i)
    for index in pop_idx[::-1]:
        program.global_block()._remove_op(index)


def _has_optimize_op(block):
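    # Return True if the block contains an op whose op role attribute marks it
    # as an Optimize op (an optimizer update op).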
    for op in block.ops:
        op_maker = core.op_proto_and_checker_maker
        optimize = core.op_proto_and_checker_maker.OpRole.Optimize
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize):
            return True
    return False


def _has_optimizer_in_control_flow(program):
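    # Check whether an optimizer op appears inside a conditional_block, i.e. the
    # optimizer is used in control flow; such programs cannot run on multiple places.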
    if not program:
        program = framework.default_main_program()
    for op in program.global_block().ops:
        if op.type == "conditional_block_grad":
            sub_block = program.block(op._block_attr_id("sub_block"))
            if _has_optimize_op(sub_block):
                return True

    return False


class CompiledProgram(object):
    """
    :api_attr: Static Graph

    The CompiledProgram is used to transform a program or graph for
    various optimizations according to the configuration of build_strategy,
    for example, the operators' fusion in the computation graph, memory
    optimization during the execution of the computation graph, etc.
    For more information about build_strategy, please refer to
    :code:`paddle.static.BuildStrategy`.

    Args:
        program_or_graph (Graph|Program): This argument is the Program or Graph
            being executed.
        build_strategy(BuildStrategy): This argument is used to compile the
            program or graph with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        CompiledProgram

    Example:
        .. code-block:: python

            import numpy
            import paddle
            import paddle.static as static

            paddle.enable_static()

            place = paddle.CUDAPlace(0) # paddle.CPUPlace()
            exe = static.Executor(place)

            data = static.data(name='X', shape=[None, 1], dtype='float32')
            hidden = static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

            exe.run(static.default_startup_program())
            compiled_prog = static.CompiledProgram(
                static.default_main_program())

            x = numpy.random.random(size=(10, 1)).astype('float32')
            loss_data, = exe.run(compiled_prog,
                                feed={"X": x},
                                fetch_list=[loss.name])
    """

    def __init__(self, program_or_graph, build_strategy=None):
        if isinstance(program_or_graph, core.Graph):
            self._graph = program_or_graph
            # do not create a new program here.
            self._program = None
        elif isinstance(program_or_graph, framework.Program):
            _prune_feed_ops(program_or_graph)
            self._graph = core.Graph(program_or_graph.desc)
            self._program = program_or_graph
        else:
            raise TypeError(
                "The type of program_or_graph parameter is wrong, expected Graph or Program, but received %s"
                % type(program_or_graph))

        self._scope = None
        self._place = None
        self._executor = None
        self._compiled = False
        self._is_data_parallel = False
        self._is_inference = False
        self._loss_name = None
        self._share_vars_from = None
        self._places = None
        self._build_strategy = build_strategy
        self._exec_strategy = None

    def with_data_parallel(self,
                           loss_name=None,
                           build_strategy=None,
                           exec_strategy=None,
                           share_vars_from=None,
                           places=None):
        """
        This interface is used to transform the input Program or Graph to a multi-graph
        to run the model in data parallel mode. Users can use the build_strategy and
        exec_strategy to set some optimizations that can be applied during the construction
        and computation of the Graph, such as reducing the number of AllReduce operations,
        specifying the size of the thread pool used in the computation Graph running the model,
176 177 178 179 180 181 182
        and so on.

        .. note::
            If build_strategy is specified both when creating the CompiledProgram and when
            calling with_data_parallel, the build_strategy passed to CompiledProgram will be
            overwritten. Therefore, for data parallel training it is recommended to set
            build_strategy when calling the with_data_parallel interface.

        Args:
            loss_name (str): This parameter is the name of the loss Tensor of the model.
                **Note: If it is model training, you must set loss_name, otherwise the
                result may be problematic**. The default is None.
            build_strategy(BuildStrategy): This parameter is used to compile the
                program or graph with the specified options, such as operators' fusion
                in the computational graph and memory optimization during the execution
                of the computational graph. For more information about build_strategy,
                please refer to :code:`fluid.BuildStrategy`. The default is None.
            exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can
                be changed when running the current model, such as the thread pool size.
                For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`.
                The default is None.
            share_vars_from(CompiledProgram): If share_vars_from is set, the current
                CompiledProgram will share the parameter value with the CompiledProgram
                specified by share_vars_from. This parameter needs to be set when model testing
                is required during model training, and the data parallel mode is used for
                training and testing. Since CompiledProgram will only distribute parameter
                Tensors to other devices when it is first executed, the CompiledProgram
                specified by share_vars_from must be run before the current CompiledProgram.
                The default is None.
            places(list(CUDAPlace)|list(CPUPlace)|None): This parameter specifies the device
                on which the model is running. If you want to run on GPU0 and GPU1, places are
                [fluid.CUDAPlace(0), fluid.CUDAPlace(1)]; if you want to run with 2 CPUs, places are
                [fluid.CPUPlace()] * 2. If the parameter is not set, i.e. the parameter is None,
                the available devices will be obtained from environment variables when the model
                is executed: if the GPU is used, the currently available device IDs are obtained
                from the environment variable FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES when
                the model is executed; if the CPU is used, the number of available CPUs is obtained
                from the environment variable CPU_NUM when the model is executed, for example,
                export CPU_NUM=4. If the environment variable is not set, the executor will
                add it to the environment and set its value to 1.
                The default is None.

        Returns:
            CompiledProgram

        Example:
            .. code-block:: python

                import numpy
                import os
                import paddle
                import paddle.static as static

                paddle.enable_static()

                use_cuda = True
                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
                parallel_places = [paddle.CUDAPlace(0), paddle.CUDAPlace(1)] if use_cuda else [paddle.CPUPlace()] * 2

                # NOTE: If you use the CPU to run the program, you need
                # to specify CPU_NUM; otherwise, paddle will use all the
                # logical cores as CPU_NUM. In that case, the batch size
                # of the input should be greater than CPU_NUM; if not,
                # the process will fail with an exception.
                if not use_cuda:
                    os.environ['CPU_NUM'] = str(2)

                exe = static.Executor(place)

                data = static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)

                test_program = static.default_main_program().clone(for_test=True)
                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

                exe.run(static.default_startup_program())
                compiled_train_prog = static.CompiledProgram(
                    static.default_main_program()).with_data_parallel(
                            loss_name=loss.name, places=parallel_places)
                # NOTE: if share_vars_from=compiled_train_prog is not set,
                # the parameters used in the test process are different from
                # the parameters used by the train process
                compiled_test_prog = static.CompiledProgram(
                    test_program).with_data_parallel(
                            share_vars_from=compiled_train_prog,
                            places=parallel_places)

                train_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_train_prog,
                                feed={"X": train_data},
                                fetch_list=[loss.name])
                test_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_test_prog,
                                feed={"X": test_data},
                                fetch_list=[loss.name])
        """
        assert not self._is_data_parallel, "Already compiled with parallel, cannot be recompiled."
        assert not self._is_inference, "Cannot compile with both data parallel and inference."
        self._is_data_parallel = True
        # FIXME(zcd): Currently, the build_strategy can be set during creating
        # CompiledProgram or calling with_data_parallel, and it may be confusing,
        # but in the long run, we should set up build_strategy only when creating
        # CompiledProgram, and exec_strategy should be deprecated.
        if build_strategy is not None: self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy
        self._loss_name = loss_name
        self._share_vars_from = share_vars_from
        self._places = places

        if _has_backward_op(self._graph):
            assert self._loss_name is not None, "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains backward part."

        if self._places is not None:
            if not isinstance(self._places, (list, tuple)):
                self._places = [self._places]

        return self

    def _with_inference_optimize(self, config):
        """ Add inference optimize

        Args:
            config: instance of `NativeConfig` or `AnalysisConfig` to create predictor
        Returns:
            self
        """
        assert not self._is_data_parallel, "Cannot compile with both data parallel and inference"
        assert not self._is_inference, "Already compiled with inference, cannot be recompiled."

        assert any([
            isinstance(config, InferNativeConfig),
            isinstance(config, InferAnalysisConfig)
        ])
        self._is_inference = True
        self._infer_config = config
        return self

    def _with_distributed(self):
        raise NotImplementedError(
            "Subclass of CompiledProgram should implement _with_distributed method."
        )

    def _compile_data_parallel(self, places, use_device, scope=None):
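        # Build the underlying ParallelExecutor over the given places. If
        # share_vars_from is set, reuse the local scopes of that already
        # compiled and executed program so parameters are shared with it.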
        if self._share_vars_from:
            if scope:
                sys.stderr.write("share_vars_from is set, scope is ignored.\n")
            if not self._share_vars_from._is_data_parallel:
                raise ValueError(
                    "The shared Program is not data parallel, cannot "
                    "share variables from it.")
            if self._share_vars_from._executor is None:
                raise ValueError(
                    "The shared Program is not compiled and executed, so there is no "
                    "variables to share.")
            self._local_scopes = self._share_vars_from._executor.local_scopes()
        else:
            assert scope is not None, \
                "scope must be provided when share_vars_from is not set."
            self._local_scopes = []

        assert isinstance(places, tuple) or isinstance(places, list), \
            "Currently, the places type can only be list or tuple, but the input type is {}.".format(type(places))

        if self._build_strategy is None:
            self._build_strategy = BuildStrategy()
        self._build_strategy.is_distribution = _is_pserver_mode(self._program)

        if self._exec_strategy is None:
            self._exec_strategy = ExecutionStrategy()
        self._exec_strategy._use_device = use_device

        if self._exec_strategy.num_threads == 0:
            if self._exec_strategy._use_device == DeviceType.CUDA:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                self._exec_strategy.num_threads = len(places) * 4
            elif self._exec_strategy._use_device == DeviceType.XPU:
                # Currently only single thread is supported in Kunlun XPU.
                self._exec_strategy.num_threads = 1
            else:
                self._exec_strategy.num_threads = len(places) * 2

        if self._exec_strategy._use_device == DeviceType.XPU:
            assert self._exec_strategy.num_threads == 1, \
                "Currently only single thread is supported in Kunlun XPU."

        if self._build_strategy.num_trainers > 1:
            assert self._is_data_parallel, \
                "If you use multi-trainer to train the model, you should use "\
                "the data parallel model, i.e. calling with_data_parallel function."

        # TODO(wuyi): trainer endpoints should be passed in through
        # build_strategy, not program.xxx.
        # TODO(gongwb): let the user set them once.
        if self._program and self._build_strategy.num_trainers > 1 and \
                self._program._trainers_endpoints:
            tps = self._program._trainers_endpoints

            assert self._build_strategy.num_trainers == len(
                tps), "The number of trainers is not equal to the number of endpoints."
            self._build_strategy.trainers_endpoints = tps

        if self._program:
            self._build_strategy.nccl_comm_num = self._program._nccl_comm_num
            self._build_strategy.use_hierarchical_allreduce = self._program._use_hierarchical_allreduce
            self._build_strategy.hierarchical_allreduce_inter_nranks = self._program._hierarchical_allreduce_inter_nranks

        if self._build_strategy.sync_batch_norm:
            self._build_strategy.enable_sequential_execution = True

        if self._program is not None and self._program._enable_dgc:
            assert self._exec_strategy._use_device == DeviceType.CUDA, "DGC can only be used in a CUDA environment."
            assert self._build_strategy.num_trainers * len(
                places) > 1, "DGC is not available for single card training."
            assert self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "DGC \
                can only be used with the AllReduce BuildStrategy."

            # DGC doesn't support fusion for now, so disable it.
            self._build_strategy.fuse_all_reduce_ops = False

        self._persistable_vars = []
        for node in self._graph.nodes():
            if node.is_var() and node.var() is not None and node.var().persistable() and \
                    node.var().type() != core.VarDesc.VarType.RAW:
                self._persistable_vars.append(cpt.to_text(node.name()))

        places = list(map(_place_obj, places))

        # ParallelExecutor broadcasts all the parameters during initialization.
        # The parameters of each process should be in the same order for
        # data-parallel distributed training to keep the broadcast correct.
        self._persistable_vars = list(set(self._persistable_vars))
        self._persistable_vars.sort()

        return core.ParallelExecutor(
            places, self._persistable_vars,
            cpt.to_text(self._loss_name)
            if self._loss_name else six.u(''), self._scope, self._local_scopes,
            self._exec_strategy, self._build_strategy, self._graph)

    def _compile_inference(self):
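        # Create a predictor from the inference config set by
        # _with_inference_optimize.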
        return core.create_paddle_predictor(self._infer_config)

    def _compile(self, scope, place):
        """Compile the program based on the configs.

        Args:
            scope: The variables (resources) that are associated with
               this compiled program.
            place: The location that the compiled program will be run on.

        Returns:
            self
        """
        if self._compiled:
            if scope and self._scope != scope:
                raise ValueError("Cannot compile program with different scope.")
            if place and not self._place._equals(place):
                raise ValueError("Cannot compile program with different place.")
            return self
        self._compiled = True

        self._scope = scope
        self._place = place

        if self._is_inference:
            self._executor = self._compile_inference()
        else:
            if self._is_data_parallel:
                self._places = self._get_places(self._place, self._places)
            else:
                self._places = [self._place]

            # TODO(liym27): If an optimizer is used in control flow,
            # training on multiple places is not supported now; it will
            # be supported later.
            if len(self._places) > 1 and \
                    _has_optimizer_in_control_flow(self._program):
                raise NotImplementedError(
                    "If optimizer is used in control flow, "
                    "training on multi-places is not supported now.")
            if isinstance(self._place, core.CUDAPlace):
                use_device = DeviceType.CUDA
            elif isinstance(self._place, core.XPUPlace):
                use_device = DeviceType.XPU
            else:
                use_device = DeviceType.CPU
            self._executor = self._compile_data_parallel(
                use_device=use_device, scope=self._scope, places=self._places)
        return self

    def _get_places(self, place, place_list):
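        # Return the list of places to run on. If the user passed a place_list,
        # every entry must have the same type as `place`; otherwise derive the
        # available places (cuda/xpu/cpu) from the type of `place`.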
        has_set_place = (place_list is not None)
        if has_set_place:
            for p in place_list:
                assert p._type() == place._type(), \
473
                    "Place type not match. You may set wrong type of places."
C
chengduo 已提交
474
        else:
            if isinstance(place, core.CUDAPlace):
                place_list = cuda_places()
            elif isinstance(place, core.XPUPlace):
                place_list = xpu_places()
            else:
                place_list = cpu_places()
        assert place_list, "No places for execution."
        return place_list