#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import six
import sys
from .. import compat as cpt
from . import framework
from .framework import _get_paddle_place, _get_paddle_place_list
from .framework import cuda_places, cpu_places, xpu_places
from . import core

__all__ = ['CompiledProgram', 'ExecutionStrategy', 'BuildStrategy']

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
DeviceType = core.DeviceType


def _place_obj(place):
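    # Wrap a typed place (e.g. core.CUDAPlace) in a generic core.Place handle.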
    p = core.Place()
    p.set_place(place)
    return p


def _is_pserver_mode(main_program):
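    # Parameter-server (pserver) mode is detected by the presence of
    # send/recv ops in the global block of the main program.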
    main = main_program if main_program \
        else framework.default_main_program()
    for op in main.global_block().ops:
        if op.type in ["send", "recv"]:
            return True
    return False


def _has_backward_op(graph):
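    # A graph is considered to contain a backward pass if any op node's
    # type ends with "_grad".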
    for node in graph.nodes():
        if node.is_op() and node.op() is not None and \
                node.op().type().endswith("_grad"):
            return True
    return False


def _prune_feed_ops(program):
    # prune the feed ops in the program.
    pop_idx = []
    for i, op in enumerate(program.global_block().ops):
        if op.type == "feed": pop_idx.append(i)
    for index in pop_idx[::-1]:
        program.global_block()._remove_op(index)


def _has_optimize_op(block):
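    # An op is treated as an optimize op if it carries the op-role-var
    # attribute and its op-role attribute equals OpRole.Optimize.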
    for op in block.ops:
        op_maker = core.op_proto_and_checker_maker
        optimize = core.op_proto_and_checker_maker.OpRole.Optimize
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize):
            return True
    return False


def _has_optimizer_in_control_flow(program):
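    # Detect an optimize op inside the sub_block of a conditional_block_grad
    # op, i.e. an optimizer used within control flow.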
    if not program:
        program = framework.default_main_program()
    for op in program.global_block().ops:
        if op.type == "conditional_block_grad":
            sub_block = program.block(op._block_attr_id("sub_block"))
            if _has_optimize_op(sub_block):
                return True

    return False


class CompiledProgram(object):
    """
    :api_attr: Static Graph
    
    The CompiledProgram is used to transform a program or graph for
    various optimizations according to the configuration of build_strategy,
    for example, the operators' fusion in the computation graph, memory
    optimization during the execution of the computation graph, etc.
    For more information about build_strategy, please refer to
    :code:`paddle.static.BuildStrategy`.

    Args:
        program_or_graph (Graph|Program): This argument is the Program or Graph
            being executed.
        build_strategy(BuildStrategy): This argument is used to compile the
            program or graph with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        CompiledProgram

    Example:
        .. code-block:: python

            import numpy
            import paddle
            import paddle.static as static

            paddle.enable_static()

            place = paddle.CUDAPlace(0) # paddle.CPUPlace()
            exe = static.Executor(place)

            data = static.data(name='X', shape=[None, 1], dtype='float32')
            hidden = static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

            exe.run(static.default_startup_program())
            compiled_prog = static.CompiledProgram(
                static.default_main_program())

            x = numpy.random.random(size=(10, 1)).astype('float32')
            loss_data, = exe.run(compiled_prog,
                                feed={"X": x},
                                fetch_list=[loss.name])
    """

    def __init__(self, program_or_graph, build_strategy=None):
        if isinstance(program_or_graph, core.Graph):
            self._graph = program_or_graph
            # Don't create a new program here.
            self._program = None
        elif isinstance(program_or_graph, framework.Program):
            _prune_feed_ops(program_or_graph)
            self._graph = core.Graph(program_or_graph.desc)
            self._program = program_or_graph
        else:
            raise TypeError(
                "The type of the program_or_graph parameter is wrong; expected Graph or Program, but received %s"
                % type(program_or_graph))

        self._scope = None
        self._place = None
        self._executor = None
        self._compiled = False
        self._is_data_parallel = False
        self._is_inference = False
        self._loss_name = None
        self._share_vars_from = None
        self._places = None
        self._build_strategy = build_strategy
        self._exec_strategy = None

    def with_data_parallel(self,
                           loss_name=None,
                           build_strategy=None,
                           exec_strategy=None,
                           share_vars_from=None,
                           places=None):
        """
        This interface is used to transform the input Program or Graph into a multi-graph
        to run the model in data parallel mode. Users can use the build_strategy and
        exec_strategy to set some optimizations that can be applied during the construction
        and computation of the Graph, such as reducing the number of AllReduce operations,
        specifying the size of the thread pool used when running the computation Graph,
        and so on.

        .. note::
            If build_strategy is specified both when building the CompiledProgram and when
            calling with_data_parallel, the build_strategy in the CompiledProgram will be
            overwritten. Therefore, for data parallel training, it is recommended to set
            build_strategy when calling the with_data_parallel interface.
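
        A minimal sketch of the recommended pattern (assuming ``static`` is
        ``paddle.static`` and a ``loss`` Tensor has already been built, as in
        the full example below):

        .. code-block:: python

            build_strategy = static.BuildStrategy()
            build_strategy.fuse_elewise_add_act_ops = True
            compiled_prog = static.CompiledProgram(
                static.default_main_program()).with_data_parallel(
                    loss_name=loss.name, build_strategy=build_strategy)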

        Args:
            loss_name (str): This parameter is the name of the loss Tensor of the model.
                **Note: If it is model training, you must set loss_name, otherwise the
                result may be problematic**. The default is None.
            build_strategy(BuildStrategy): This parameter is used to compile the
                program or graph with the specified options, such as operators' fusion
                in the computational graph and memory optimization during the execution
                of the computational graph. For more information about build_strategy,
                please refer to :code:`fluid.BuildStrategy`. The default is None.
            exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can
                be changed when running the current model, such as the thread pool size.
                For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`.
                The default is None.
            share_vars_from(CompiledProgram): If share_vars_from is set, the current
                CompiledProgram will share the parameter value with the CompiledProgram
                specified by share_vars_from. This parameter needs to be set when model testing
                is required during model training, and the data parallel mode is used for
                training and testing. Since CompiledProgram will only distribute parameter
                Tensors to other devices when it is first executed, the CompiledProgram
                specified by share_vars_from must be run before the current CompiledProgram.
                The default is None.
            places(list(CUDAPlace)|list(CPUPlace)|list(str)|None): This parameter specifies the devices
                on which the model runs. If you want to run on GPU0 and GPU1, set places to
                [fluid.CUDAPlace(0), fluid.CUDAPlace(1)]; if you want to run with 2 CPUs, set places to
                [fluid.CPUPlace()] * 2. If the parameter is not set, i.e. it is None, the
                available devices are obtained from environment variables when the model is
                executed: if the GPU is used, the currently available device IDs are obtained
                from the environment variable FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES;
                if the CPU is used, the number of available CPUs is obtained from the
                environment variable CPU_NUM, e.g. export CPU_NUM=4. If that environment
                variable is not set, the executor will add it and set its value to 1.
                The default is None. If ``places`` is a list of strings, each string can
                be ``cpu`` or ``gpu:x``, where ``x`` is the index of the GPU.

        Returns:
            CompiledProgram

        Example:
            .. code-block:: python

                import numpy
                import os
                import paddle
                import paddle.static as static

                paddle.enable_static()

                use_cuda = True
                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
                parallel_places = [paddle.CUDAPlace(0), paddle.CUDAPlace(1)] if use_cuda else [paddle.CPUPlace()] * 2

                # NOTE: If you run the program on CPU, you need to
                # specify CPU_NUM; otherwise, paddle will use the
                # number of logical cores as CPU_NUM. In that case,
                # the batch size of the input must be greater than
                # CPU_NUM, or the process will fail with an exception.
                if not use_cuda:
                    os.environ['CPU_NUM'] = str(2)

                exe = static.Executor(place)

                data = static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)

                test_program = static.default_main_program().clone(for_test=True)
                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

                exe.run(static.default_startup_program())
                compiled_train_prog = static.CompiledProgram(
                    static.default_main_program()).with_data_parallel(
                            loss_name=loss.name, places=parallel_places)
                # NOTE: if share_vars_from=compiled_train_prog is not set,
                # the parameters used by the test process will differ from
                # the parameters used by the train process.
                compiled_test_prog = static.CompiledProgram(
                    test_program).with_data_parallel(
                            share_vars_from=compiled_train_prog,
                            places=parallel_places)

                train_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_train_prog,
                                feed={"X": train_data},
                                fetch_list=[loss.name])
                test_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_test_prog,
                                feed={"X": test_data},
                                fetch_list=[loss.name])
        """
        assert not self._is_data_parallel, "Already compiled with parallel, cannot be recompiled."
        assert not self._is_inference, "Cannot compile with both data parallel and inference."
        self._is_data_parallel = True
        # FIXME(zcd): Currently, the build_strategy can be set during creating
        # CompiledProgram or calling with_data_parallel, and it may be confusing,
        # but in the long run, we should set up build_strategy only when creating
        # CompiledProgram, and exec_strategy should be deprecated.
        if build_strategy is not None:
            self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy
        self._loss_name = loss_name
        self._share_vars_from = share_vars_from
        if isinstance(places, (list, tuple)):
            self._places = _get_paddle_place_list(places)
        else:
            self._places = _get_paddle_place(places)

        if _has_backward_op(self._graph):
            assert self._loss_name is not None, "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains a backward part."

        if self._places is not None:
            if not isinstance(self._places, (list, tuple)):
                self._places = [self._places]

        return self

    def _with_inference_optimize(self, config):
        """ Add inference optimize

        Args:
            config: instance of `NativeConfig` or `AnalysisConfig` used to create the predictor
        Returns:
            self
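
        A hedged sketch of a call site (the model path is hypothetical; this
        is a private helper, not a public API):

        .. code-block:: python

            config = core.AnalysisConfig("./inference_model")  # hypothetical path
            compiled = CompiledProgram(program)._with_inference_optimize(config)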
        """
        assert not self._is_data_parallel, "Cannot compile with both data parallel and inference"
        assert not self._is_inference, "Already compiled with inference, cannot be recompiled."

        assert any([
            isinstance(config, InferNativeConfig),
            isinstance(config, InferAnalysisConfig)
        ])
        self._is_inference = True
        self._infer_config = config
        return self

    def _with_distributed(self):
        raise NotImplementedError(
            "Subclass of CompiledProgram should implement _with_distributed method."
        )

    def _compile_data_parallel(self, places, use_device, scope=None):
        if self._share_vars_from:
            if scope:
                sys.stderr.write("share_vars_from is set, scope is ignored.\n")
            if not self._share_vars_from._is_data_parallel:
                raise ValueError(
                    "The shared Program is not data parallel, cannot "
                    "share variables from it.")
            if self._share_vars_from._executor is None:
                raise ValueError(
                    "The shared Program is not compiled and executed, so there are no "
                    "variables to share.")
            self._local_scopes = self._share_vars_from._executor.local_scopes()
        else:
            assert scope is not None, "The scope cannot be None when share_vars_from is not set."
            self._local_scopes = []

        assert isinstance(places, (list, tuple)), \
            "Currently, the places type can only be list or tuple, but the input type is {}.".format(type(places))

        if self._build_strategy is None:
            self._build_strategy = BuildStrategy()
        self._build_strategy.is_distribution = _is_pserver_mode(self._program)

        if self._exec_strategy is None:
            self._exec_strategy = ExecutionStrategy()
        self._exec_strategy._use_device = use_device

        if self._exec_strategy.num_threads == 0:
            if self._exec_strategy._use_device == DeviceType.CUDA:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                self._exec_strategy.num_threads = len(places) * 4
            elif self._exec_strategy._use_device == DeviceType.XPU:
                # Currently only single thread is supported in Kunlun XPU.
                self._exec_strategy.num_threads = 1
            else:
                self._exec_strategy.num_threads = len(places) * 2

        if self._exec_strategy._use_device == DeviceType.XPU:
            assert self._exec_strategy.num_threads == 1, \
                "Currently only single thread is supported in Kunlun XPU."

        if self._build_strategy.num_trainers > 1:
            assert self._is_data_parallel, \
                "If you use multi-trainer to train the model, you should use "\
                "the data parallel mode, i.e. call the with_data_parallel function."

        # TODO(wuyi): trainer endpoints should be passed in through
        # build_strategy, not program.xxx.
        # TODO(gongwb): let the user set them once.
        if self._program and self._build_strategy.num_trainers > 1 and \
                self._program._trainers_endpoints:
            tps = self._program._trainers_endpoints

            assert self._build_strategy.num_trainers == len(
                tps), "The number of trainers is not equal to the number of endpoints."
            self._build_strategy.trainers_endpoints = tps

        if self._program:
            self._build_strategy.nccl_comm_num = self._program._nccl_comm_num
            self._build_strategy.use_hierarchical_allreduce = self._program._use_hierarchical_allreduce
            self._build_strategy.hierarchical_allreduce_inter_nranks = self._program._hierarchical_allreduce_inter_nranks

        if self._build_strategy.sync_batch_norm:
            self._build_strategy.enable_sequential_execution = True

        if self._program is not None and self._program._enable_dgc:
            assert self._exec_strategy._use_device == DeviceType.CUDA, \
                "DGC can only be used under the CUDA environment."
            assert self._build_strategy.num_trainers * len(places) > 1, \
                "DGC is not available for single-card training."
            assert self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, \
                "DGC can only be used with the AllReduce reduce strategy."

            # DGC doesn't support fuse for now, so disable fuse.
            self._build_strategy.fuse_all_reduce_ops = False

        self._persistable_vars = []
        for node in self._graph.nodes():
            if node.is_var() and node.var() is not None and node.var().persistable() and \
                    node.var().type() != core.VarDesc.VarType.RAW:
                self._persistable_vars.append(cpt.to_text(node.name()))

        places = list(map(_place_obj, places))

        # ParallelExecutor broadcasts all the parameters during initialization.
        # The parameters of each process should be in the same order for
        # data-parallel distributed training to keep the broadcast correct.
        self._persistable_vars = list(set(self._persistable_vars))
        self._persistable_vars.sort()

        return core.ParallelExecutor(
            places, self._persistable_vars,
            cpt.to_text(self._loss_name)
            if self._loss_name else six.u(''), self._scope, self._local_scopes,
            self._exec_strategy, self._build_strategy, self._graph)

    def _compile_inference(self):
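        # Build an inference predictor from the stored config; it is used in
        # place of a ParallelExecutor when the program is compiled for inference.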
        return core.create_paddle_predictor(self._infer_config)

    def _compile(self, scope, place):
        """Compile the program based on the configs.

        Args:
            scope: The variables (resources) that are associated with
               this compiled program.
            place: The location that the compiled program will be run on.

        Returns:
            self
        """
        if self._compiled:
            if scope and self._scope != scope:
                raise ValueError("Cannot compile program with different scope.")
            if place and not self._place._equals(place):
                raise ValueError("Cannot compile program with different place.")
            return self
        self._compiled = True

        self._scope = scope
        self._place = place

        if self._is_inference:
            self._executor = self._compile_inference()
        else:
            if self._is_data_parallel:
                self._places = self._get_places(self._place, self._places)
            else:
                self._places = [self._place]

            # TODO(liym27): If an optimizer is used in control flow,
            # training on multiple places is not supported now; it will
            # be supported later.
            if len(self._places) > 1 and \
                    _has_optimizer_in_control_flow(self._program):
                raise NotImplementedError(
                    "If an optimizer is used in control flow, "
                    "training on multiple places is not supported now.")
            if isinstance(self._place, core.CUDAPlace):
                use_device = DeviceType.CUDA
            elif isinstance(self._place, core.XPUPlace):
                use_device = DeviceType.XPU
            else:
                use_device = DeviceType.CPU
            self._executor = self._compile_data_parallel(
                use_device=use_device, scope=self._scope, places=self._places)
        return self

    def _get_places(self, place, place_list):
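        # If the user passed places explicitly, validate that each one matches
        # the type of the compile-time place; otherwise derive the full device
        # list from the type of the place (CUDA, XPU, or CPU).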
        has_set_place = (place_list is not None)
        if has_set_place:
            for p in place_list:
                assert p._type() == place._type(), \
                    "Place type does not match. You may have set the wrong type of places."
        else:
            if isinstance(place, core.CUDAPlace):
                place_list = cuda_places()
            elif isinstance(place, core.XPUPlace):
                place_list = xpu_places()
            else:
                place_list = cpu_places()
        assert place_list, "No places for execution."
        return place_list