#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import six
import sys
import warnings
from .. import compat as cpt
from . import framework
from .framework import _get_paddle_place, _get_paddle_place_list
from .framework import cuda_places, cpu_places, xpu_places
from . import core

__all__ = [
    'CompiledProgram', 'ExecutionStrategy', 'BuildStrategy',
    'IpuCompiledProgram', 'IpuStrategy'
]

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
DeviceType = core.DeviceType


def _place_obj(place):
    p = core.Place()
    p.set_place(place)
    return p


def _is_pserver_mode(main_program):
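    # Treat a program that contains send/recv ops as parameter-server mode.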
    main = main_program if main_program \
        else framework.default_main_program()
    for op in main.global_block().ops:
        if op.type in ["send", "recv"]:
            return True
    return False


def _has_backward_op(graph):
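    # A graph is considered to have a backward part if any op type ends with "_grad".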
    for node in graph.nodes():
        if node.is_op() and node.op() is not None and \
                node.op().type().endswith("_grad"):
            return True
    return False


def _prune_feed_ops(program):
    # prune the feed ops in the program.
    pop_idx = []
    for i, op in enumerate(program.global_block().ops):
        if op.type == "feed": pop_idx.append(i)
    for index in pop_idx[::-1]:
        program.global_block()._remove_op(index)


def _has_optimize_op(block):
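    # An op whose op-role attribute equals OpRole.Optimize is an optimizer op.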
    for op in block.ops:
        op_maker = core.op_proto_and_checker_maker
        optimize = core.op_proto_and_checker_maker.OpRole.Optimize
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize):
            return True
    return False


def _has_optimizer_in_control_flow(program):
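    # True if an optimizer op appears inside a conditional_block's sub-block,
    # i.e. the optimizer runs inside control flow.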
    if not program:
        program = framework.default_main_program()
    for op in program.global_block().ops:
        if op.type == "conditional_block_grad":
            sub_block = program.block(op._block_attr_id("sub_block"))
            if _has_optimize_op(sub_block):
                return True

    return False


def _should_broadcast_or_not_exists(program, var_name):
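    # A variable should be broadcast across devices unless it is marked as
    # distributed; a variable missing from the program is treated as broadcastable.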
    block = program.global_block()
    var = block.vars.get(var_name, None)
    if var is None:
        return True
    is_distributed = getattr(var, '_is_distributed', False) or getattr(
        var, 'is_distributed', False)
    return not is_distributed


class CompiledProgram(object):
    """
    :api_attr: Static Graph
    
    The CompiledProgram is used to transform a program or graph for
    various optimizations according to the configuration of build_strategy,
    for example, the operators' fusion in the computation graph, memory
    optimization during the execution of the computation graph, etc.
    For more information about build_strategy, please refer to
    :code:`paddle.static.BuildStrategy`.

    Args:
        program_or_graph (Graph|Program): This argument is the Program or Graph
            being executed.
        build_strategy(BuildStrategy): This argument is used to compile the
            program or graph with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        CompiledProgram

    Example:
        .. code-block:: python

            import numpy
            import paddle
            import paddle.static as static

            paddle.enable_static()

            place = paddle.CUDAPlace(0) # paddle.CPUPlace()
            exe = static.Executor(place)

            data = static.data(name='X', shape=[None, 1], dtype='float32')
            hidden = static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

            exe.run(static.default_startup_program())
            compiled_prog = static.CompiledProgram(
                static.default_main_program())

            x = numpy.random.random(size=(10, 1)).astype('float32')
            loss_data, = exe.run(compiled_prog,
                                feed={"X": x},
                                fetch_list=[loss.name])
    """

    def __init__(self, program_or_graph, build_strategy=None):
        if isinstance(program_or_graph, core.Graph):
            self._graph = program_or_graph
            # don't create a new program here.
            self._program = None
        elif isinstance(program_or_graph, framework.Program):
            _prune_feed_ops(program_or_graph)
            self._graph = core.Graph(program_or_graph.desc)
            self._program = program_or_graph
        else:
            raise TypeError(
                "The type of program_or_graph parameter is wrong, expected Graph or Program, but received %s"
                % type(program_or_graph))

        self._scope = None
        self._place = None
        self._executor = None
        self._compiled = False
        self._is_data_parallel = False
        self._is_inference = False
        self._loss_name = None
        self._share_vars_from = None
        self._places = None
        self._build_strategy = build_strategy
        self._exec_strategy = None

    def with_data_parallel(self,
                           loss_name=None,
                           build_strategy=None,
                           exec_strategy=None,
                           share_vars_from=None,
                           places=None):
        """
        This interface is used to transform the input Program or Graph to a multi-graph
        to run the model in data parallel mode. Users can use the build_strategy and
        exec_strategy to set some optimizations that can be applied during the construction
        and computation of the Graph, such as reducing the number of AllReduce operations,
        specifying the size of the thread pool used in the computation Graph running the model,
        and so on.

        .. note::
            If build_strategy is specified both when building the CompiledProgram and
            when calling with_data_parallel, the build_strategy in CompiledProgram
            will be overwritten; therefore, for data parallel training it is
            recommended to set build_strategy when calling the with_data_parallel
            interface.

        Args:
            loss_name (str): This parameter is the name of the loss Tensor of the model.
                **Note: If it is model training, you must set loss_name, otherwise the
                result may be problematic**. The default is None.
            build_strategy(BuildStrategy): This parameter is used to compile the
                program or graph with the specified options, such as operators' fusion
                in the computational graph and memory optimization during the execution
                of the computational graph. For more information about build_strategy,
                please refer to :code:`fluid.BuildStrategy`. The default is None.
            exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can
                be changed when running the current model, such as the thread pool size.
                For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`.
                The default is None.
            share_vars_from(CompiledProgram): If share_vars_from is set, the current
                CompiledProgram will share the parameter value with the CompiledProgram
                specified by share_vars_from. This parameter needs to be set when model testing
                is required during model training, and the data parallel mode is used for
                training and testing. Since CompiledProgram will only distribute parameter
                Tensors to other devices when it is first executed, the CompiledProgram
                specified by share_vars_from must be run before the current CompiledProgram.
                The default is None.
            places(list(CUDAPlace)|list(CPUPlace)|list(str)|None): This parameter specifies the device
                on which the model is running. If you want to run on GPU0 and GPU1, places are
                [fluid.CUDAPlace(0), fluid.CUDAPlace(1)]; if you want to run with 2 CPUs, places are
                [fluid.CPUPlace()] * 2. If the parameter is not set, i.e. the parameter is None,
                the available devices will be obtained from the environment variables when the
                model is executed: if the GPU is used, the currently available device IDs are
                obtained from the environment variable FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES;
                if the CPU is used, the number of currently available CPUs is obtained from the
                environment variable CPU_NUM (for example, export CPU_NUM=4). If CPU_NUM is not
                set, the executor will set it in the environment with a value of 1.
                The default is None. If ``places`` is a list of strings, each string in the list
                can be ``cpu`` or ``gpu:x``, where ``x`` is the index of the GPU.

        Returns:
            CompiledProgram

        Example:
            .. code-block:: python

                import numpy
                import os
                import paddle
                import paddle.static as static

                paddle.enable_static()

                use_cuda = True
                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
                parallel_places = [paddle.CUDAPlace(0), paddle.CUDAPlace(1)] if use_cuda else [paddle.CPUPlace()] * 2

                # NOTE: If you use the CPU to run the program, you need
                # to specify CPU_NUM; otherwise, paddle will use all the
                # logical cores as CPU_NUM. In that case, the batch size
                # of the input should be greater than CPU_NUM; if not,
                # the process will fail with an exception.
                if not use_cuda:
                    os.environ['CPU_NUM'] = str(2)

                exe = static.Executor(place)

                data = static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)

                test_program = static.default_main_program().clone(for_test=True)
                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

                exe.run(static.default_startup_program())
                compiled_train_prog = static.CompiledProgram(
                    static.default_main_program()).with_data_parallel(
                            loss_name=loss.name, places=parallel_places)
                # NOTE: if share_vars_from=compiled_train_prog is not set,
                # the parameters used in the test process are different from
                # the parameters used in the train process
                compiled_test_prog = static.CompiledProgram(
                    test_program).with_data_parallel(
                            share_vars_from=compiled_train_prog,
                            places=parallel_places)

                train_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_train_prog,
                                feed={"X": train_data},
                                fetch_list=[loss.name])
                test_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_test_prog,
                                feed={"X": test_data},
                                fetch_list=[loss.name])
        """
        assert not self._is_data_parallel, "Already compiled with parallel, cannot be recompiled."
        assert not self._is_inference, "Cannot compile with both data parallel and inference."
        self._is_data_parallel = True
        # FIXME(zcd): Currently, the build_strategy can be set during creating
        # CompiledProgram or calling with_data_parallel, and it may be confusing,
        # but in the long run, we should set up build_strategy only when creating
        # CompiledProgram, and exec_strategy should be deprecated.
        if build_strategy is not None: self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy
        self._loss_name = loss_name
        self._share_vars_from = share_vars_from
        if isinstance(places, (list, tuple)):
            self._places = _get_paddle_place_list(places)
        else:
            self._places = _get_paddle_place(places)

        if _has_backward_op(self._graph):
            assert self._loss_name is not None, "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains a backward part."

        if self._places is not None:
            if not isinstance(self._places, (list, tuple)):
                self._places = [self._places]

        return self

    def _with_inference_optimize(self, config):
        """ Add inference optimize

        Args:
            config: instance of `NativeConfig` or `AnalysisConfig` to create predictor
        Returns:
            self
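
        Example:
            A minimal sketch; the model directory ``"./infer_model"`` is a
            hypothetical placeholder for a saved inference model:

            .. code-block:: python

                import paddle
                import paddle.static as static
                from paddle.fluid import core

                paddle.enable_static()

                config = core.AnalysisConfig("./infer_model")
                compiled = static.CompiledProgram(
                    static.default_main_program())._with_inference_optimize(config)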
        """
        assert not self._is_data_parallel, "Cannot compile with both data parallel and inference"
        assert not self._is_inference, "Already compiled with inference, cannot be recompiled."

        assert any([
            isinstance(config, InferNativeConfig),
            isinstance(config, InferAnalysisConfig)
        ])
        self._is_inference = True
        self._infer_config = config
        return self

    def _with_distributed(self):
        raise NotImplementedError(
            "Subclass of CompiledProgram should implement _with_distributed method."
        )

    def _compile_data_parallel(self, places, use_device, scope=None):
        if self._share_vars_from:
            if scope:
                sys.stderr.write("share_vars_from is set, scope is ignored.\n")
            if not self._share_vars_from._is_data_parallel:
                raise ValueError(
                    "The shared Program is not data parallel, cannot "
                    "share variables from it.")
            if self._share_vars_from._executor is None:
                raise ValueError(
                    "The shared Program is not compiled and executed, so there is no "
                    "variables to share.")
            self._local_scopes = self._share_vars_from._executor.local_scopes()
        else:
            assert scope is not None, "scope cannot be None when share_vars_from is not set."
            self._local_scopes = []

        assert isinstance(places, tuple) or isinstance(places, list), \
            "Currently , The places type can only be list or tuple, but the input type is {}.".format(type(places))

        if self._build_strategy is None:
            self._build_strategy = BuildStrategy()
        self._build_strategy.is_distribution = _is_pserver_mode(self._program)

        if self._exec_strategy is None:
            self._exec_strategy = ExecutionStrategy()
        self._exec_strategy._use_device = use_device

        if self._exec_strategy.num_threads == 0:
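            # No thread count was configured: derive a default from the device
            # type and place count, e.g. 4 CUDA places -> 16 threads, 4 CPU
            # places -> 8 threads.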
            if self._exec_strategy._use_device == DeviceType.CUDA:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                self._exec_strategy.num_threads = len(places) * 4
            elif self._exec_strategy._use_device == DeviceType.XPU:
                # Currently only single thread is supported in Kunlun XPU.
                self._exec_strategy.num_threads = 1
            else:
                self._exec_strategy.num_threads = len(places) * 2

        if "FLAGS_use_cinn" in core.globals() and core.globals(
        )["FLAGS_use_cinn"] and self._exec_strategy.num_threads != 1:
            warnings.warn("At present, when CINN is turned on, each process can " \
                  "only contain one thread, so reset the number of threads to 1 here.")
            self._exec_strategy.num_threads = 1

        if self._build_strategy.num_trainers > 1:
            assert self._is_data_parallel, \
                "If you use multi-trainer to train the model, you should use "\
                "the data parallel model, i.e. calling with_data_parallel function."

        # TODO(wuyi): trainer endpoints should be passed in through
        # build_strategy, not program.xxx.
        # TODO(gongwb): let users set them once.
        if self._program and self._build_strategy.num_trainers > 1 and \
                self._program._trainers_endpoints:
            tps = self._program._trainers_endpoints

            assert self._build_strategy.num_trainers == len(
                tps), "The trainer numbers is not equal to endpoint numbers."
            self._build_strategy.trainers_endpoints = tps

        if self._program:
            self._build_strategy.nccl_comm_num = self._program._nccl_comm_num
            self._build_strategy.use_hierarchical_allreduce = self._program._use_hierarchical_allreduce
            self._build_strategy.hierarchical_allreduce_inter_nranks = self._program._hierarchical_allreduce_inter_nranks

        if self._build_strategy.sync_batch_norm:
            self._build_strategy.enable_sequential_execution = True

        if self._program is not None and self._program._enable_dgc:
            assert self._exec_strategy._use_device == DeviceType.CUDA, "DGC can only be used under a CUDA environment."
            assert self._build_strategy.num_trainers * len(
                places) > 1, "DGC is not available for single card training."
            assert self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "DGC \
                can only be used for AllReduce BuildStrategy."

            # DGC doesn't support fuse_all_reduce_ops for now, so disable it.
            self._build_strategy.fuse_all_reduce_ops = False

        self._persistable_vars = []
        for node in self._graph.nodes():
            if node.is_var() and node.var() is not None and node.var().persistable() and \
                    node.var().type() != core.VarDesc.VarType.RAW:
                name = cpt.to_text(node.name())
                if self._program is not None and _should_broadcast_or_not_exists(
                        self._program, name):
                    self._persistable_vars.append(cpt.to_text(node.name()))

        places = list(map(_place_obj, places))

        # ParallelExecutor broadcasts all the parameters during initialization.
        # The parameters of each process should be in the same order for
        # data-parallel distributed training to keep the broadcast correct.
        self._persistable_vars = list(set(self._persistable_vars))
        self._persistable_vars.sort()

        return core.ParallelExecutor(
            places, self._persistable_vars,
            cpt.to_text(self._loss_name) if self._loss_name else six.u(''),
            self._scope, self._local_scopes, self._exec_strategy,
            self._build_strategy, self._graph)

    def _compile_inference(self):
        return core.create_paddle_predictor(self._infer_config)

    def _compile(self, scope, place):
        """Compile the program based on the configs.

        Args:
            scope: The variables (resources) that are associated with
               this compiled program.
            place: The location that the compiled program will be run on.

        Returns:
            self
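
        Example:
            An illustrative sketch; the Executor normally calls this method
            internally rather than user code calling it directly:

            .. code-block:: python

                import paddle
                import paddle.static as static

                paddle.enable_static()

                compiled = static.CompiledProgram(static.default_main_program())
                compiled._compile(static.global_scope(), paddle.CPUPlace())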
        """
        if self._compiled:
            if scope and self._scope != scope:
                raise ValueError("Cannot compile program with different scope.")
            if place and not self._place._equals(place):
                raise ValueError("Cannot compile program with different place.")
            return self
        self._compiled = True

        self._scope = scope
        self._place = place

        if self._is_inference:
            self._executor = self._compile_inference()
        else:
            if self._is_data_parallel:
                self._places = self._get_places(self._place, self._places)
            else:
                self._places = [self._place]

            # TODO(liym27): If an optimizer is used in control flow,
            # training on multiple places is not supported now; it will
            # be supported later.
            if len(self._places) > 1 and \
                    _has_optimizer_in_control_flow(self._program):
                raise NotImplementedError(
                    "If optimizer is used in control flow, "
                    "training on multi-places is not supported now.")
            if isinstance(self._place, core.CUDAPlace):
                use_device = DeviceType.CUDA
            elif isinstance(self._place, core.XPUPlace):
                use_device = DeviceType.XPU
            else:
                use_device = DeviceType.CPU
            self._executor = self._compile_data_parallel(use_device=use_device,
                                                         scope=self._scope,
                                                         places=self._places)
        return self

    def _get_places(self, place, place_list):
        has_set_place = (place_list is not None)
        if has_set_place:
            for p in place_list:
                assert p._type() == place._type(), \
                    "Place type not match. You may set wrong type of places."
        else:
            if isinstance(place, core.CUDAPlace):
                place_list = cuda_places()
            elif isinstance(place, core.XPUPlace):
                place_list = xpu_places()
            else:
                place_list = cpu_places()
        assert place_list, "No places for execution."
        return place_list


class IpuStrategy(object):
    """
    Help users precisely control the graph building in :code:`paddle.static.IpuCompiledProgram`.

    Returns:
        The IpuStrategy instance.

    Examples:
        .. code-block:: python
	
            # required: ipu

            import paddle
            import paddle.static as static

            paddle.enable_static()

            ipu_strategy = static.IpuStrategy()
    """

    def __init__(self):
        if core.is_compiled_with_ipu():
            self._ipu_strategy = core.IpuStrategy()
            default_options = {
                'location_optimizer': {
                    'on_chip': 0,
                    'use_replicated_tensor_sharding': 1,
                },  # set optimizer location
                'accumulation_and_replication_reduction_type':
                1,  # popart::ReductionType::Mean
                'mean_accumulation_and_replication_reduction_strategy':
                1,  # popart::MeanReductionStrategy::Post
            }
            self._ipu_strategy.set_options(default_options)
            self.has_custom_ops = False
            self.custom_op_names = []
        else:
            raise RuntimeError(
                "Can not use IpuStrategy in non IPU compiled environment, please re-compile with WITH_IPU=ON."
            )

    def set_graph_config(self,
                         num_ipus=1,
                         is_training=True,
                         micro_batch_size=1,
                         enable_manual_shard=False):
        """
        Set graph configuration to the IpuStrategy instance.

        Args:
            num_ipus (int, optional): Number of IPU devices. Default 1, which means only use 1 IPU.
            is_training (bool, optional): True is training graph, False is inference graph. Default True, which means is training mode.
            micro_batch_size (int, optional): The micro batch-size in the graph. Used to make the graph batch-size fixed,
                if the batch-size in the graph is dynamic. Default 1, which means the batch-size would be set to 1 if the batch-size is dynamic.
            enable_manual_shard (bool, optional): Enable graph sharding or not. enable_manual_shard can only be set True when num_ipus > 1.
                Default False, which means disabled.
            
        Returns:
            None.

        Examples:
            .. code-block:: python
	
                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_graph_config(num_ipus=1,
                                            is_training=True,
                                            micro_batch_size=1,
                                            enable_manual_shard=False)
        """
        if num_ipus == 1 and enable_manual_shard:
            raise RuntimeError(
                "enable_manual_shard can only be set True when num_ipus > 1."
            )
        options = {
            'num_ipus': num_ipus,
            'is_training': is_training,
            'micro_batch_size': micro_batch_size,
            'enable_manual_shard': enable_manual_shard,
        }
        self.set_options(options)

    def set_pipelining_config(self,
                              enable_pipelining=False,
                              batches_per_step=1,
                              enable_gradient_accumulation=False,
                              accumulation_factor=1):
        """
        Set pipelining configuration to the IpuStrategy instance. Used to optimize the throughput performance.

        Args:
            enable_pipelining (bool, optional): Enable data pipelining between subgraphs. enable_pipelining can only be set True when enable_manual_shard=True.
                Default False, which means disabled.
            batches_per_step (int, optional): Set the batches per run in data pipelining mode. batches_per_step can only be set > 1 when enable_pipelining=True.
                Default 1, which means no data pipelining.
            enable_gradient_accumulation (bool, optional): Enable to accumulate gradients before updating the weights in training mode. enable_gradient_accumulation
                can only be set True when enable_pipelining=True. Default False, which means no gradient accumulation.
            accumulation_factor (int, optional): Specify the number of micro-batches to accumulate 
                before applying the varUpdate. Default 1, which means the accumulation is disabled.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_pipelining_config(enable_pipelining=False,
                                                    batches_per_step=1,
                                                    enable_gradient_accumulation=False,
                                                    accumulation_factor=1)
        """
        enable_manual_shard = self.get_option('enable_manual_shard')
        if not enable_manual_shard and enable_pipelining:
            raise RuntimeError(
                "enable_pipelining can only be set True when enable_manual_shard=True."
            )
        options = {
            'enable_pipelining': enable_pipelining,
            'batches_per_step': batches_per_step,
            'enable_gradient_accumulation': enable_gradient_accumulation,
            'accumulation_factor': accumulation_factor,
        }
        self.set_options(options)

    def set_precision_config(self, enable_fp16=False):
        """
        Set half computation configuration to the IpuStrategy instance. Used to optimize the performance.

        Args:
            enable_fp16 (bool, optional): Enable FLOAT16 mode and transform FLOAT32 to FLOAT16. Default False, which means disable FLOAT16 mode.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_precision_config(enable_fp16=False)
        """
        options = {
            'enable_fp16': enable_fp16,
        }
        self.set_options(options)

    def add_custom_op(self,
                      paddle_op,
                      popart_op=None,
                      domain='custom.ops',
                      version=1):
        """
        Add a mapping to use popart custom ops running on the IPU.

        Args:
            paddle_op(str): the name of custom op in paddle.

            popart_op(str): the name of custom op in popart.

            domain(str): domain name of custom op in popart.

            version(int): version of custom op in popart.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.add_custom_op('paddle_relu', 'popart_relu')
        """
        if popart_op is None:
            popart_op = paddle_op
        custom_op = {
            'paddle_op': paddle_op,
            'popart_op': popart_op,
            'domain': domain,
            'version': version,
        }
        self.set_options({'custom_op': custom_op})
        self.custom_op_names.append(paddle_op)
        if not self.has_custom_ops:
            self.has_custom_ops = True

    def set_options(self, options):
        """
        Set options from dict.

        Args:
            options(dict): dict of options.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                options = {'num_ipus':1, 'enable_fp16': True}
                ipu_strategy.set_options(options)
        """
        self._ipu_strategy.set_options(options)

    def get_option(self, option):
        """
        Get option.

        Args:
            option(str): name of option.
        
        Returns:
            option value.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                num_ipus = ipu_strategy.get_option('num_ipus')
        """
        return self._ipu_strategy.get_option(option)['value']

    def enable_pattern(self, pattern):
        """
        Enable PopART pattern to optimize the graph.

        Args:
            pattern(string): the name of the pattern.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.enable_pattern("ViewSimplifyPattern")
        """
        self._ipu_strategy.enable_pattern(pattern)

    def disable_pattern(self, pattern):
        """
        Disable PopART pattern.

        Args:
            pattern(string): the name of the pattern.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.disable_pattern("ViewSimplifyPattern")
        """
        self._ipu_strategy.disable_pattern(pattern)

    @property
    def num_ipus(self):
        """
        Get the number of IPU devices from IpuStrategy instance.
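
        Reading this property is equivalent to :code:`get_option('num_ipus')`.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                num_ipus = ipu_strategy.num_ipus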
        """
        return self.get_option('num_ipus')

    @property
    def is_training(self):
        """
        Get whether the graph is built for training or for inference from the IpuStrategy instance.
        """
        return self.get_option('is_training')

    @property
    def enable_pipelining(self):
        """
        Get whether pipelining is enabled from the IpuStrategy instance.
        """
        return self.get_option('enable_pipelining')

    @property
    def enable_fp16(self):
        """
        Get whether float16 mode is enabled from the IpuStrategy instance.
        """
        return self.get_option('enable_fp16')


class IpuCompiledProgram(object):
    """
    The IpuCompiledProgram is used to transform a program into an IPU-target program,
    such as forward graph extraction, computation graph transformation, removal of useless scale ops, etc.

    Args:
        program(Program, optional): This parameter represents the :code:`Program`
            to be executed. Default is None, which means the program will be set to 
            the default program :code:`paddle.static.default_main_program()` .
        scope(Scope, optional): The scope used to run this program, you can switch
            it to different scope. Default is None, which means use the global 
            scope :code:`paddle.static.global_scope()` .
        ipu_strategy(IpuStrategy, optional): This argument is used to build the program with the
            specified options, such as half computation, training or inference session, the number of IPUs, etc.
            Default is None, which means build the program based on the default `ipu_strategy`. 

    Returns:
        IpuCompiledProgram

    Example:
        .. code-block:: python
	
            # required: ipu

            import paddle
            import paddle.static as static

            paddle.enable_static()

            a = static.data(name='data', shape=[None, 1], dtype='int32')
            b = a + 1
            main_prog = static.default_main_program()
            
            ipu_strategy = static.IpuStrategy()
            ipu_strategy.set_graph_config(num_ipus=1, is_training=True, micro_batch_size=1)
            ipu_strategy.set_pipelining_config(enable_pipelining=False, batches_per_step=1, enable_gradient_accumulation=False, accumulation_factor=1)
            ipu_strategy.set_precision_config(enable_fp16=False)
            
            ipu_compiled_program = static.IpuCompiledProgram(
                main_prog,
                ipu_strategy=ipu_strategy)
    """

    def __init__(self, program=None, scope=None, ipu_strategy=None):
        if not core.is_compiled_with_ipu():
            raise ValueError(
                "Can not use this function since PaddlePaddle is not compiled with IPU"
            )

        if program is None:
            program = framework.default_main_program()

        if not isinstance(program, framework.Program):
            raise TypeError(
                "The type of program is wrong, expected Program, but got %s" %
                type(program))

        self._program = program
        self._compiled = False

        if scope is not None:
            self._scope = scope
        else:
            # import locally to avoid a circular import
            import paddle
            self._scope = paddle.static.global_scope()

        if ipu_strategy is not None:
            self._ipu_strategy = ipu_strategy
        else:
            self._ipu_strategy = IpuStrategy()

        if self._ipu_strategy.has_custom_ops:
            self._custom_op_names = set(self._ipu_strategy.custom_op_names)
        else:
            self._custom_op_names = ()

        self._backend = core.IpuBackend.get_instance()

    def compile(self, feed_list, fetch_list):
        """
        This interface is used to compile the input Program to a program
        to run the model on the IPU.
        
        Args:
            feed_list(list): This parameter represents the input Tensors of the model.

            fetch_list(list): This parameter represents the Tensors that need to be returned
                after the model runs.

        Returns:
            Program

        Example:
            .. code-block:: python
    	
                # required: ipu
    
                import paddle
                import paddle.static as static
    
                paddle.enable_static()
    
                a = static.data(name='data', shape=[None, 1], dtype='int32')
                b = a + 1
                main_prog = static.default_main_program()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_graph_config(num_ipus=1, is_training=True, micro_batch_size=1)
                ipu_strategy.set_pipelining_config(enable_pipelining=False, batches_per_step=1, enable_gradient_accumulation=False, accumulation_factor=1)
                ipu_strategy.set_precision_config(enable_fp16=False)
                
                program = static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile([a.name], [b.name])
        """
        self._backend.set_scope(self._scope)
        self._backend.set_ipu_strategy(self._ipu_strategy._ipu_strategy)

        # feed and fetch don't have corresponding popart ops, so we remove both here
        global_block = self._program.global_block()
        need_to_remove_op_index = []
        for i, op in enumerate(global_block.ops):
            op.desc.set_is_target(False)
            if op.type == 'feed' or op.type == 'fetch':
                need_to_remove_op_index.append(i)

        for index in need_to_remove_op_index[::-1]:
            global_block._remove_op(index)

        for var in ['feed', 'fetch']:
            if global_block.has_var(var):
                global_block._remove_var(var)

        self._program.desc.flush()
        self._graph = core.Graph(self._program.desc)

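        # In training mode, apply the optimizer-related passes before the
        # forward-graph extraction below.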
        if self._ipu_strategy.is_training:
            passes = [
                'optimizer_extract_pass',
                'optimizer_state_align_pass',
            ]
            for pass_name in passes:
                a_pass = core.get_pass(pass_name)
                a_pass.apply(self._graph)

        passes = [
            'forward_graph_extract_pass',
            'infer_shape_pass',
            'avg_shard_pass',
            'delete_scale_op_pass',
        ]
        for pass_name in passes:
            a_pass = core.get_pass(pass_name)
            if pass_name == 'infer_shape_pass':
                a_pass.set('feed_list', feed_list)
            a_pass.apply(self._graph)

        a_pass = core.get_pass('popart_canonicalization_pass')
        if self._custom_op_names:
            a_pass.set('custom_ops', self._custom_op_names)
        a_pass.apply(self._graph)

        passes = [
            'ipu_inplace_pass',
            'ipu_graph_builder_pass',
            'ipu_runtime_replacer_pass',
        ]
        for pass_name in passes:
            a_pass = core.get_pass(pass_name)
            a_pass.set('feed_list', feed_list)
            a_pass.set('fetch_list', fetch_list)
            a_pass.apply(self._graph)

        convert_pass = core.get_pass('graph_to_program_pass')
        desc = core.ProgramDesc()
        convert_pass.set_not_owned('program', desc)
        convert_pass.apply(self._graph)
        program = framework.Program._construct_from_desc(desc)

        if hasattr(self._program, 'lr_sheduler'):
            # how to share a var between two different blocks?
            lr_var_name = self._program.lr_sheduler._var_name

            program.lr_sheduler = self._program.lr_sheduler
            # Program.clone will clone lr_sheduler, so we set lr_var as
            # an attribute of lr_sheduler
            global_block = self._program.global_block()
            program.lr_sheduler.lr_var = global_block.vars[lr_var_name]

        # With popart, we need to support batches_per_step, which means
        # the shapes of feed_var and feed_tensor (maybe a numpy array) may
        # mismatch, so we set need_check_feed to False. Thus we can avoid
        # modifying the logic of run.
        program_global_block = program.global_block()
        for feed_name in feed_list:
            feed_var = program_global_block.var(feed_name)
            feed_var.desc.set_need_check_feed(False)

        if not hasattr(program, 'org_program'):
            program.org_program = self._program

        return program