#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import multiprocessing
import os
import six
import sys
from .. import compat as cpt
from . import framework
from .framework import _get_paddle_place, _get_paddle_place_list
from .framework import cuda_places, cpu_places, xpu_places
from . import core

__all__ = [
    'CompiledProgram', 'ExecutionStrategy', 'BuildStrategy',
    'IpuCompiledProgram', 'IpuStrategy'
]

ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
InferNativeConfig = core.NativeConfig
InferAnalysisConfig = core.AnalysisConfig
DeviceType = core.DeviceType


def _place_obj(place):
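    """Wrap a raw place (e.g. CPUPlace/CUDAPlace/XPUPlace) into a ``core.Place`` object."""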
    p = core.Place()
    p.set_place(place)
    return p


def _is_pserver_mode(main_program):
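    """Return True if the program contains send/recv ops, i.e. it runs in parameter-server mode."""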
    main = main_program if main_program \
        else framework.default_main_program()
    for op in main.global_block().ops:
        if op.type in ["send", "recv"]:
            return True
    return False


def _has_backward_op(graph):
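    """Return True if the graph contains any gradient op (an op whose type ends with ``_grad``)."""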
    for node in graph.nodes():
        if node.is_op() and node.op() is not None and \
                node.op().type().endswith("_grad"):
            return True
    return False


def _prune_feed_ops(program):
    # prune the feed ops in the program.
    pop_idx = []
    for i, op in enumerate(program.global_block().ops):
        if op.type == "feed": pop_idx.append(i)
    for index in pop_idx[::-1]:
        program.global_block()._remove_op(index)


def _has_optimize_op(block):
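    """Return True if the block contains an op whose role is marked as Optimize."""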
    for op in block.ops:
        op_maker = core.op_proto_and_checker_maker
        optimize = core.op_proto_and_checker_maker.OpRole.Optimize
        if op_maker.kOpRoleVarAttrName() in op.attr_names and \
                int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize):
            return True
    return False


def _has_optimizer_in_control_flow(program):
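    """Return True if an optimize op is found inside the sub-block of a conditional_block_grad op."""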
    if not program:
        program = framework.default_main_program()
    for op in program.global_block().ops:
        if op.type == "conditional_block_grad":
            sub_block = program.block(op._block_attr_id("sub_block"))
            if _has_optimize_op(sub_block):
                return True

    return False


def _should_broadcast_or_not_exists(program, var_name):
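    """Return True if the variable does not exist in the program or is not a
    distributed variable, in which case it should be broadcast to other devices.
    """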
    block = program.global_block()
    var = block.vars.get(var_name, None)
    if var is None:
        return True
    is_distributed = getattr(var, '_is_distributed', False) or getattr(
        var, 'is_distributed', False)
    return not is_distributed


class CompiledProgram(object):
    """
    :api_attr: Static Graph
    
    The CompiledProgram is used to transform a program or graph for
    various optimizations according to the configuration of build_strategy,
    for example, the operators' fusion in the computation graph, memory
    optimization during the execution of the computation graph, etc.
    For more information about build_strategy, please refer to
    :code:`paddle.static.BuildStrategy`.

    Args:
        program_or_graph (Graph|Program): This argument is the Program or Graph
            being executed.
        build_strategy(BuildStrategy): This argument is used to compile the
            program or graph with the specified options, such as operators' fusion
            in the computational graph and memory optimization during the execution
            of the computational graph. For more information about build_strategy,
            please refer to :code:`paddle.static.BuildStrategy`. The default is None.

    Returns:
        CompiledProgram

    Example:
        .. code-block:: python

            import numpy
            import paddle
            import paddle.static as static

            paddle.enable_static()

            place = paddle.CUDAPlace(0) # paddle.CPUPlace()
            exe = static.Executor(place)

            data = static.data(name='X', shape=[None, 1], dtype='float32')
            hidden = static.nn.fc(x=data, size=10)
            loss = paddle.mean(hidden)
            paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

            exe.run(static.default_startup_program())
            compiled_prog = static.CompiledProgram(
                static.default_main_program())

            x = numpy.random.random(size=(10, 1)).astype('float32')
            loss_data, = exe.run(compiled_prog,
                                feed={"X": x},
                                fetch_list=[loss.name])
    """

    def __init__(self, program_or_graph, build_strategy=None):
        if isinstance(program_or_graph, core.Graph):
            self._graph = program_or_graph
            # don't create a new program here.
            self._program = None
        elif isinstance(program_or_graph, framework.Program):
            _prune_feed_ops(program_or_graph)
            self._graph = core.Graph(program_or_graph.desc)
            self._program = program_or_graph
        else:
            raise TypeError(
                "The type of program_or_graph parameter is wrong, expected Graph or Program, but received %s"
                % type(program_or_graph))

        self._scope = None
        self._place = None
        self._executor = None
        self._compiled = False
        self._is_data_parallel = False
        self._is_inference = False
        self._loss_name = None
        self._share_vars_from = None
        self._places = None
        self._build_strategy = build_strategy
        self._exec_strategy = None

    def with_data_parallel(self,
                           loss_name=None,
                           build_strategy=None,
                           exec_strategy=None,
                           share_vars_from=None,
                           places=None):
        """
        This interface is used to transform the input Program or Graph to a multi-graph
        to run the model in data parallel mode. Users can use the build_strategy and
        exec_strategy to set some optimizations that can be applied during the construction
        and computation of the Graph, such as reducing the number of AllReduce operations,
        specifying the size of the thread pool used in the computation Graph running the model,
        and so on. 
        
        .. note::
            If build_strategy is specified when building CompiledProgram and calling 
            with_data_parallel, build_strategy in CompiledProgram will be overwritten, therefore, 
            if it is data parallel training, it is recommended to set build_strategy when calling 
            with_data_parallel interface.

        Args:
            loss_name (str): This parameter is the name of the loss Tensor of the model.
                **Note: If it is model training, you must set loss_name, otherwise the
                result may be problematic**. The default is None.
            build_strategy(BuildStrategy): This parameter is used to compile the
                program or graph with the specified options, such as operators' fusion
                in the computational graph and memory optimization during the execution
                of the computational graph. For more information about build_strategy,
                please refer to :code:`fluid.BuildStrategy`. The default is None.
            exec_strategy(ExecutionStrategy): exec_strategy specifies the options that can
                be changed when running the current model, such as the thread pool size.
                For more information about exec_strategy, please refer to :code:`fluid.ExecutionStrategy`.
                The default is None.
            share_vars_from(CompiledProgram): If share_vars_from is set, the current
                CompiledProgram will share the parameter value with the CompiledProgram
                specified by share_vars_from. This parameter needs to be set when model testing
                is required during model training, and the data parallel mode is used for
                training and testing. Since CompiledProgram will only distribute parameter
                Tensors to other devices when it is first executed, the CompiledProgram
                specified by share_vars_from must be run before the current CompiledProgram.
                The default is None.
            places(list(CUDAPlace)|list(CPUPlace)|list(str)|None): This parameter specifies the device
                on which the model is running. If you want to run on GPU0 and GPU1, places are
                [fluid.CUDAPlace(0), fluid.CUDAPlace(1)]; if you want to run with 2 CPUs, places are
                [fluid.CPUPlace()] * 2. If the parameter is not set, i.e. the parameter is None,
                the available device will be obtained from the environment variable when the model
                is executed: If the GPU is used, the currently available device ID is obtained
                from the environment variable FLAGS_selected_gpus or CUDA_VISIBLE_DEVICES when
                the model is executed; if the CPU is used, the number of currently available
                CPUs is obtained from the environment variable CPU_NUM when the model is
                executed. For example, export CPU_NUM=4; if the environment variable is not
                set, the executor will add the variable to the environment and set its value to 1.
                The default is None. If ``places`` is the list of string, the string in the list
                can be ``cpu``, ``gpu:x``, where ``x`` is the index of the GPUs. 

        Returns:
            CompiledProgram

        Example:
            .. code-block:: python

                import numpy
                import os
                import paddle
                import paddle.static as static

                paddle.enable_static()

                use_cuda = True
                place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
                parallel_places = [paddle.CUDAPlace(0), paddle.CUDAPlace(1)] if use_cuda else [paddle.CPUPlace()] * 2

                # NOTE: If you run the program on CPU, you need to specify
                # CPU_NUM; otherwise, paddle will use the number of logical
                # cores as CPU_NUM. In that case, the input batch size should
                # be greater than CPU_NUM; if it is not, the process will fail
                # with an exception.
                if not use_cuda:
                    os.environ['CPU_NUM'] = str(2)

                exe = static.Executor(place)

                data = static.data(name='X', shape=[None, 1], dtype='float32')
                hidden = static.nn.fc(x=data, size=10)
                loss = paddle.mean(hidden)

                test_program = static.default_main_program().clone(for_test=True)
                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)

                exe.run(static.default_startup_program())
                compiled_train_prog = static.CompiledProgram(
                    static.default_main_program()).with_data_parallel(
                            loss_name=loss.name, places=parallel_places)
                # NOTE: if share_vars_from=compiled_train_prog is not set,
                # the parameters used in the test process are different from
                # the parameters used by the train process
                compiled_test_prog = static.CompiledProgram(
                    test_program).with_data_parallel(
                            share_vars_from=compiled_train_prog,
                            places=parallel_places)

                train_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_train_prog,
                                feed={"X": train_data},
                                fetch_list=[loss.name])
                test_data = numpy.random.random(size=(10, 1)).astype('float32')
                loss_data, = exe.run(compiled_test_prog,
                                feed={"X": test_data},
                                fetch_list=[loss.name])
        """
        assert not self._is_data_parallel, "Already compiled with parallel, cannot be recompiled."
        assert not self._is_inference, "Cannot compile with both data parallel and inference."
        self._is_data_parallel = True
        # FIXME(zcd): Currently, the build_strategy can be set during creating
        # CompiledProgram or calling with_data_parallel, and it may be confusing,
        # but in the long run, we should set up build_strategy only when creating
        # CompiledProgram, and exec_strategy should be deprecated.
        if build_strategy is not None: self._build_strategy = build_strategy
        self._exec_strategy = exec_strategy
        self._loss_name = loss_name
        self._share_vars_from = share_vars_from
        if isinstance(places, (list, tuple)):
            self._places = _get_paddle_place_list(places)
        else:
            self._places = _get_paddle_place(places)

        if _has_backward_op(self._graph):
            assert self._loss_name is not None, "The loss name of CompiledProgram is None. The loss name should be set if CompiledProgram contains backward part."

        if self._places is not None:
            if not isinstance(self._places, (list, tuple)):
                self._places = [self._places]

        return self

    def _with_inference_optimize(self, config):
        """ Add inference optimize

        Args:
            config: instance of `NativeConfig` or `AnalysisConfig` to create predictor
        Returns:
            self
        """
        assert not self._is_data_parallel, "Cannot compile with both data parallel and inference"
        assert not self._is_inference, "Already compiled with inference, cannot be recompiled."

        assert any([
            isinstance(config, InferNativeConfig),
            isinstance(config, InferAnalysisConfig)
        ])
        self._is_inference = True
        self._infer_config = config
        return self

    def _with_distributed(self):
        raise NotImplementedError(
            "Subclass of CompiledProgram should implement _with_distributed method."
        )

    def _compile_data_parallel(self, places, use_device, scope=None):
        if self._share_vars_from:
            if scope:
                sys.stderr.write("share_vars_from is set, scope is ignored.\n")
            if not self._share_vars_from._is_data_parallel:
                raise ValueError(
                    "The shared Program is not data parallel, cannot "
                    "share variables from it.")
            if self._share_vars_from._executor is None:
                raise ValueError(
                    "The shared Program is not compiled and executed, so there are no "
                    "variables to share.")
            self._local_scopes = self._share_vars_from._executor.local_scopes()
        else:
            assert scope is not None, "scope cannot be None when share_vars_from is not set."
            self._local_scopes = []

        assert isinstance(places, tuple) or isinstance(places, list), \
            "Currently, the places type can only be list or tuple, but the input type is {}.".format(type(places))

        if self._build_strategy is None:
            self._build_strategy = BuildStrategy()
        self._build_strategy.is_distribution = _is_pserver_mode(self._program)

        if self._exec_strategy is None:
            self._exec_strategy = ExecutionStrategy()
        self._exec_strategy._use_device = use_device

        if self._exec_strategy.num_threads == 0:
            if self._exec_strategy._use_device == DeviceType.CUDA:
                # Experiments on se-resnext show that too many threads hurt
                # performance. Worth tuning for other models in the future.
                self._exec_strategy.num_threads = len(places) * 4
            elif self._exec_strategy._use_device == DeviceType.XPU:
                # Currently only single thread is supported in Kunlun XPU.
                self._exec_strategy.num_threads = 1
            else:
                self._exec_strategy.num_threads = len(places) * 2

        if self._build_strategy.num_trainers > 1:
            assert self._is_data_parallel, \
                "If you use multi-trainer to train the model, you should use "\
                "the data parallel model, i.e. calling with_data_parallel function."

        # TODO(wuyi): trainer endpoints should be passed in through
        # build_strategy, not program.xxx.
        # TODO(gongwb): let the user set them once.
        if self._program and self._build_strategy.num_trainers > 1 and \
                self._program._trainers_endpoints:
            tps = self._program._trainers_endpoints

            assert self._build_strategy.num_trainers == len(
                tps), "The number of trainers is not equal to the number of endpoints."
            self._build_strategy.trainers_endpoints = tps

        if self._program:
            self._build_strategy.nccl_comm_num = self._program._nccl_comm_num
            self._build_strategy.use_hierarchical_allreduce = self._program._use_hierarchical_allreduce
            self._build_strategy.hierarchical_allreduce_inter_nranks = self._program._hierarchical_allreduce_inter_nranks

        if self._build_strategy.sync_batch_norm:
            self._build_strategy.enable_sequential_execution = True

        if self._program is not None and self._program._enable_dgc:
            assert self._exec_strategy._use_device == DeviceType.CUDA, "DGC can only be used under a CUDA environment."
            assert self._build_strategy.num_trainers * len(
                places) > 1, "DGC is not available for single card training."
            assert self._build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "DGC \
                can only be used with the AllReduce BuildStrategy."

            # DGC doesn't support fuse for now, close fuse.
            self._build_strategy.fuse_all_reduce_ops = False

        self._persistable_vars = []
        for node in self._graph.nodes():
            if node.is_var() and node.var() is not None and node.var().persistable() and \
                    node.var().type() != core.VarDesc.VarType.RAW:
                name = cpt.to_text(node.name())
                if self._program is not None and _should_broadcast_or_not_exists(
                        self._program, name):
                    self._persistable_vars.append(cpt.to_text(node.name()))

        places = list(map(_place_obj, places))

        # ParallelExecutor would broadcast all the parameters during initialization.
        # The parameters of each process should be in the same order for the
        # data-parallel distributed training to keep the broadcast correct.
        self._persistable_vars = list(set(self._persistable_vars))
        self._persistable_vars.sort()

        return core.ParallelExecutor(
            places, self._persistable_vars,
            cpt.to_text(self._loss_name)
            if self._loss_name else six.u(''), self._scope, self._local_scopes,
            self._exec_strategy, self._build_strategy, self._graph)

    def _compile_inference(self):
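        """Create and return a paddle inference predictor from the inference config."""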
        return core.create_paddle_predictor(self._infer_config)

    def _compile(self, scope, place):
        """Compile the program based on the configs.

        Args:
            scope: The variables (resources) that are associated with
               this compiled program.
            place: The location that the compiled program will be run on.

        Returns:
            self
        """
        if self._compiled:
            if scope and self._scope != scope:
                raise ValueError("Cannot compile program with different scope.")
            if place and not self._place._equals(place):
                raise ValueError("Cannot compile program with different place.")
            return self
        self._compiled = True

        self._scope = scope
        self._place = place

        if self._is_inference:
            self._executor = self._compile_inference()
        else:
            if self._is_data_parallel:
                self._places = self._get_places(self._place, self._places)
            else:
                self._places = [self._place]

            # TODO(liym27): If optimizer is used in control flow,
            #  training on multi-places is not supported now, will
            #  be supported later.
            if len(self._places) > 1 and \
                    _has_optimizer_in_control_flow(self._program):
                raise NotImplementedError(
                    "If optimizer is used in control flow, "
                    "training on multi-places is not supported now.")
            if isinstance(self._place, core.CUDAPlace):
                use_device = DeviceType.CUDA
            elif isinstance(self._place, core.XPUPlace):
                use_device = DeviceType.XPU
            else:
                use_device = DeviceType.CPU
            self._executor = self._compile_data_parallel(
                use_device=use_device, scope=self._scope, places=self._places)
        return self

    def _get_places(self, place, place_list):
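        """Return the list of places to run on: validate user-specified places against
        the place type, or derive the place list from the place type (CUDA/XPU/CPU).
        """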
        has_set_place = (place_list is not None)
        if has_set_place:
            for p in place_list:
                assert p._type() == place._type(), \
                    "Place type does not match. You may have set the wrong type of places."
        else:
            if isinstance(place, core.CUDAPlace):
                place_list = cuda_places()
            elif isinstance(place, core.XPUPlace):
                place_list = xpu_places()
            else:
                place_list = cpu_places()
        assert place_list, "No places for execution."
        return place_list


class IpuStrategy(object):
    """
    Help users precisely control the graph building in :code:`paddle.static.IpuCompiledProgram` .

    Returns:
        The IpuStrategy instance.

    Examples:
        .. code-block:: python
	
            # required: ipu

            import paddle
            import paddle.static as static

            paddle.enable_static()

            ipu_strategy = static.IpuStrategy()
    """

    def __init__(self):
        if core.is_compiled_with_ipu():
            self._ipu_strategy = core.IpuStrategy()
            default_options = {
                'location_optimizer': {
                    'on_chip': 0,
                    'use_replicated_tensor_sharding': 1,
                },  # set optimizer location
                'accumulation_and_replication_reduction_type':
                1,  # popart::ReductionType::Mean
                'mean_accumulation_and_replication_reduction_strategy':
                1,  # popart::MeanReductionStrategy::Post
            }
            self._ipu_strategy.set_options(default_options)
            self.has_custom_ops = False
            self.custom_op_names = []
        else:
            raise RuntimeError(
                "Can not use IpuStrategy in non IPU compiled environment, please re-compile with WITH_IPU=ON."
            )

    def set_graph_config(self,
                         num_ipus=1,
                         is_training=True,
                         micro_batch_size=1,
                         enable_manual_shard=False):
        """
        Set graph configuration to the IpuStrategy instance.

        Args:
            num_ipus (int, optional): Number of IPU devices. Default 1, which means only use 1 IPU.
            is_training (bool, optional): True for a training graph, False for an inference graph. Default True, which means training mode.
            micro_batch_size (int, optional): The micro batch size in the graph. Used to make the graph batch size fixed
                if the batch size in the graph is dynamic. Default 1, which means the batch size would be set to 1 if the batch size is dynamic.
            enable_manual_shard (bool, optional): Enable graph sharding or not. enable_manual_shard can only be set to True if num_ipus > 1.
                Default False, which means disabled.
            
        Returns:
            None.

        Examples:
            .. code-block:: python
	
                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_graph_config(num_ipus=1,
                                            is_training=True,
                                            micro_batch_size=1,
                                            enable_manual_shard=False)
        """
        if num_ipus == 1 and enable_manual_shard:
            raise RuntimeError(
                "enable_manual_shard can only be set to True if num_ipus > 1."
            )
        options = {
            'num_ipus': num_ipus,
            'is_training': is_training,
            'micro_batch_size': micro_batch_size,
            'enable_manual_shard': enable_manual_shard,
        }
        self.set_options(options)

    def set_pipelining_config(self,
                              enable_pipelining=False,
                              batches_per_step=1,
                              enable_gradient_accumulation=False,
                              accumulation_factor=1):
        """
        Set pipelining configuration to the IpuStrategy instance. Used to optimize the throughput performance.

        Args:
            enable_pipelining (bool, optional): Enable data pipelining between subgraphs. enable_pipelining can only be set to True if enable_manual_shard=True.
                Default False, which means disabled.
            batches_per_step (int, optional): Set the batches per run in data pipelining mode. batches_per_step can only be set > 1 if enable_pipelining=True.
                Default 1, which means no data pipelining.
            enable_gradient_accumulation (bool, optional): Enable accumulating gradients before updating the weights in training mode. enable_gradient_accumulation can only be set to True if enable_pipelining=True.
                Default False, which means no gradient accumulation.
            accumulation_factor (int, optional): Specify the number of micro-batches to accumulate 
                before applying the varUpdate. Default 1, which means disable the accumulation.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_pipelining_config(enable_pipelining=False,
                                                    batches_per_step=1,
                                                    enable_gradient_accumulation=False,
                                                    accumulation_factor=1)
        """
        enable_manual_shard = self.get_option('enable_manual_shard')
        if not enable_manual_shard and enable_pipelining:
            raise RuntimeError(
                "enable_pipelining can only be set to True if enable_manual_shard=True."
            )
        options = {
            'enable_pipelining': enable_pipelining,
            'batches_per_step': batches_per_step,
            'enable_gradient_accumulation': enable_gradient_accumulation,
            'accumulation_factor': accumulation_factor,
        }
        self.set_options(options)

    def set_precision_config(self, enable_fp16=False):
        """
        Set half computation configuration to the IpuStrategy instance. Used to optimize the performance.

        Args:
            enable_fp16 (bool, optional): Enable FLOAT16 mode and transform FLOAT32 to FLOAT16. Default False, which means disable FLOAT16 mode.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_precision_config(enable_fp16=False)
        """
        options = {'enable_fp16': enable_fp16, }
        self.set_options(options)

    def add_custom_op(self,
                      paddle_op,
                      popart_op=None,
                      domain='custom.ops',
                      version=1):
        """
        Add a mapping to use popart custom ops running on the IPU.

        Args:
            paddle_op(str): the name of custom op in paddle.

            popart_op(str): the name of custom op in popart.

            domain(str): domain name of custom op in popart.

            version(int): version of custom op in popart.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.add_custom_op('paddle_relu', 'popart_relu')
        """
        if popart_op is None:
            popart_op = paddle_op
        custom_op = {
            'paddle_op': paddle_op,
            'popart_op': popart_op,
            'domain': domain,
            'version': version,
        }
        self.set_options({'custom_op': custom_op})
        self.custom_op_names.append(paddle_op)
        if not self.has_custom_ops:
            self.has_custom_ops = True

    def set_options(self, options):
        """
        Set options from dict.

        Args:
            options(dict): dict of options.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                options = {'num_ipus':1, 'enable_fp16': True}
                ipu_strategy.set_options(options)
        """
        self._ipu_strategy.set_options(options)

    def get_option(self, option):
        """
        Get option.

        Args:
            option(str): name of option.
        
        Returns:
            option value.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                num_ipus = ipu_strategy.get_option('num_ipus')
        """
        return self._ipu_strategy.get_option(option)['value']

    def enable_pattern(self, pattern):
        """
        Enable PopART pattern to optimize the graph.

        Args:
            pattern(string): the name of the pattern.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.enable_pattern("ViewSimplifyPattern")
        """
        self._ipu_strategy.enable_pattern(pattern)

    def disable_pattern(self, pattern):
        """
        Disable PopART pattern.

        Args:
            pattern(string): the name of the pattern.
        
        Returns:
            None.

        Examples:
            .. code-block:: python

                # required: ipu

                import paddle
                import paddle.static as static

                paddle.enable_static()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.disable_pattern("ViewSimplifyPattern")
        """
        self._ipu_strategy.disable_pattern(pattern)

    @property
    def num_ipus(self):
        """
        Get the number of IPU devices from the IpuStrategy instance.
        """
        return self.get_option('num_ipus')

    @property
    def is_training(self):
        """
        Get whether the graph is built for training or inference from the IpuStrategy instance.
        """
        return self.get_option('is_training')

    @property
    def enable_pipelining(self):
        """
        Get whether pipelining is enabled from the IpuStrategy instance.
        """
        return self.get_option('enable_pipelining')

    @property
    def enable_fp16(self):
        """
        Get whether float16 mode is enabled from the IpuStrategy instance.
        """
        return self.get_option('enable_fp16')


class IpuCompiledProgram(object):
    """
    The IpuCompiledProgram is used to transform a program to an IPU-target program,
    such as forward graph extraction, computing graph transformation, useless scale op cleaning, etc.

    Args:
        program(Program, optional): This parameter represents the :code:`Program`
            to be executed. Default is None, which means the program will be set to 
            the default program :code:`paddle.static.default_main_program()` .
        scope(Scope, optional): The scope used to run this program, you can switch
            it to different scope. Default is None, which means use the global 
            scope :code:`paddle.static.global_scope()` .
        ipu_strategy(IpuStrategy, optional): This argument is used to build the program with the
            specified options, such as half computation, training or inference session, the number of IPUs, etc.
            Default is None, which means build the program based on the default `ipu_strategy`. 

    Returns:
        IpuCompiledProgram

    Example:
        .. code-block:: python
	
            # required: ipu

            import paddle
            import paddle.static as static

            paddle.enable_static()

            a = static.data(name='data', shape=[None, 1], dtype='int32')
            b = a + 1
            main_prog = static.default_main_program()
            
            ipu_strategy = static.IpuStrategy()
            ipu_strategy.set_graph_config(num_ipus=1, is_training=True, micro_batch_size=1)
            ipu_strategy.set_pipelining_config(enable_pipelining=False, batches_per_step=1, enable_gradient_accumulation=False, accumulation_factor=1)
            ipu_strategy.set_precision_config(enable_fp16=False)
            
            ipu_compiled_program = static.IpuCompiledProgram(
                main_prog,
                ipu_strategy=ipu_strategy)
    """

    def __init__(self, program=None, scope=None, ipu_strategy=None):
        if not core.is_compiled_with_ipu():
            raise ValueError(
                "Can not use this function since PaddlePaddle is not compiled with IPU"
            )

        if program is None:
            program = framework.default_main_program()

        if not isinstance(program, framework.Program):
            raise TypeError(
                "The type of program is wrong, expected Program, but got %s" %
                type(program))

        self._program = program
        self._compiled = False

        if scope is not None:
            self._scope = scope
        else:
            # import here to avoid confusion
            import paddle
            self._scope = paddle.static.global_scope()

        if ipu_strategy is not None:
            self._ipu_strategy = ipu_strategy
        else:
            self._ipu_strategy = IpuStrategy()

        if self._ipu_strategy.has_custom_ops:
            self._custom_op_names = set(self._ipu_strategy.custom_op_names)
        else:
            self._custom_op_names = ()

        self._backend = core.IpuBackend.get_instance()

    def compile(self, feed_list, fetch_list):
        """
        This interface is used to compile the input Program to a program
        to run the model on the ipu.
        
        Args:
            feed_list(list): This parameter represents the input Tensors of the model.

            fetch_list(list): This parameter represents the Tensors that need to be returned
                after the model.

        Returns:
            Program

        Example:
            .. code-block:: python
    	
                # required: ipu
    
                import paddle
                import paddle.static as static
    
                paddle.enable_static()
    
                a = static.data(name='data', shape=[None, 1], dtype='int32')
                b = a + 1
                main_prog = static.default_main_program()

                ipu_strategy = static.IpuStrategy()
                ipu_strategy.set_graph_config(num_ipus=1, is_training=True, micro_batch_size=1)
                ipu_strategy.set_pipelining_config(enable_pipelining=False, batches_per_step=1, enable_gradient_accumulation=False, accumulation_factor=1)
                ipu_strategy.set_precision_config(enable_fp16=False)

                program = static.IpuCompiledProgram(
                    main_prog,
                    ipu_strategy=ipu_strategy).compile([a.name], [b.name])
        """
        self._backend.set_scope(self._scope)
        self._backend.set_ipu_strategy(self._ipu_strategy._ipu_strategy)

        # feed and fetch don't have corresponding popart ops, so we remove both here
        global_block = self._program.global_block()
        need_to_remove_op_index = []
        for i, op in enumerate(global_block.ops):
            op.desc.set_is_target(False)
            if op.type == 'feed' or op.type == 'fetch':
                need_to_remove_op_index.append(i)

        for index in need_to_remove_op_index[::-1]:
            global_block._remove_op(index)

        for var in ['feed', 'fetch']:
            if global_block.has_var(var):
                global_block._remove_var(var)

        self._program.desc.flush()
        self._graph = core.Graph(self._program.desc)

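        # In training mode, apply the optimizer-related passes
        # (optimizer_extract_pass and optimizer_state_align_pass) first.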
        if self._ipu_strategy.is_training:
            passes = [
                'optimizer_extract_pass',
                'optimizer_state_align_pass',
            ]
            for pass_name in passes:
                a_pass = core.get_pass(pass_name)
                a_pass.apply(self._graph)

        passes = [
            'forward_graph_extract_pass',
            'infer_shape_pass',
            'avg_shard_pass',
            'delete_scale_op_pass',
        ]
        for pass_name in passes:
            a_pass = core.get_pass(pass_name)
            if pass_name == 'infer_shape_pass':
                a_pass.set('feed_list', feed_list)
            a_pass.apply(self._graph)

        a_pass = core.get_pass('popart_canonicalization_pass')
        if self._custom_op_names:
            a_pass.set('custom_ops', self._custom_op_names)
        a_pass.apply(self._graph)

        passes = [
            'ipu_inplace_pass',
            'ipu_graph_builder_pass',
            'ipu_runtime_replacer_pass',
        ]
        for pass_name in passes:
            a_pass = core.get_pass(pass_name)
            a_pass.set('feed_list', feed_list)
            a_pass.set('fetch_list', fetch_list)
            a_pass.apply(self._graph)

        convert_pass = core.get_pass('graph_to_program_pass')
        desc = core.ProgramDesc()
        convert_pass.set_not_owned('program', desc)
        convert_pass.apply(self._graph)
        program = framework.Program._construct_from_desc(desc)

        if hasattr(self._program, 'lr_sheduler'):
            # how to share a var between two different blocks?
            lr_var_name = self._program.lr_sheduler._var_name

            program.lr_sheduler = self._program.lr_sheduler
            # Program.clone will clone lr_sheduler, so we set lr_var as an
            # attribute of lr_sheduler
            global_block = self._program.global_block()
            program.lr_sheduler.lr_var = global_block.vars[lr_var_name]

        # With popart, we need to support batches_per_step, which means the
        # shape of feed_var and feed_tensor (maybe a numpy array) will
        # mismatch, so we set need_check_feed to False. Thus we can avoid
        # modifying the logic of run.
        program_global_block = program.global_block()
        for feed_name in feed_list:
            feed_var = program_global_block.var(feed_name)
            feed_var.desc.set_need_check_feed(False)

        if not hasattr(program, 'org_program'):
            program.org_program = self._program

        return program