# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging
from collections import defaultdict

import paddle
import paddle.distributed.auto_parallel as auto

from paddle import fluid, static
from paddle.io import Dataset
from paddle.metric import Metric
from paddle.static import InputSpec
from paddle.fluid import core
from paddle.fluid import program_guard
from paddle.fluid.layers.utils import flatten
from paddle.fluid.executor import global_scope
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Operator, Variable
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed import fleet
from paddle.distributed.utils import get_logger
from paddle.distributed.passes import new_pass, PassContext

# from .cluster import Cluster, get_default_cluster
from .planner_v2 import Planner
from .parallelizer_v2 import Parallelizer
from .dist_op import DistributedOperator
from .dist_saver import DistributedSaver
from .dist_loader import NonIterableGeneratorLoader
from .utils import make_data_unshard, set_grad_var_shape
from .utils import print_program_with_dist_attr, to_list
from .process_group import get_all_process_groups, get_world_process_group
from .dist_context import DistributedContext, get_default_distributed_context


class Engine:

    def __init__(self,
                 model=None,
                 inputs_spec=None,
                 labels_spec=None,
                 cluster=None,
                 strategy=None):
        self.model = model
        self.inputs_spec = self._validate_spec(inputs_spec)
        self.labels_spec = self._validate_spec(labels_spec)
        self.cluster = cluster
        # if self.cluster is None:
        #     self.cluster = get_default_cluster()
        self.strategy = strategy
        if self.strategy is None:
            self.strategy = fleet.DistributedStrategy()

        self._executor = None
        self._cur_rank = paddle.distributed.get_rank()
        self._nranks = paddle.distributed.get_world_size()
        self._saver = DistributedSaver()
        self._logger = get_logger(logging.INFO)

        self._default_strategy = None
        self._orig_main_prog = static.default_main_program()
        self._orig_startup_prog = static.default_startup_program()
        self._orig_dist_context = get_default_distributed_context()
        self._dist_contexts = {}
        self._serial_main_progs = {}
        self._serial_startup_progs = {}
        self._dist_main_progs = defaultdict(dict)  # dist main programs
        self._dist_startup_progs = defaultdict(dict)  # dist startup programs
        self._feed_vars = {}
        self._fetch_vars = {}

    def prepare(self,
                optimizer=None,
                loss=None,
                gradient_scale=True,
                metrics=None,
                all_ranks=False):
        if optimizer and not isinstance(
                optimizer,
            (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)):
            raise TypeError(
                "'optimizer' must be an object of class `paddle.optimizer.Optimizer`"
                " or `paddle.fluid.optimizer.Optimizer`.")
        self._optimizer = optimizer

        if loss and not isinstance(loss,
                                   paddle.nn.Layer) and not callable(loss):
            raise TypeError(
                "'loss' must be a subclass of `paddle.nn.Layer` or any callable function."
            )
        self._loss = loss

        metrics = metrics or []
        for metric in to_list(metrics):
            assert isinstance(metric, Metric), \
                "{} is not a subclass of Metric".format(
                    metric.__class__.__name__)
        self._metrics = to_list(metrics)
        self._gradient_scale = gradient_scale

        self._planned_mode = None
        self._modes = ['train', 'eval', 'predict']
        # Build the serial forward programs
        self._build()

        # Do the auto-parallel process for each mode
        for mode in self._modes:
            # Do the planning process
            self._plan(mode)
            # Do the parallelization process
            self._parallel(mode, all_ranks)
            # Init communication and run the startup program
            self._initialize(mode)
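
    # A minimal usage sketch (illustrative only; `mlp`, `train_dataset` and the
    # spec names are assumptions made here, not names defined by this module):
    #
    #     engine = Engine(mlp,
    #                     inputs_spec=[InputSpec([None, 128], 'float32', 'x')],
    #                     labels_spec=[InputSpec([None, 1], 'int64', 'y')])
    #     engine.prepare(optimizer=paddle.optimizer.Adam(
    #                        learning_rate=1e-3,
    #                        parameters=mlp.parameters()),
    #                    loss=paddle.nn.CrossEntropyLoss())
    #     engine.fit(train_dataset, batch_size=64, epochs=2)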

    def _build(self):
        for mode in self._modes:
            serial_main_prog = self._serial_main_progs.get(mode, None)
            # Skip a mode that has already been built; `continue` (rather than
            # `return`) lets the remaining modes still be built.
            if serial_main_prog is not None:
                continue

            losses = []
            metrics = []
            serial_main_prog = self._orig_main_prog.clone()
            serial_startup_prog = self._orig_startup_prog.clone()
            with static.program_guard(serial_main_prog, serial_startup_prog):
                inputs_spec = self.inputs_spec
                labels_spec = self.labels_spec if self.labels_spec else []
                inputs = [s._create_feed_layer() for s in inputs_spec]
                labels = [s._create_feed_layer() for s in labels_spec]
                outputs = to_list(self.model(*inputs))
                if mode != "predict" and self._loss:
                    losses = to_list(self._loss(*(outputs + labels)))

                if mode != "predict":
                    for metric in self._metrics:
                        metrics.extend(
                            to_list(metric.compute(*(outputs + labels))))

            default_ctx = get_default_distributed_context()
            if not default_ctx.has_annotation or self._default_strategy:
                inputs = [self._set_data_parallel(var) for var in inputs]
                labels = [self._set_data_parallel(var) for var in labels]

            # self._feed_vars[mode] = {"inputs": inputs, "labels": labels}
            feed_vars = {"inputs": inputs, "labels": labels}

            # self._fetch_vars[mode] = {
            #     "outputs": flatten(outputs),
            #     "loss": losses,
            #     "metrics": metrics
            # }
            fetch_vars = {
                "outputs": flatten(outputs),
                "loss": losses,
                "metrics": metrics
            }

            self._dist_contexts[mode] = DistributedContext(
                serial_main_prog, serial_startup_prog, self._optimizer, losses,
                feed_vars, fetch_vars, self.cluster, self.strategy)
            self._dist_contexts[mode].gradient_scale = self._gradient_scale

    def _plan(self, mode):
        if self._planned_mode is None:
            self._planned_mode = mode
        else:
            self._init_dist_context(mode)

        self.planner = Planner(mode, self._dist_contexts[mode])
        self.planner.plan()

    def _parallel(self, mode, all_ranks):
        # Parallelize the program based on the planner's results.
        # For now, the completer has to be passed to the parallelizer,
        # because we may use it to complete the annotation of the backward and update parts.
        parallelizer = Parallelizer(mode, self.planner.completer,
                                    self._dist_contexts[mode])
        if not all_ranks:
            parallelizer.parallel(self._cur_rank)
        else:
            parallelizer.parallel_all()

    def _init_dist_context(self, mode):
        # Init dist_context[mode] with the first planned dist_context
        # to guarantee that the train/eval/predict modes share the same parallel strategy
        dist_context = self._dist_contexts[mode]
        dist_context = self._dist_contexts[mode]
        origin_main_prog = dist_context._original_serial_main_program
        ref_mode = self._planned_mode
        ref_dist_context = self._dist_contexts[ref_mode]
        ref_origin_main_prog = ref_dist_context._original_serial_main_program
        ref_blocks = ref_origin_main_prog.blocks
        for ib, block in enumerate(origin_main_prog.blocks):
            for iop, op in enumerate(block.ops):
                ref_op = ref_blocks[ib].ops[iop]
                assert op.type == ref_op.type, \
                    "'{}' mode op '{}' is different from '{}' mode op '{}'.".format(mode, op.type, ref_mode, ref_op.type)
                ref_op_dist_attr = ref_dist_context.get_op_dist_attr_for_program(
                    ref_op)
                dist_context.set_op_dist_attr_for_program(op, ref_op_dist_attr)

    def _initialize(self, mode):
        # Get the current content from the distributed context
        self._serial_main_progs[mode] = self._dist_contexts[
            mode].serial_main_program
        self._serial_startup_progs[mode] = self._dist_contexts[
            mode].serial_startup_program
        self._dist_main_progs[mode] = self._dist_contexts[
            mode].dist_main_programs
        self._dist_startup_progs[mode] = self._dist_contexts[
            mode].dist_startup_programs
        self._feed_vars[mode] = self._dist_contexts[mode].serial_feed_vars
        self._fetch_vars[mode] = self._dist_contexts[mode].serial_fetch_vars

        if self._nranks > 1:
            # Traverse the programs of the different ranks and each of their ops,
            # and instantiate the communication groups given by process_mapping.
            all_process_groups = get_all_process_groups()
            for process_group in all_process_groups:
                if self._cur_rank not in process_group.ranks:
                    continue
                process_group.instantiate()

        # Initialize the executor and run the (pruned) startup program
        self._place = _get_device()
        if isinstance(self._place, fluid.CUDAPlace):
            self._place = fluid.CUDAPlace(ParallelEnv().dev_id)
        if self._executor is None:
            self._executor = paddle.static.Executor(self._place)
            uninitialized = []
            dist_startup_prog = self._dist_startup_progs[mode][self._cur_rank]
            for var in dist_startup_prog.list_vars():
                scope_var = global_scope().find_var(var.name)
                if scope_var and scope_var.get_tensor()._is_initialized():
                    continue
                uninitialized.append(var)
            if uninitialized:
                prune_startup_prog = dist_startup_prog._prune(uninitialized)
                self._executor.run(prune_startup_prog)

    def fit(self,
            train_data,
            batch_size=1,
            epochs=1,
            fetch_list=None,
            steps_per_epoch=None,
            use_program_cache=False,
            return_numpy=True):
        # TODO: callbacks
        # TODO: evaluate after training
        self.mode = 'train'
        assert self.mode in self._dist_main_progs, \
            "train model is not ready, please call `engine.prepare()` first."
        train_dataloader = self._create_dataloader(train_data, batch_size,
                                                   epochs, steps_per_epoch)
        self._usr_fetch_list = fetch_list

        outputs = []
        for epoch in range(epochs):
            for step, data in enumerate(train_dataloader):
                logs, outs = self._train_step(data, use_program_cache,
                                              return_numpy)
                outputs.append(outs)
                train_logs = {
                    "train_" + name: val
                    for name, val in logs.items()
                }
                self._logger.info(train_logs)
        return outputs

    def evaluate(self,
                 eval_data,
                 batch_size=1,
                 fetch_list=None,
                 use_program_cache=False,
                 return_numpy=True):
        self.mode = 'eval'
        assert self.mode in self._dist_main_progs, \
            "eval model is not ready, please call `engine.prepare()` first."
        eval_dataloader = self._create_dataloader(eval_data, batch_size)
        self._usr_fetch_list = fetch_list

        # Initialized before the loop so an empty dataloader cannot leave
        # `eval_logs` unbound at the return below.
        eval_logs = dict()
        for step, data in enumerate(eval_dataloader):
            logs, outs = self._eval_step(data, use_program_cache, return_numpy)
            eval_logs["eval_loss"] = outs[0] if len(outs) > 0 else []
            for metric in self._metrics:
                results = metric.accumulate()
                for i, res in enumerate(to_list(results)):
                    eval_logs["eval_" + metric.name()[i]] = res
            for name, val in logs.items():
                eval_logs["eval_" + name] = val
            self._logger.info(eval_logs)
        return eval_logs

    def predict(self,
                test_data,
                batch_size=1,
                fetch_list=None,
                use_program_cache=False,
                return_numpy=True):
        self.mode = 'predict'
        assert self.mode in self._dist_main_progs, \
            "predict model is not ready, please call `engine.prepare()` first."
        test_dataloader = self._create_dataloader(test_data, batch_size)
        self._usr_fetch_list = fetch_list

        outputs = []
        for step, data in enumerate(test_dataloader):
            logs, outs = self._predict_step(data, use_program_cache,
                                            return_numpy)
            outputs.append(outs)
            predict_logs = {"pred_" + name: val for name, val in logs.items()}
            self._logger.info(predict_logs)
        return outputs

    def _train_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        fetch_vars = self._fetch_vars[self.mode]["loss"]
        fetch_list, usr_fetch_list = self._fetch_list(fetch_vars)
        fetch_list += usr_fetch_list

        outs = self._executor.run(self.main_program,
                                  fetch_list=fetch_list,
                                  use_program_cache=use_program_cache,
                                  return_numpy=return_numpy)
        for i, out in enumerate(outs):
            logs[fetch_list[i]] = out
        return logs, outs

    def _eval_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        metrics = self._fetch_vars[self.mode]["metrics"]
        losses = self._fetch_vars[self.mode]["loss"]
        fetch_loss, usr_fetch_list = self._fetch_list(losses)
        fetch_metrics, usr_fetch_list = self._fetch_list(metrics)
        fetch_list = fetch_loss + fetch_metrics

        outs = self._executor.run(self.main_program,
                                  fetch_list=fetch_list + usr_fetch_list,
                                  use_program_cache=use_program_cache,
                                  return_numpy=return_numpy)
        usr_out = outs[len(fetch_list):]
        for i, out in enumerate(usr_out):
            logs[usr_fetch_list[i]] = out
        outs = outs[:len(fetch_list)]
        if not outs[len(fetch_loss):]:
            return logs, outs[:len(fetch_loss)]
        for metric in self._metrics:
            metric.update(*outs[len(fetch_loss):])
        return logs, outs[:len(fetch_loss)]

    def _predict_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        fetch_vars = self._fetch_vars[self.mode]["outputs"]
        fetch_list, usr_fetch_list = self._fetch_list(fetch_vars)
        fetch_list += usr_fetch_list

        outs = self._executor.run(self.main_program,
                                  fetch_list=fetch_list,
                                  use_program_cache=use_program_cache,
                                  return_numpy=return_numpy)
        for i, out in enumerate(outs):
            logs[fetch_list[i]] = out
        return logs, outs

    def _fetch_list(self, fetch_vars):
        fetch_list = []
        for var in fetch_vars:
            if var.name in self.main_program.global_block().vars:
                fetch_list.append(var.name)
        usr_fetch_list = []
        if self._usr_fetch_list:
            assert isinstance(self._usr_fetch_list,
                              list), "'fetch_list' type should be list."
            for var in self._usr_fetch_list:
                if isinstance(var, str):
                    if var in self.main_program.global_block().vars:
                        usr_fetch_list.append(var)
                elif isinstance(var, Variable):
                    if var.name in self.main_program.global_block().vars:
                        usr_fetch_list.append(var.name)
        return fetch_list, usr_fetch_list
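
    # Illustrative example: if a user passes `fetch_list=['loss_0', var]` to
    # fit/evaluate/predict, where `var` is a `Variable` of the current main
    # program ('loss_0' is a made-up name here), both entries resolve to
    # fetchable string names; entries absent from the program's global block
    # (e.g. pruned away on this rank) are silently skipped.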

    def _create_dataloader(self,
                           dataset,
                           batch_size,
                           epochs=1,
                           steps_per_epoch=None):
        dist_main_prog = self._dist_main_progs[self.mode][self._cur_rank]
        dist_startup_prog = self._dist_startup_progs[self.mode][self._cur_rank]
        dist_context = self._dist_contexts[self.mode]
        dist_main_block = dist_main_prog.global_block()

        # NOTE: Get feed_list from the dist program, then insert the dataloader
        # op with the sharded var shapes. Because the predict program does not
        # contain label vars, the dataset's values are filtered by the length
        # of feed_list.
        inputs_var = self._feed_vars[self.mode]["inputs"]
        labels_var = self._feed_vars[self.mode]["labels"]
        feed_list = []
        for var in inputs_var + labels_var:
            if var.name in dist_main_block.vars:
                feed_list.append(dist_main_block.vars[var.name])
        dp_world_size, dp_rank = self._get_data_parallel_info(
            feed_list[0], dist_context)

        # Remove the first three (reader) ops left by a previous
        # fit/evaluate/predict call
        op_size = len(dist_main_block.ops)
        if dist_main_block.ops[0].type == 'create_py_reader':
            op_size -= 3
            for _ in range(3):
                dist_main_block._remove_op(0, sync=False)

        # Insert the read ops at the end of the program
        places = paddle.static.cuda_places()
        with static.program_guard(dist_main_prog, dist_startup_prog):
            dataloader = NonIterableGeneratorLoader(
                dataset,
                feed_list,
                places,
                batch_size,
                epochs,
                steps_per_epoch,
                data_parallel_world_size=dp_world_size,
                data_parallel_rank=dp_rank)

        # Move the read ops from the end of the program to its start
        new_op_size = len(dist_main_block.ops)
        for _ in range(new_op_size - 1, op_size - 1, -1):
            op = dist_main_block.ops[new_op_size - 1]
            new_op_desc = dist_main_block.desc._prepend_op()
            new_op_desc.copy_from(op.desc)
            new_op = Operator(dist_main_block,
                              new_op_desc,
                              type=new_op_desc.type())
            dist_main_block.ops.insert(0, new_op)
            dist_op = DistributedOperator(new_op)
            dist_context.add_dist_op_for_program(dist_op)
        for _ in range(new_op_size - op_size):
            dist_main_block._remove_op(new_op_size, sync=False)
        dist_main_block._sync_with_cpp()
        return dataloader

    def _validate_spec(self, specs):
        specs = to_list(specs)
        if specs is not None:
            for i, spec in enumerate(specs):
                assert isinstance(spec, InputSpec)
                if spec.name is None:
                    raise ValueError(
                        "Requires Input[{}].name != None, but received `None` with {}."
                        .format(i, spec))
        return specs
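
    # Illustrative example: every spec must be a named InputSpec, e.g.
    #
    #     inputs_spec = [InputSpec(shape=[None, 512], dtype='int64',
    #                              name='input_ids')]
    #
    # An unnamed spec would raise the ValueError above, because `_build`
    # creates the feed layers from these names.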

    def _set_data_parallel(self, var):
        if self._nranks == 1:
            self._default_strategy = 'serial'
            auto.shard_tensor(var,
                              dist_attr={
                                  "process_mesh": [0],
                                  "dims_mapping":
                                  [-1 for _ in range(len(var.shape))]
                              })
        else:
            self._default_strategy = 'dp'
            auto.shard_tensor(var,
                              dist_attr={
                                  "process_mesh":
                                  list(range(self._nranks)),
                                  "dims_mapping":
                                  [0] + [-1 for _ in range(len(var.shape) - 1)]
                              })

        return var
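
    # Illustrative example: on 4 ranks, a var of shape [batch, seq, hidden]
    # gets dims_mapping [0, -1, -1] over process_mesh [0, 1, 2, 3] in the
    # 'dp' branch, i.e. only the batch dimension is sharded across ranks;
    # the 'serial' branch maps every dimension to -1 (fully replicated).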

    def _get_data_parallel_info(self, var, dist_context):
        # get data parallel world size and current data parallel rank
        from .utils import _get_comm_group, _get_corresponding_rank

        tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)
        process_mesh = tensor_dist_attr.process_mesh
        dims_mapping = tensor_dist_attr.dims_mapping

        if self._cur_rank not in process_mesh.processes:
            rank_id = _get_corresponding_rank(dist_context, process_mesh,
                                              self._cur_rank)
        else:
            rank_id = self._cur_rank

        batch_size_axis = dims_mapping[0]
        if batch_size_axis > -1 and process_mesh.topology[batch_size_axis] > 1:
            group_ranks = _get_comm_group(process_mesh.processes,
                                          process_mesh.topology,
                                          batch_size_axis, rank_id)
            return len(group_ranks), group_ranks.index(rank_id)

        return None, None
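
    # Worked example (illustrative): for process_mesh.processes = [0, ..., 7]
    # with topology [2, 4] and dims_mapping[0] = 0, the batch axis is sharded
    # over mesh dimension 0, so each rank gets a data-parallel world size of 2
    # and a data-parallel rank of 0 or 1 within its communication group.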

    def save(self, path, training=True, mode=None):
        if not mode:
            mode = self.mode

        if training:
            assert 'train' in self._serial_main_progs, \
                "training model is not ready, please call `engine.prepare()` first."
            serial_program = self._serial_main_progs["train"]
            dist_main_prog = self._dist_main_progs["train"][self._cur_rank]
            dist_context = self._dist_contexts["train"]
            self._saver.save(path,
                             serial_program=serial_program,
                             dist_main_program=dist_main_prog,
                             dist_context=dist_context)
        else:
            assert mode, "Please set the 'mode' you want to save."
            feed_vars = self._feed_vars[mode]['inputs']
            fetch_vars = self._fetch_vars[mode]['outputs']
            dist_main_prog = self._dist_main_progs[mode][self._cur_rank]
            self._saver.save_inference_model(path,
                                             feed_vars,
                                             fetch_vars,
                                             self._executor,
                                             program=dist_main_prog)

    def load(self, path, strict=True, load_optimizer=True, mode=None):
        if not mode:
            mode = self.mode
        assert mode, "Please set the 'mode' you want to load."

        dist_main_prog = self._dist_main_progs[mode][self._cur_rank]
        dist_context = self._dist_contexts[mode]
        self._saver.load(path, dist_main_prog, dist_context, strict,
                         load_optimizer)
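
    # A minimal save/load sketch (paths are illustrative):
    #
    #     engine.save('./ckpt/mlp', training=True)     # distributed checkpoint
    #     engine.load('./ckpt/mlp')                    # restore into current mode
    #     engine.save('./infer/mlp', training=False,
    #                 mode='predict')                  # exported inference model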

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, mode):
        self._mode = mode

    @property
    def main_program(self):
        return self._dist_main_progs[self.mode][self._cur_rank]

    @property
    def startup_program(self):
        return self._dist_startup_progs[self.mode][self._cur_rank]

    @property
    def dist_context(self):
        return self._dist_contexts[self.mode]

    @property
    def serial_main_program(self):
        return self._serial_main_progs[self.mode]

    @property
    def serial_startup_program(self):
        return self._serial_startup_progs[self.mode]