engine.py 67.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

15
import os
16
import copy
17
import logging
18
import random
19
import numbers
20
import numpy as np
21 22 23
from collections import defaultdict

import paddle
24
import paddle.utils as utils
25

Z
zhaoyingli 已提交
26
from paddle import fluid, static
27
from paddle.metric import Metric
28
from paddle.static import InputSpec
29
from paddle.fluid import core
30
from paddle.fluid import Variable
31
from paddle.fluid.layers.utils import flatten
32
from paddle.fluid.executor import global_scope, _to_name_str
33
from paddle.fluid.framework import Operator, _non_static_mode
34 35
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.dygraph.parallel import ParallelEnv
36
from paddle.distributed import fleet
37

Z
zhaoyingli 已提交
38
from .callbacks import config_callbacks
39
from .converter import Converter
40
from .helper import ProgramHelper
41
from .cluster import Cluster, get_default_cluster
42 43
from .planner_v2 import Planner
from .parallelizer_v2 import Parallelizer
44 45
from .dist_op import DistributedOperator
from .dist_saver import DistributedSaver
46 47 48 49
from .dist_loader import (
    DistributedDataLoaderFromGenerator,
    DistributedDataLoader,
)
50
from .process_group import new_process_group, get_all_process_groups
51
from .dist_context import DistributedContext, get_default_distributed_context
52
from .strategy import Strategy
53
from .interface import CollectionNames, get_collection
54 55
from .utils import to_list, get_dist_attr, get_lr, validate_opt
from .utils import initialize_pg_in_full_mode, get_input_split_info
56
from .cost.estimate_cost import get_cost_from_engine
57

58 59
from ..utils.log_utils import get_logger

60 61

class Engine:
62
    """
63 64
    An Engine object can provide the full power of auto parallel to users.
    With the help of it, users can easily obtain the abilities of the
65 66 67 68 69 70 71
    distributed training and inference. It also support the dynamic graph and
    static graph at the same time.

    Args:
        model (paddle.nn.Layer, optional): The model is an instance of
            paddle.nn.Layer.
        loss (Loss|Callable|None, optional): The loss can be a `paddle.nn.Layer`
72 73
            instance or any callable function taken the predicted values and
            ground truth values as input. It can be None when there is no loss.
74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
            Default: None.
        optimizer (Optimizer|None, optional): The optimizer need to be set in training
            and should be None in eval and predict mode. Default: None.
        metrics (Metric|list[Metric]|None, optional): If metrics is set, all
            metrics will be calculated and output in train/eval mode. Default: None.
        cluster (Cluster|None, optional): The cluster represents the topology information
            about the used physical devices. Default: None. (Unused for now)
        strategy (Strategy|None, optional): The strategy is used to configure the
        parallelization and optimization behaviors. Default: None.

    Examples:

        .. code-block:: python

            import paddle
            import paddle.vision.transforms as T
90
            from paddle.distributed.fleet import auto
91 92 93 94 95 96 97 98 99 100
            from paddle.vision.datasets import MNIST

            transform = T.Compose([
                T.Transpose(),
                T.Normalize([127.5], [127.5])
            ])
            train_dataset = MNIST(mode='train', transform=transform)
            valid_dataset = MNIST(mode='test', transform=transform)

            model = paddle.vision.models.LeNet()
101
            loss = paddle.nn.CrossEntropyLoss()
102 103 104 105
            optimizer = paddle.optimizer.Adam(
                learning_rate=0.001, parameters=model.parameters())
            metrics = paddle.metric.Accuracy(topk=(1, 2))

106 107
            engine = auto.Engine(model, loss, optimizer, metrics)
            # fit
108 109 110
            engine.fit(train_dataset,
                       epochs=2,
                       batch_size=64)
111
            # evaluate
112 113 114 115 116 117 118
            engine.evaluate(valid_dataset,
                            batch_size=64)
            # predict
            engine.predict(valid_dataset,
                           batch_size=64)
            # save
            engine.save("./my_model")
119
            # load
120 121 122
            engine.load("./my_model")

    """
123

124 125 126 127 128 129 130 131 132 133 134 135 136 137 138
    def __init__(
        self,
        model=None,
        loss=None,
        optimizer=None,
        metrics=None,
        cluster=None,
        strategy=None,
    ):

        if (
            model
            and not isinstance(model, paddle.nn.Layer)
            and not callable(model)
        ):
139 140 141 142
            raise TypeError(
                "'model must be sub classes of `paddle.nn.Layer` or any callable function."
            )
        self._model = model
143 144 145 146 147 148 149 150 151

        if (
            loss
            and not isinstance(loss, (paddle.nn.Layer, Variable))
            and not callable(loss)
        ):
            raise TypeError(
                "'loss' must be sub classes of `paddle.nn.Layer` or any callable function or a Variable."
            )
152 153 154
        self._loss = loss

        if optimizer and not isinstance(
155 156 157
            optimizer,
            (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer),
        ):
158 159
            raise TypeError(
                "'optimizer' must be object of class `paddle.optimizer.Optimizer`"
160 161
                " or `paddle.fluid.optimizer.Optimizer`."
            )
162 163
        self._optimizer = validate_opt(optimizer)
        self._orig_optimizer = copy.deepcopy(self._optimizer)
164 165 166

        metrics = metrics or []
        for metric in to_list(metrics):
167 168 169 170 171 172
            if metric and not isinstance(metric, Metric):
                raise TypeError(
                    "{} is not sub class of Metric".format(
                        metric.__class__.__name__
                    )
                )
173 174 175 176 177 178 179 180 181 182 183 184 185 186
        self._metrics = to_list(metrics)

        if cluster and not isinstance(cluster, Cluster):
            raise TypeError(
                "'cluster' must be the object or class `paddle.distributed.auto_parallel.Cluster`"
            )
        self._cluster = cluster or get_default_cluster()

        if strategy and not isinstance(strategy, Strategy):
            raise TypeError(
                "'strategy' must be object of class `paddle.distributed.auto_parallel.Strategy`"
            )
        self._strategy = strategy or Strategy()

187
        self._logger = get_logger(logging.INFO)
188
        if os.getenv("POD_NAME"):
189 190
            self._logger.info(
                "Distribute training by paddle.distributed.launch"
191
            )
192
            fleet.init(is_collective=True)
193

194
        self._executor = None
195 196 197
        self._cur_rank = paddle.distributed.get_rank()
        self._nranks = paddle.distributed.get_world_size()
        self._saver = DistributedSaver()
198

199 200
        self._orig_main_prog = static.default_main_program()
        self._orig_startup_prog = static.default_startup_program()
201
        self._orig_dist_context = get_default_distributed_context()
202
        self._dist_contexts = {}
203 204
        self._fwd_main_progs = {}
        self._fwd_dist_contexts = {}
205 206
        self._serial_main_progs = {}
        self._serial_startup_progs = {}
207 208 209 210
        self._dist_main_progs = defaultdict(dict)  # dist main programs
        self._dist_startup_progs = defaultdict(dict)  # dist startup programs
        self._feed_vars = {}
        self._fetch_vars = {}
211
        self._planners = {}
212 213
        self._has_prepared = {"train": False, "eval": False, "predict": False}
        self._has_prepared_reader = {
214 215
            "train": False,
            "eval": False,
216
            "predict": False,
217
        }
218 219 220 221
        self._inputs_spec = []
        self._labels_spec = []
        self._inputs = []
        self._labels = []
222
        self._losses = []
223

224
        self._mode = None
225 226
        self._skip_build = False
        self._outside_dataloader = False
227
        self._planned_mode = None
228 229
        self._dygraph_mode = False
        self._tuning = self._strategy.tuning
230

Z
zhaoyingli 已提交
231 232
        self.history = None

233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250
    def _prepare_data_spec(self, data, split, batch_size):
        inputs_spec = []
        labels_spec = []
        if isinstance(data, paddle.io.IterableDataset):
            if split is None:
                inputs, labels = next(iter(data))
            else:
                sample = next(iter(data))
                inputs = sample[:split]
                labels = sample[split:]
        elif isinstance(data, paddle.io.Dataset):
            if split is None:
                inputs, labels = data[0]
            else:
                sample = data[0]
                inputs = sample[:split]
                labels = sample[split:]
        else:
251
            raise TypeError(
252 253 254 255
                "Data should be a Dataset or IterableDatset, but received {}.".format(
                    type(data).__name__
                )
            )
256 257 258 259
        inputs = to_list(inputs)
        labels = to_list(labels)

        num_shards = self._strategy.dataset.num_shards
260

261 262 263 264 265 266 267 268 269 270 271 272 273 274
        def _adjust_item_spec(num_shards, spec):
            if num_shards > 1 and len(spec.shape) > 1:
                spec.shape[0] = spec.shape[0] * num_shards

        def _infer_item_spec(item, name, batch_size, specs):
            if isinstance(item, np.ndarray):
                spec = InputSpec.from_numpy(item, name)
                if batch_size is None:
                    _adjust_item_spec(num_shards, spec)
                    specs.append(spec)
                else:
                    specs.append(spec.batch(batch_size))
            elif isinstance(item, (Variable, core.VarBase, core.eager.Tensor)):
                spec = InputSpec.from_tensor(item, name)
275
                _adjust_item_spec(num_shards, spec)
276 277 278 279
                if batch_size is None:
                    specs.append(spec)
                else:
                    specs.append(spec.batch(batch_size))
280
            elif isinstance(item, numbers.Number):
281
                specs.append(InputSpec([batch_size], type(item), name))
282 283 284 285 286 287
            else:
                raise TypeError(
                    "The sample's dtype returned of dataset should be number, np.ndarray or Tensor, but got {}".format(
                        type(item).__name__
                    )
                )
288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303

        if inputs is not None:
            for i, item in enumerate(inputs):
                assert item is not None, "Receive None input."
                name = "input" + str(i)
                _infer_item_spec(item, name, batch_size, inputs_spec)
        if labels is not None:
            for i, item in enumerate(labels):
                assert item is not None, "Receive None input."
                name = "label" + str(i)
                _infer_item_spec(item, name, batch_size, labels_spec)

        inputs_spec = self._validate_spec(inputs_spec)
        labels_spec = self._validate_spec(labels_spec)
        return inputs_spec, labels_spec

304
    def _prepare_data_tensor(self, inputs_spec, labels_spec, inputs, labels):
305
        if _non_static_mode() or self._dygraph_mode:
306 307
            raise ValueError("Only support static graph mode.")

308
        if inputs_spec:
309 310 311 312 313
            assert isinstance(
                inputs_spec, list
            ), "inputs should be list, but received {}".format(
                type(inputs_spec)
            )
314 315 316 317 318 319 320 321 322
            assert isinstance(
                inputs, list
            ), "inputs should be list, but received {}".format(type(inputs))
            assert len(inputs_spec) == len(
                inputs
            ), "the number of `inputs_spec` should be equal to `inputs`'s."
            for input_spec, input in zip(inputs_spec, inputs):
                if input_spec.shape != input.shape:
                    input.desc.set_shape(input_spec.shape)
323
        if labels_spec:
324 325 326 327 328
            assert isinstance(
                labels_spec, list
            ), "labels should be list, but received {}".format(
                type(labels_spec)
            )
329 330 331 332 333 334 335 336 337 338
            assert isinstance(
                labels, list
            ), "labels should be list, but received {}".format(type(labels))
            assert len(labels_spec) == len(
                labels
            ), "the number of `labels_spec` should be equal to `labels`'s."
            for label_spec, label in zip(labels_spec, labels):
                if label_spec.shape != label.shape:
                    label.desc.set_shape(label_spec.shape)

339 340 341 342 343 344 345 346 347
        return inputs, labels

    def _prepare_reader(self):
        dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank]
        dist_context = self._dist_contexts[self._mode]
        dist_main_block = dist_main_prog.global_block()

        # NOTE: this list may be changed if Paddle changes the existing rules.
        related_reader_ops = [
348 349 350
            "create_py_reader",
            "create_double_buffer_reader",
            "read",
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367
        ]
        # remove the first three ops if multiple run fit/evaluate/predict
        if dist_main_block.ops[0].type == 'create_py_reader':
            for i in range(len(related_reader_ops)):
                if dist_main_block.ops[0].type in related_reader_ops:
                    dist_main_block._remove_op(0, sync=False)
        dist_main_block._sync_with_cpp()
        # Step 1: find the reader ops
        reader_op_indices = []
        for idx, op in enumerate(dist_main_block.ops):
            if op.type in related_reader_ops:
                reader_op_indices.append(idx)
        # Step 2: insert the new reader ops to cpp
        new_reader_ops = []
        for idx in reversed(reader_op_indices):
            new_op_desc = dist_main_block.desc._prepend_op()
            new_op_desc.copy_from(dist_main_block.ops[idx].desc)
368 369 370
            new_op = Operator(
                dist_main_block, new_op_desc, type=new_op_desc.type()
            )
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
            new_reader_ops.append(new_op)
            dist_op = DistributedOperator(new_op)
            dist_context.add_dist_op_for_program(dist_op)
        # Step 3: insert the new reader ops to python
        for new_op in new_reader_ops:
            dist_main_block.ops.insert(0, new_op)
        for i in range(len(reader_op_indices)):
            reader_op_indices[i] += len(reader_op_indices)
        # Step 4: remove the old reader ops from python and cpp
        for idx in reversed(reader_op_indices):
            op = dist_main_block.ops.pop(idx)
            dist_main_block.desc._remove_op(idx, idx + 1)
        dist_main_block._sync_with_cpp()
        self._has_prepared_reader[self._mode] = True

    def _prepare_feed(self, data, user_feeds, mode):
        feeds = {}
        if data is not None:
            if isinstance(data, (list, tuple)):
                if len(data) == 1 and isinstance(data[0], dict):
                    for name, data in data[0].items():
                        feeds[name] = data
                else:
                    raise ValueError("Unsupported data {}".format(data))
            elif isinstance(data, dict):
                for name, data in data.items():
                    feeds[name] = data
            else:
                raise ValueError("Unsupported data {}".format(data))
400
        if user_feeds is not None:
401 402 403 404 405
            assert isinstance(
                user_feeds, dict
            ), "user_feeds must be a dict, but receive {}".format(
                type(user_feeds).__name__
            )
406 407
            for name, data in user_feeds.items():
                feeds[name] = data
408 409
        return feeds

410
    def _prepare_fetch(self, user_fetches, mode):
411
        if user_fetches is not None:
412 413 414 415 416
            assert isinstance(
                user_fetches, list
            ), "user_fetches must be a list, but receive {}".format(
                type(user_fetches).__name__
            )
417
        fetch_names = []
418
        fetch_indices = []
419

420 421
        def _process_fetch_group(group_name, var_list):
            group_indices = []
422
            for var in var_list:
423 424 425 426 427 428
                # Remove duplicate var_names
                if self._is_local_var(var):
                    var_name = _to_name_str(var)
                    if var_name not in fetch_names:
                        fetch_names.append(var_name)
                    group_indices.append(fetch_names.index(var_name))
429 430
            if not group_indices:
                fetch_names.append([])
431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447
            fetch_indices.append(group_indices)

        if mode != "predict":
            _process_fetch_group("loss", self._fetch_vars[mode]["loss"])
        if mode != "predict":
            metrics = self._fetch_vars[mode]["metrics"]
            for i, var_list in enumerate(metrics):
                _process_fetch_group("metrics_" + str(i), var_list)
        if mode == "predict":
            _process_fetch_group("outputs", self._fetch_vars[mode]["outputs"])
        user_fetches_collection = [
            item[1] for item in get_collection(CollectionNames.FETCHES)
        ]
        var_list = (user_fetches_collection or []) + (user_fetches or [])
        _process_fetch_group("fetches", var_list)
        return fetch_names, fetch_indices

448 449 450 451 452 453 454 455 456 457
    def _prepare_logger(
        self,
        outs,
        epoch=None,
        step=None,
        lr=None,
        fetch_names=None,
        fetch_indices=None,
        mode=None,
    ):
Z
zhaoyingli 已提交
458
        logs = {}
459
        if epoch is not None:
Z
zhaoyingli 已提交
460
            logs["epoch"] = epoch
461
        if step is not None:
Z
zhaoyingli 已提交
462
            logs["step"] = step + 1
463
        if lr is not None:
Z
zhaoyingli 已提交
464
            logs["lr"] = lr
465 466
        group_idx = 0
        if mode != "predict":
Z
zhaoyingli 已提交
467
            # logging loss
468
            loss_indices = fetch_indices[group_idx]
Z
zhaoyingli 已提交
469
            assert len(loss_indices) <= 1
470
            for idx in loss_indices:
Z
zhaoyingli 已提交
471
                logs["loss"] = outs[idx][0]
472
            group_idx += 1
Z
zhaoyingli 已提交
473
            # logging metrics
474 475 476 477 478 479 480 481 482 483 484
            metric_vars = self._fetch_vars[mode]["metrics"]
            if metric_vars:
                for metric in self._metrics:
                    metrics_indices = fetch_indices[group_idx]
                    metric_out = []
                    for idx in metrics_indices:
                        metric_out.append(outs[idx])
                    if metric_out:
                        metric.update(*metric_out)
                        results = metric.accumulate()
                        for i, res in enumerate(to_list(results)):
Z
zhaoyingli 已提交
485
                            logs[metric.name()[i]] = res
486
                    group_idx += 1
Z
zhaoyingli 已提交
487 488 489 490 491 492 493
        # logging outputs
        elif mode == "predict":
            outputs_indices = fetch_indices[group_idx]
            logs_out = {}
            for idx in outputs_indices:
                logs_out["out%d" % (idx)] = outs[idx]
            logs["outputs"] = logs_out
494 495
            group_idx += 1
        # logging user fetches
Z
zhaoyingli 已提交
496 497 498
        collect_fetches = get_collection(CollectionNames.FETCHES)
        logs_fetch = {}
        for name, var in collect_fetches:
499 500
            if var.name in fetch_names:
                idx = fetch_names.index(var.name)
Z
zhaoyingli 已提交
501 502 503
                logs_fetch[name or var.name] = outs[idx]
        logs["fetches"] = logs_fetch
        return logs
504

505 506 507 508 509 510 511 512 513 514 515
    def _prepare_program(self, mode):
        # Do the build process
        self._build(mode)
        # Do the planning process
        self._plan(mode)
        # Do the parallel process
        self._parallel(mode)
        # Init comm and startup program
        self._initialize(mode)
        self._has_prepared[mode] = True

516
    def _build(self, mode):
517
        if _non_static_mode() or self._dygraph_mode:
518
            paddle.disable_static()
519 520 521
            self._dygraph_mode = True
            self._logger.info("Building model with 'to_static' method.")

522
            self.program_helper = ProgramHelper(
523 524 525 526 527
                self._model,
                self._loss,
                self._metrics,
                self._inputs_spec,
                self._labels_spec,
528
            )
529
            # build forward main program
530
            self.program_helper.build_program(mode)
531

532 533 534
            self.concrete_program = self.program_helper.concrete_program
            serial_main_prog = self.program_helper.main_program
            serial_startup_prog = self.program_helper.startup_program
535

536 537
            self._inputs = self.program_helper.input_vars
            self._labels = self.program_helper.label_vars
538
            outputs = self.program_helper.output_vars
539
            self._losses = self.program_helper.loss_vars
540
            metrics = self.program_helper.metric_vars
541

542
            paddle.enable_static()
543 544 545 546 547 548
        else:
            # build program in static mode
            serial_main_prog = self._serial_main_progs.get(mode, None)
            if serial_main_prog is not None:
                return

549
            outputs = []
550
            metrics = []
551
            self._losses = []
552 553
            serial_main_prog = self._orig_main_prog.clone()
            serial_startup_prog = self._orig_startup_prog.clone()
554
            if not self._skip_build:
555 556 557
                with static.program_guard(
                    serial_main_prog, serial_startup_prog
                ), utils.unique_name.guard():
558 559 560 561 562 563 564 565 566
                    self._inputs = [
                        s._create_feed_layer() for s in self._inputs_spec
                    ]
                    self._labels = [
                        s._create_feed_layer() for s in self._labels_spec
                    ]

                    outputs = to_list(self._model(*self._inputs))

567
                    if mode != "predict" and self._loss:
568 569 570 571 572 573 574 575
                        assert isinstance(
                            self._loss, paddle.nn.Layer
                        ) or callable(
                            self._loss
                        ), "the type of `loss` of the Engine arguments should be sub classes of `paddle.nn.Layer` or any callable function."
                        self._losses = to_list(
                            self._loss(*(outputs + self._labels))
                        )
576

577
                    if mode != "predict" and (outputs or self._labels):
578 579
                        for metric in self._metrics:
                            metrics.append(
580 581 582
                                to_list(
                                    metric.compute(*(outputs + self._labels))
                                )
583
                            )
584
            else:
585 586 587 588
                assert isinstance(
                    self._loss, Variable
                ), "the type of `loss` of the Engine arguments should be Variable."
                self._losses = to_list(self._loss)
589 590 591 592 593 594 595 596

        default_ctx = get_default_distributed_context()
        if not default_ctx.has_annotation:
            # We build the world process group because the data parallel
            # needs all ranks by default.
            new_process_group(list(range(self._nranks)))
            default_ctx.data_parallel = True

597
        feed_vars = {"inputs": self._inputs, "labels": self._labels}
598 599 600

        fetch_vars = {
            "outputs": flatten(outputs),
601
            "loss": self._losses,
602
            "metrics": metrics,
603 604
        }

605 606 607
        if mode != "train":
            serial_main_prog = serial_main_prog.clone(for_test=True)

608
        self._set_recompute_ckpts()
609
        self._dist_contexts[mode] = DistributedContext(
610 611 612
            serial_main_prog,
            serial_startup_prog,
            self._optimizer,
613 614 615 616 617 618 619 620 621 622 623
            self._losses,
            feed_vars,
            fetch_vars,
            self._cluster,
            self._strategy,
        )
        self._fwd_dist_contexts[mode] = DistributedContext(
            serial_main_prog,
            serial_startup_prog,
            self._optimizer,
            self._losses,
624 625 626 627 628
            feed_vars,
            fetch_vars,
            self._cluster,
            self._strategy,
        )
629
        self._dist_contexts[mode].gradient_scale = self._strategy.gradient_scale
630
        self._fwd_main_progs[mode] = serial_main_prog.clone()
631

632 633 634
    def _optimization_tuning(self, mode, dataset, batch_size):
        if not self._tuning.enable:
            raise ValueError("Please set `tuning.enable=True`.")
635

636 637 638 639 640 641 642 643
        assert mode == "train"
        # Do the build process
        self._build(mode)
        # Do the planning process
        self._plan(mode)

        dataset.dp_world_size = self._dp_world_sizes
        dataset.dp_rank = self._dp_ranks
644 645

        from .tuner.optimization_tuner import OptimizationTuner
646 647 648 649 650 651 652 653 654 655

        self._optimization_tuner = OptimizationTuner(
            self._tuning.to_dict(),
            self._dist_contexts[mode],
            dataset,
            self._inputs_spec,
            self._labels_spec,
            batch_size=batch_size,
            rank=self._cur_rank,
        )
656 657 658

        self._optimization_tuner.tune()

659
        if self._tuning.run_after_tuning:
660 661
            # update the strategy
            self._dist_contexts[
662 663
                mode
            ]._strategy = self._optimization_tuner.get_best_config()
664

665 666 667 668 669 670
    def _plan(self, mode):
        if self._planned_mode is None:
            self._planned_mode = mode
        else:
            self._init_dist_context(mode)

671 672
        self._planners[mode] = Planner(mode, self._dist_contexts[mode])
        self._planners[mode].plan()
673

674 675 676 677
        # infer data parallel info
        inputs_var = self._dist_contexts[mode].serial_feed_vars["inputs"]
        labels_var = self._dist_contexts[mode].serial_feed_vars["labels"]
        block = self._dist_contexts[mode].serial_main_program.global_block()
678
        # TODO: check this feed_list
679 680 681 682 683
        feed_list = []
        for var in inputs_var + labels_var:
            if var.name in block.vars:
                feed_list.append(block.vars[var.name])

684 685
        self._dp_world_sizes = []
        self._dp_ranks = []
686
        for feed_var in feed_list:
687 688
            dp_world_size, dp_rank = get_input_split_info(
                self._cur_rank, feed_var, self._dist_contexts[mode]
689
            )
690 691
            self._dp_world_sizes.append(dp_world_size)
            self._dp_ranks.append(dp_rank)
692

693
    def _parallel(self, mode, all_ranks=False):
694 695 696
        # Parallelize program based on the planner's results
        # For now, the completer has to be passed to the planner,
        # because we may use it to complete the annotation of the backwarkward and update.
697 698 699
        parallelizer = Parallelizer(
            mode, self._planners[mode].completer, self._dist_contexts[mode]
        )
700 701 702 703
        if not all_ranks:
            parallelizer.parallel(self._cur_rank)
        else:
            parallelizer.parallel_all()
704 705

    def _init_dist_context(self, mode):
706
        # Init dist_context['mode'] with the first planned dist_context
707 708 709 710 711 712 713 714 715 716
        # to guarantee that train/eval/predict mode have same parallel strategy
        dist_context = self._dist_contexts[mode]
        origin_main_prog = dist_context._original_serial_main_program
        ref_mode = self._planned_mode
        ref_dist_context = self._dist_contexts[ref_mode]
        ref_origin_main_prog = ref_dist_context._original_serial_main_program
        ref_blocks = ref_origin_main_prog.blocks
        for ib, block in enumerate(origin_main_prog.blocks):
            for iop, op in enumerate(block.ops):
                ref_op = ref_blocks[ib].ops[iop]
717 718 719 720 721 722 723 724
                assert (
                    op.type == ref_op.type
                ), "'{}' mode op '{}' is different with '{}' op '{}'. ".format(
                    mode, op.type, ref_mode, ref_op.type
                )
                ref_op_dist_attr = (
                    ref_dist_context.get_op_dist_attr_for_program(ref_op)
                )
725 726 727
                dist_context.set_op_dist_attr_for_program(op, ref_op_dist_attr)

    def _initialize(self, mode):
728
        # Get the current content from the distributed context
729
        self._serial_main_progs[mode] = self._dist_contexts[
730 731
            mode
        ].serial_main_program
732
        self._serial_startup_progs[mode] = self._dist_contexts[
733 734
            mode
        ].serial_startup_program
735
        self._dist_main_progs[mode] = self._dist_contexts[
736 737
            mode
        ].dist_main_programs
738
        self._dist_startup_progs[mode] = self._dist_contexts[
739 740
            mode
        ].dist_startup_programs
741 742
        self._feed_vars[mode] = self._dist_contexts[mode].serial_feed_vars
        self._fetch_vars[mode] = self._dist_contexts[mode].serial_fetch_vars
Z
zhaoyingli 已提交
743
        self._optimizer = self._dist_contexts[mode]._serial_optimizer
744

745 746 747 748
        if self._nranks > 1:
            # Traverse different rank programs and traverse each op of them,
            # instantiate communication by process_mapping.
            all_process_groups = get_all_process_groups()
C
caozhou 已提交
749 750
            cur_rank = self._cur_rank
            # NOTE: After the implementation of the unified dynamic and static communication group initialization mode in the future, the initialization logic of full mode will be removed because port occupation error may occur.
751 752 753 754
            if self._strategy.auto_mode == "full":
                initialize_pg_in_full_mode(all_process_groups, cur_rank)
            else:
                for process_group in all_process_groups:
C
caozhou 已提交
755
                    if cur_rank not in process_group.ranks:
756 757
                        continue
                    process_group.instantiate()
758

759 760 761
        place = _get_device()
        if isinstance(place, fluid.CUDAPlace):
            place = fluid.CUDAPlace(ParallelEnv().dev_id)
762

763 764 765 766 767
        if self._strategy.seed:
            paddle.seed(self._strategy.seed + self._dp_ranks[0])
            np.random.seed(self._strategy.seed + self._dp_ranks[0])
            random.seed(self._strategy.seed + self._dp_ranks[0])

768
        if self._dygraph_mode:
769 770 771
            dist_context = self._dist_contexts[mode]
            dist_main_program = self._dist_main_progs[mode][self._cur_rank]
            self.program_helper.init(dist_main_program, place, dist_context)
772

773
        if self._executor is None:
774
            self._executor = paddle.static.Executor(place)
775 776 777 778 779 780 781 782 783 784
            uninitialized = []
            dist_startup_prog = self._dist_startup_progs[mode][self._cur_rank]
            for var in dist_startup_prog.list_vars():
                scope_var = global_scope().find_var(var.name)
                if scope_var and scope_var.get_tensor()._is_initialized():
                    continue
                uninitialized.append(var)
            if uninitialized:
                prune_startup_prog = dist_startup_prog._prune(uninitialized)
                self._executor.run(prune_startup_prog)
785

786
            if hasattr(self, "_state_dict") and hasattr(self, "_dist_attr"):
787 788 789
                self._set_state_dict(
                    mode, self._strict, self._state_dict, self._dist_attr
                )
790 791

        if self._strategy.reinit:
Z
zhaoyingli 已提交
792
            self._logger.info("NOTE: parameters will be re-initialized.")
793 794 795
            dist_startup_prog = self._dist_startup_progs[mode][self._cur_rank]
            self._executor.run(dist_startup_prog)

796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813
    def fit(
        self,
        train_data,
        train_sample_split=None,
        batch_size=1,
        epochs=1,
        steps_per_epoch=None,
        log_freq=10,
        save_dir=None,
        save_freq=1,
        valid_data=None,
        valid_sample_split=None,
        valid_freq=1,
        valid_steps=None,
        collate_fn=None,
        callbacks=None,
        verbose=2,
    ):
814 815 816 817 818 819 820 821
        """
        Trains the model for a fixed number of epochs. If `valid_data` is set,
        evaluation will be done at the end of each epoch.

        Args:
            train_data (Dataset): An instance of paddle paddle.io.Dataset. Default: None.
            train_sample_split (int, optional): Each sample of the train dataset is assumed
                to be a (input, label) pair by default and has two items. If each sample has
822
                more than two items, train_sample_split specifies how to split these items into
823
                input and label. The items before it are input and the left are label. Default: None.
824
            batch_size (int, optional): The batch size of train_data and valid_data if provided.
825 826 827
                The user's data will be used directly without batching if set to None. Default: 1.
            epochs (int, optional): The number of epochs to train the model. Default: 1.
            steps_per_epoch (int, optional): The total number of steps (batches of samples)
828
                is executed in one epoch before stating the next one. If None, it is equal to
829 830
                the number samples in your dataset divided by the batch size. Default: None.
            valid_data (Dataset, optional): An instance of paddle paddle.io.Dataset used for
831
                evaluation at the end of epoch. No evaluation will be done if set to None.
832
                Default: None. (Unsupported for now)
833
            valid_freq (int, optional): Only relevant if valid_data is provided. This specifies
834 835
                how many training epochs before a new evaluation is performed. Default: 1.
            valid_sample_split (int, optional): Only relevant if valid_data is provided.
836 837
                Each sample of the valid dataset is assumed to be a (input, label) pair
                by default and has two items. If each sample has more than two items,
838 839 840
                valid_sample_split specifies how to split these items into input and label.
                The items before it are input and the left are label. Default: None.
            valid_steps (int, optional): Only relevant if valid_data is provided.
841 842
                It is the total number of steps (batches of samples) to draw before
                stopping validation at the end of every epoch. If None, validation will run until the
843 844 845 846
                `valid_data` dataset is exhausted. The validation will start from the
                beginning of the dataset at each epoch. Default: None.
            collate_fn(callable, optional): function to generate mini-batch data by merging
                the sample list, None for only stack each fields of sample in axis
847
                0. Default None.
848 849 850 851 852 853 854 855 856 857 858 859
            callbacks (Callback|None, optional): A list of `Callback` instances to apply
                during training. Default: None. (Unused for now)

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle
                import paddle.vision.transforms as T
860
                from paddle.distributed.fleet import auto
861 862 863 864 865 866 867 868 869
                from paddle.vision.datasets import MNIST

                transform = T.Compose([
                    T.Transpose(),
                    T.Normalize([127.5], [127.5])
                ])
                train_dataset = MNIST(mode='train', transform=transform)

                model = paddle.vision.models.LeNet()
870
                loss = paddle.nn.CrossEntropyLoss()
871 872 873 874
                optimizer = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=model.parameters())
                metrics = paddle.metric.Accuracy(topk=(1, 2))

875
                engine = auto.Engine(model, loss, optimizer, metrics)
876 877 878 879
                engine.fit(train_dataset,
                           epochs=2,
                           batch_size=64)
        """
880 881
        self._mode = 'train'
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
882 883
            train_data, train_sample_split, batch_size
        )
884 885
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
Z
zhaoyingli 已提交
886
        else:
887
            self._switch_mode(self._mode)
Z
zhaoyingli 已提交
888

889 890 891 892 893 894 895
        train_dataloader = self._prepare_dataloader_from_generator(
            dataset=train_data,
            capacity=70,
            iterable=False,
            batch_size=batch_size,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
896 897
            collate_fn=collate_fn,
        )
Z
zhaoyingli 已提交
898

899
        fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode)
Z
zhaoyingli 已提交
900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925

        cbks = config_callbacks(
            callbacks,
            engine=self,
            batch_size=batch_size,
            epochs=epochs,
            steps=train_dataloader._steps,
            log_freq=log_freq,
            save_freq=save_freq,
            save_dir=save_dir,
            verbose=verbose,
            metrics=self._metrics_name(),
            acc_step=self._k_steps,
        )

        cbks.on_begin('train')
        for epoch in range(epochs):
            logs = {}
            cbks.on_epoch_begin(epoch)
            for step, _ in enumerate(train_dataloader):
                cbks.on_batch_begin('train', step, logs)
                try:
                    outs = self._executor.run(
                        self.main_program,
                        fetch_list=fetch_names,
                        use_program_cache=self._strategy.use_cache,
926 927
                        return_numpy=self._strategy.return_numpy,
                    )
Z
zhaoyingli 已提交
928 929 930
                except core.EOFException:
                    break
                lr = get_lr(self._optimizer)
931 932 933 934 935 936 937 938 939
                logs = self._prepare_logger(
                    outs,
                    epoch,
                    step,
                    lr,
                    fetch_names,
                    fetch_indices,
                    self._mode,
                )
Z
zhaoyingli 已提交
940 941 942
                cbks.on_batch_end('train', step, logs)

            if valid_data and (epoch + 1) % valid_freq == 0:
943 944 945 946 947 948 949 950 951 952
                val_logs = self.evaluate(
                    valid_data,
                    valid_sample_split,
                    batch_size,
                    valid_steps,
                    log_freq,
                    collate_fn,
                    callbacks,
                    verbose,
                )
Z
zhaoyingli 已提交
953
                val_logs = {
954
                    "val_" + name: val for name, val in val_logs.items()
Z
zhaoyingli 已提交
955 956 957 958 959 960 961 962 963 964
                }
                logs.update(val_logs)
                self._switch_mode("train")
            else:
                self._reset_metrics()

            cbks.on_epoch_end(epoch, logs)

        cbks.on_end('train', logs)
        return self.history
965

966 967 968 969 970 971 972 973 974 975 976
    def evaluate(
        self,
        valid_data,
        valid_sample_split=None,
        batch_size=1,
        steps=None,
        log_freq=10,
        collate_fn=None,
        callbacks=None,
        verbose=2,
    ):
977 978 979 980
        """
        Evaluate the loss and metrics of the model on evaluation data.

        Args:
981 982
            valid_data (Dataset): An instance of paddle paddle.io.Dataset. Default: None.
            valid_sample_split (int, optional): Each sample of the eval dataset is assumed
983
                to be a (input, label) pair by default and has two items. If each sample has
984
                more than two items, valid_sample_split specifies how to split these items into
985
                input and label. The items before it are input and the left are label. Default: None.
986
            batch_size (int, optional): The batch size of valid_data. The user's data will
987
                be used directly without batching if set to None. Default: 1.
988 989
            steps (int, optional): It is the total number of steps (batches of samples) to draw before
                stopping evaluation. If None, evaluation will run until the `valid_data` dataset is exhausted.
990 991 992 993 994
                The evaluation will start from the beginning of the dataset in each run. Default: None.
            collate_fn(callable, optional): function to generate mini-batch data by merging
                the sample list, None for only stack each fields of sample in axis
                0. Default None.
            callbacks (Callback|None, optional): A list of `Callback` instances to apply
995
                during evaluating. Default: None. (Unused for now)
996 997 998 999 1000 1001 1002 1003 1004 1005

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle
                import paddle.vision.transforms as T
1006
                from paddle.distributed.fleet import auto
1007 1008 1009 1010 1011 1012 1013 1014 1015
                from paddle.vision.datasets import MNIST

                transform = T.Compose([
                    T.Transpose(),
                    T.Normalize([127.5], [127.5])
                ])
                valid_dataset = MNIST(mode='test', transform=transform)

                model = paddle.vision.models.LeNet()
1016
                loss = paddle.nn.CrossEntropyLoss()
1017 1018
                metrics = paddle.metric.Accuracy(topk=(1, 2))

1019
                engine = auto.Engine(model, loss, metrics=metrics)
1020 1021 1022
                engine.evaluate(valid_dataset, batch_size=64)

        """
1023 1024
        self._mode = 'eval'
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
1025 1026
            valid_data, valid_sample_split, batch_size
        )
1027 1028
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
Z
zhaoyingli 已提交
1029
        else:
1030
            self._switch_mode(self._mode)
Z
zhaoyingli 已提交
1031

1032 1033 1034 1035 1036 1037
        valid_dataloader = self._prepare_dataloader_from_generator(
            dataset=valid_data,
            capacity=70,
            iterable=False,
            batch_size=batch_size,
            steps_per_epoch=steps,
1038 1039
            collate_fn=collate_fn,
        )
Z
zhaoyingli 已提交
1040

1041
        fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode)
1042

Z
zhaoyingli 已提交
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052
        cbks = config_callbacks(
            callbacks,
            engine=self,
            batch_size=batch_size,
            log_freq=log_freq,
            verbose=verbose,
            metrics=self._metrics_name(),
        )

        eval_steps = valid_dataloader._steps
1053 1054 1055
        cbks.on_begin(
            'eval', {'steps': eval_steps, 'metrics': self._metrics_name()}
        )
Z
zhaoyingli 已提交
1056
        logs = {}
1057
        for step, _ in enumerate(valid_dataloader):
Z
zhaoyingli 已提交
1058
            cbks.on_batch_begin('eval', step, logs)
1059
            try:
1060 1061
                outs = self._executor.run(
                    self.main_program,
1062
                    fetch_list=fetch_names,
1063
                    use_program_cache=self._strategy.use_cache,
1064 1065
                    return_numpy=self._strategy.return_numpy,
                )
1066
            except core.EOFException:
1067
                break
1068 1069 1070
            logs = self._prepare_logger(
                outs, None, step, None, fetch_names, fetch_indices, self._mode
            )
Z
zhaoyingli 已提交
1071 1072
            cbks.on_batch_end('eval', step, logs)
        cbks.on_end('eval', logs)
1073
        self._reset_metrics()
Z
zhaoyingli 已提交
1074
        return logs
1075

1076 1077 1078 1079 1080 1081 1082 1083 1084 1085
    def predict(
        self,
        test_data,
        test_sample_split=None,
        batch_size=1,
        steps=None,
        collate_fn=None,
        callbacks=None,
        verbose=2,
    ):
1086 1087 1088 1089 1090 1091 1092
        """
        Compute the output predictions on testing data.

        Args:
            test_data (Dataset): An instance of paddle paddle.io.Dataset. Default: None.
            test_sample_split (int, optional): Each sample of the test dataset is assumed
                to be a (input, label) pair by default and has two items. If each sample has
1093
                more than two items, test_sample_split specifies how to split these items into
1094 1095 1096
                input and label. The items before it are input and the left are label. Default: None.
            batch_size (int, optional): The batch size of test_data. The user's data will
                be used directly without batching if set to None. Default: 1.
1097 1098
            steps (int, optional): It is the total number of steps (batches of samples) to draw before
                stopping predict. If None, predict will run until the `test_data` dataset is exhausted.
1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114
                The predict will start from the beginning of the dataset in each run. Default: None.
            collate_fn(callable, optional): function to generate mini-batch data by merging
                the sample list, None for only stack each fields of sample in axis
                0. Default None.
            callbacks (Callback|None, optional): A list of `Callback` instances to apply
                during testing. Default: None. (Unused for now)

        Returns:
            None

        Examples:

            .. code-block:: python

                import paddle
                import paddle.vision.transforms as T
1115
                from paddle.distributed.fleet import auto
1116 1117 1118 1119 1120 1121 1122 1123 1124 1125
                from paddle.vision.datasets import MNIST

                transform = T.Compose([
                    T.Transpose(),
                    T.Normalize([127.5], [127.5])
                ])
                valid_dataset = MNIST(mode='test', transform=transform)

                model = paddle.vision.models.LeNet()

1126
                engine = auto.Engine(model)
1127 1128
                engine.predict(valid_dataset, batch_size=64)
        """
1129 1130
        self._mode = 'predict'
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
1131 1132
            test_data, test_sample_split, batch_size
        )
1133 1134
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
Z
zhaoyingli 已提交
1135
        else:
1136
            self._switch_mode(self._mode)
Z
zhaoyingli 已提交
1137

1138 1139 1140 1141 1142 1143
        test_dataloader = self._prepare_dataloader_from_generator(
            dataset=test_data,
            capacity=70,
            iterable=False,
            batch_size=batch_size,
            steps_per_epoch=steps,
1144 1145
            collate_fn=collate_fn,
        )
Z
zhaoyingli 已提交
1146

1147
        fetch_names, fetch_indices = self._prepare_fetch(None, mode=self._mode)
1148

Z
zhaoyingli 已提交
1149 1150 1151 1152 1153
        outputs = []
        cbks = config_callbacks(callbacks, engine=self, verbose=verbose)
        test_steps = test_dataloader._steps
        cbks.on_begin('predict', {'steps': test_steps})
        logs = {}
1154
        for step, _ in enumerate(test_dataloader):
Z
zhaoyingli 已提交
1155
            cbks.on_batch_begin('predict', step, logs)
1156
            try:
1157 1158
                outs = self._executor.run(
                    self.main_program,
1159
                    fetch_list=fetch_names,
1160
                    use_program_cache=self._strategy.use_cache,
1161 1162
                    return_numpy=self._strategy.return_numpy,
                )
1163
            except core.EOFException:
1164
                break
1165 1166 1167
            logs = self._prepare_logger(
                outs, None, step, None, fetch_names, fetch_indices, self._mode
            )
Z
zhaoyingli 已提交
1168 1169 1170 1171 1172
            cbks.on_batch_end('predict', step, logs)
            outputs.append(list(logs["outputs"].values()))
        cbks.on_end('predict', logs)
        return outputs

1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189
    def dataloader(
        self,
        dataset,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        collate_fn=None,
        num_workers=0,
        use_buffer_reader=True,
        use_shared_memory=True,
        timeout=0,
        worker_init_fn=None,
        epochs=1,
        steps_per_epoch=None,
        sample_split=1,
        mode=None,
    ):
1190 1191 1192
        if mode is not None:
            self.to_mode(mode)
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
1193 1194
            dataset, sample_split, batch_size
        )
1195 1196
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
1197
        else:
1198
            self._switch_mode(self._mode)
1199

1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
        dataloader = self._prepare_dataloader(
            dataset,
            return_list=False,
            batch_size=batch_size,
            shuffle=shuffle,
            drop_last=drop_last,
            collate_fn=collate_fn,
            num_workers=num_workers,
            use_buffer_reader=use_buffer_reader,
            use_shared_memory=use_shared_memory,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
            epochs=epochs,
1213 1214
            steps_per_epoch=steps_per_epoch,
        )
1215 1216
        return dataloader

1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231
    def dataloader_from_generator(
        self,
        dataset,
        capacity=70,
        use_double_buffer=True,
        iterable=True,
        use_multiprocess=False,
        drop_last=True,
        batch_size=1,
        epochs=1,
        steps_per_epoch=None,
        collate_fn=None,
        sample_split=1,
        mode=None,
    ):
1232 1233 1234
        if mode is not None:
            self.to_mode(mode)
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
1235 1236
            dataset, sample_split, batch_size
        )
1237 1238 1239 1240
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
        else:
            self._switch_mode(self._mode)
1241

1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252
        dataloader = self._prepare_dataloader_from_generator(
            dataset=dataset,
            capacity=capacity,
            use_double_buffer=use_double_buffer,
            iterable=iterable,
            return_list=False,
            use_multiprocess=use_multiprocess,
            drop_last=drop_last,
            batch_size=batch_size,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
1253 1254
            collate_fn=collate_fn,
        )
1255 1256
        return dataloader

1257 1258 1259 1260 1261 1262 1263 1264 1265 1266
    def prepare(
        self,
        inputs_spec=None,
        labels_spec=None,
        inputs=None,
        labels=None,
        main_program=None,
        startup_program=None,
        mode=None,
    ):
1267 1268
        if mode is not None:
            self.to_mode(mode)
1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284

        if not self._mode:
            raise ValueError(
                "Please set mode to be prepared with `prepare(mode=...)`"
            )

        if self._has_prepared[self._mode]:
            return

        inputs_spec = self._validate_spec(inputs_spec)
        labels_spec = self._validate_spec(labels_spec)
        inputs = self._validate_vars(inputs)
        labels = self._validate_vars(labels)

        self._orig_main_prog = main_program
        self._orig_startup_prog = startup_program
1285 1286
        if inputs or labels:
            self._skip_build = True
1287 1288
            inputs, labels = self._prepare_data_tensor(
                inputs_spec, labels_spec, inputs, labels
1289
            )
1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300
            if self._orig_main_prog is None:
                self._orig_main_prog = static.default_main_program()
            if self._orig_startup_prog is None:
                self._orig_startup_prog = static.default_startup_program()
        elif inputs_spec or labels_spec:
            self._outside_dataloader = True
            if self._orig_main_prog is None:
                self._orig_main_prog = static.default_main_program()
            if self._orig_startup_prog is None:
                self._orig_startup_prog = static.default_startup_program()
        else:
1301 1302 1303
            assert (
                self._inputs_spec and self._labels_spec
            ), "Please call the dataloader(...) before calling prepare(...)"
1304

1305 1306 1307 1308 1309 1310 1311
        self._inputs_spec, self._labels_spec = inputs_spec, labels_spec
        self._inputs, self._labels = inputs, labels
        if not self._has_prepared[self._mode]:
            self._prepare_program(self._mode)
        else:
            self._switch_mode(self._mode)

1312
    def run(self, data=None, feed=None, fetch_list=None, mode=None):
1313 1314 1315 1316
        if mode is not None:
            self.to_mode(mode)
        feed_dict = self._prepare_feed(data, feed, self._mode)
        fetch_names, fetch_indices = self._prepare_fetch(fetch_list, self._mode)
1317 1318 1319 1320
        if (
            self._outside_dataloader
            and not self._has_prepared_reader[self._mode]
        ):
1321
            self._prepare_reader()
1322 1323 1324 1325 1326 1327 1328 1329 1330 1331
        outs = self._executor.run(
            self.main_program,
            feed=feed_dict,
            fetch_list=fetch_names,
            use_program_cache=self._strategy.use_cache,
            return_numpy=self._strategy.return_numpy,
        )
        logs = self._prepare_logger(
            outs, None, None, None, fetch_names, fetch_indices, self._mode
        )
Z
zhaoyingli 已提交
1332
        return logs
1333

1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349
    def _prepare_dataloader(
        self,
        dataset,
        return_list=True,
        batch_size=1,
        shuffle=False,
        drop_last=False,
        collate_fn=None,
        num_workers=0,
        use_buffer_reader=True,
        use_shared_memory=True,
        timeout=0,
        worker_init_fn=None,
        epochs=1,
        steps_per_epoch=None,
    ):
1350

1351
        if self._strategy.gradient_merge and batch_size is not None:
1352 1353 1354 1355 1356
            assert (
                batch_size % self._k_steps == 0
            ), "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format(
                batch_size, self._k_steps
            )
1357
            batch_size //= self._k_steps
1358

1359 1360
        dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank]
        dist_startup_prog = self._dist_startup_progs[self._mode][self._cur_rank]
1361
        dist_main_block = dist_main_prog.global_block()
1362

1363 1364 1365 1366
        # NOTE: Get feed_list, then insert dataloader op with sharded var shape.
        # Cause predict_program does not contain labels var,
        # then we will add labels var from serial_program to dist_program,
        # that maintains the length of feed_list equal to the length of dataset's values.
1367 1368
        inputs_var = self._feed_vars[self._mode]["inputs"]
        labels_var = self._feed_vars[self._mode]["labels"]
1369 1370 1371 1372
        feed_list = []
        for var in inputs_var + labels_var:
            if var.name in dist_main_block.vars:
                feed_list.append(dist_main_block.vars[var.name])
1373 1374 1375 1376
            else:
                copy_var = dist_main_block._clone_variable(var, var.persistable)
                copy_var.desc.set_original_id(var.desc.original_id())
                feed_list.append(copy_var)
1377 1378

        # insert read op at the end of program
1379
        places = paddle.static.cuda_places()
1380
        with static.program_guard(dist_main_prog, dist_startup_prog):
1381
            dataloader = DistributedDataLoader(
1382
                dataset,
1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397
                feed_list=feed_list,
                places=places,
                return_list=return_list,
                batch_size=batch_size,
                shuffle=shuffle,
                drop_last=drop_last,
                collate_fn=collate_fn,
                num_workers=num_workers,
                use_buffer_reader=use_buffer_reader,
                use_shared_memory=use_shared_memory,
                timeout=timeout,
                worker_init_fn=worker_init_fn,
                epochs=epochs,
                steps_per_epoch=steps_per_epoch,
                split_data=self._strategy.split_data,
1398
                data_parallel_world_size=self._dp_world_sizes,
1399 1400
                data_parallel_rank=self._dp_ranks,
            )
1401

1402 1403
        return dataloader

1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417
    def _prepare_dataloader_from_generator(
        self,
        dataset,
        capacity=None,
        use_double_buffer=True,
        iterable=True,
        return_list=False,
        use_multiprocess=False,
        drop_last=True,
        batch_size=1,
        epochs=1,
        steps_per_epoch=None,
        collate_fn=None,
    ):
1418 1419

        if self._strategy.gradient_merge and batch_size is not None:
1420 1421 1422 1423 1424
            assert (
                batch_size % self._k_steps == 0
            ), "Requires batch_size:[{}] to be divisible by k_steps:[{}].".format(
                batch_size, self._k_steps
            )
1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463
            batch_size //= self._k_steps

        dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank]
        dist_startup_prog = self._dist_startup_progs[self._mode][self._cur_rank]
        dist_main_block = dist_main_prog.global_block()

        # NOTE: Get feed_list, then insert dataloader op with sharded var shape.
        # Cause predict_program does not contain labels var,
        # then we will add labels var from serial_program to dist_program,
        # that maintains the length of feed_list equal to the length of dataset's values.
        inputs_var = self._feed_vars[self._mode]["inputs"]
        labels_var = self._feed_vars[self._mode]["labels"]
        feed_list = []
        for var in inputs_var + labels_var:
            if var.name in dist_main_block.vars:
                feed_list.append(dist_main_block.vars[var.name])
            else:
                copy_var = dist_main_block._clone_variable(var, var.persistable)
                copy_var.desc.set_original_id(var.desc.original_id())
                feed_list.append(copy_var)

        places = paddle.static.cuda_places()
        with static.program_guard(dist_main_prog, dist_startup_prog):
            dataloader = DistributedDataLoaderFromGenerator(
                dataset=dataset,
                feed_list=feed_list,
                capacity=capacity,
                use_double_buffer=use_double_buffer,
                iterable=iterable,
                return_list=return_list,
                use_multiprocess=use_multiprocess,
                drop_last=drop_last,
                places=places,
                batch_size=batch_size,
                epochs=epochs,
                steps_per_epoch=steps_per_epoch,
                collate_fn=collate_fn,
                split_data=self._strategy.split_data,
                data_parallel_world_size=self._dp_world_sizes,
1464 1465
                data_parallel_rank=self._dp_ranks,
            )
1466 1467 1468 1469 1470 1471
        self._prepare_reader()
        return dataloader

    def _tune(self, tune_data, tune_sample_split=None, batch_size=1):
        self._mode = 'train'
        self._inputs_spec, self._labels_spec = self._prepare_data_spec(
1472 1473
            tune_data, tune_sample_split, batch_size
        )
1474 1475
        self._optimization_tuning(self._mode, tune_data, batch_size)

1476 1477
    def _validate_spec(self, specs):
        specs = to_list(specs)
1478
        self._k_steps = self._strategy.gradient_merge.k_steps
1479 1480
        if specs is not None:
            for i, spec in enumerate(specs):
1481 1482 1483 1484
                if not isinstance(spec, InputSpec):
                    raise TypeError(
                        "'spec' must be object of class `paddle.static.InputSpec`."
                    )
1485 1486
                if spec.name is None:
                    raise ValueError(
1487 1488 1489 1490
                        "Requires Input[{}].name != None, but receive `None` with {}.".format(
                            i, spec
                        )
                    )
1491
                if self._k_steps > 1:
1492
                    shape = list(spec.shape)
1493 1494 1495 1496 1497
                    assert (
                        shape[0] % self._k_steps == 0
                    ), "Requires batch_size[{}] to be divisible by k_steps[{}].".format(
                        spec.shape[0], self._k_steps
                    )
1498
                    shape[0] //= self._k_steps
1499
                    spec.shape = shape
1500 1501 1502 1503 1504 1505 1506 1507 1508
        return specs or []

    def _validate_vars(self, vars):
        vars = to_list(vars)
        if vars is not None:
            for i, var in enumerate(vars):
                if not isinstance(var, Variable):
                    raise TypeError("'var' must be a `Variable`.")
        return vars or []
1509

1510 1511 1512 1513
    def _is_local_var(self, var):
        var_name = _to_name_str(var)
        return var_name in self.main_program.global_block().vars

1514 1515 1516 1517
    def _set_recompute_ckpts(self):
        # NOTE hack to enable recompute in engine api for GPT-3
        # TODO support more PaddleNLP/CV models here

1518
        recompute = self._strategy.recompute
1519 1520

        # extract ckpts by specific model
1521
        if isinstance(self._model, paddle.nn.Layer):
1522 1523 1524 1525 1526 1527
            if hasattr(
                self._model, "gpt"
            ) and self._model.__class__.__name__ in [
                'GPTForPretraining',
                'GPTForPretrainingAuto',
            ]:
1528
                exact_ckpts = self._model.gpt.checkpoints
1529
            else:
1530
                exact_ckpts = recompute.checkpoints
1531
        else:
1532
            exact_ckpts = recompute.checkpoints
1533 1534

        # modify strategy
1535 1536
        if recompute.enable:
            recompute.checkpoints = exact_ckpts[:]
1537
            logs = {
1538
                'Model Class': self._model.__class__.__name__,
1539
                'Applied Recompute ckpts': exact_ckpts,
1540 1541 1542
            }
            self._logger.info(logs)

1543 1544 1545 1546
    def _reset_metrics(self):
        for metric in self._metrics:
            metric.reset()

Z
zhaoyingli 已提交
1547 1548 1549 1550 1551 1552
    def _metrics_name(self):
        metrics_name = ['loss'] if self._loss else []
        for m in self._metrics:
            metrics_name.extend(to_list(m.name()))
        return metrics_name

1553
    def _switch_mode(self, mode):
1554 1555 1556
        assert (
            mode in self._dist_main_progs
        ), "{} model is not ready, please call `prepare()` first.".format(mode)
1557
        self.to_mode(mode)
Z
zhaoyingli 已提交
1558
        self._optimizer = self._dist_contexts[mode]._serial_optimizer
1559

1560
    def to_mode(self, mode):
1561 1562 1563 1564 1565
        assert mode in [
            "train",
            "eval",
            "predict",
        ], "mode {} should be one of ['train', 'eval', 'predict']".format(mode)
1566 1567
        self._mode = mode

1568 1569 1570 1571 1572 1573 1574 1575 1576
    def _set_state_dict(self, mode, strict, state_dict, dist_attr):
        program = self._dist_main_progs[mode][self._cur_rank]
        dist_context = self._dist_contexts[mode]
        cur_dist_attr = get_dist_attr(program, dist_context)
        converter = Converter(state_dict, dist_attr, cur_dist_attr)
        state_dict = converter.convert(strict=strict)
        program.set_state_dict(state_dict)

    def save(self, path, training=True):
1577 1578
        """
        Saves the model, parameters, optimizer state to path.
1579 1580 1581 1582 1583 1584 1585
        If `training` is set to False, only inference model will be saved.

        Args:
            path (str): The file prefix to save model. The format
                is 'dirname/file_prefix' or 'file_prefix'. if empty str.
                A exception will be raised.
            training (bool, optional): Whether to save for training. If not, save
1586
                for inference only. If `training` is set to True, the optimizer state
1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598
                will be saved. Otherwise, only the model and parameters are saved.
                This function will silently overwrite existing file at the target
                location. Default: True.

        Returns:
            None

        Examples:

            .. code-block:: python
                import paddle
                import paddle.vision.transforms as T
1599
                from paddle.distributed.fleet import auto
1600 1601 1602 1603 1604 1605 1606 1607 1608
                from paddle.vision.datasets import MNIST

                transform = T.Compose([
                    T.Transpose(),
                    T.Normalize([127.5], [127.5])
                ])
                train_dataset = MNIST(mode='train', transform=transform)

                model = paddle.vision.models.LeNet()
1609
                loss = paddle.nn.CrossEntropyLoss()
1610 1611 1612 1613
                optimizer = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=model.parameters())
                metrics = paddle.metric.Accuracy(topk=(1, 2))

1614
                engine = auto.Engine(model, loss, optimizer, metrics)
1615 1616 1617 1618
                engine.fit(train_dataset,
                           epochs=1,
                           batch_size=64)
                engine.save("./my_model")
1619

1620
        """
1621
        if training:
Z
zhaoyingli 已提交
1622 1623 1624 1625
            assert self._mode in self._serial_main_progs
            serial_program = self._serial_main_progs[self._mode]
            dist_main_prog = self._dist_main_progs[self._mode][self._cur_rank]
            dist_context = self._dist_contexts[self._mode]
1626 1627 1628 1629 1630 1631
            self._saver.save(
                path,
                serial_program=serial_program,
                dist_main_program=dist_main_prog,
                dist_context=dist_context,
            )
1632
        else:
Z
zhaoyingli 已提交
1633 1634 1635 1636
            assert "predict" in self._dist_main_progs
            feed_vars = self._feed_vars["predict"]['inputs']
            fetch_vars = self._fetch_vars["predict"]['outputs']
            dist_main_prog = self._dist_main_progs["predict"][self._cur_rank]
1637 1638 1639 1640 1641 1642 1643
            self._saver.save_inference_model(
                path,
                feed_vars,
                fetch_vars,
                self._executor,
                program=dist_main_prog,
            )
1644

1645 1646 1647 1648 1649 1650
    def load(self, path, strict=True, load_optimizer=True):
        """
        Load the stored model, parameters and optimizer states.

        Args:
            path (str): The prefix of files storing the model states and
1651
                optimizer states.
1652 1653 1654
            strict (bool, optional): Whether to skip the loading of mismatch
                parameter or raise an error when mismatch happens (not found
                the parameter in file storing model states of or receives a
1655
                mismatch shape). Default: True.
1656
            load_optimizer (bool, optional): If True, the stored optimizer
1657
                states is restored. Otherwise, the optimizer states is initialized
1658
                from scratch. Default: True.
1659 1660 1661 1662 1663 1664 1665 1666 1667

        Returns:
            None

        Examples:

            .. code-block:: python
                import paddle
                import paddle.vision.transforms as T
1668
                from paddle.distributed.fleet import auto
1669 1670 1671 1672 1673 1674 1675 1676 1677
                from paddle.vision.datasets import MNIST

                transform = T.Compose([
                    T.Transpose(),
                    T.Normalize([127.5], [127.5])
                ])
                train_dataset = MNIST(mode='train', transform=transform)

                model = paddle.vision.models.LeNet()
1678
                loss = paddle.nn.CrossEntropyLoss()
1679 1680 1681 1682
                optimizer = paddle.optimizer.Adam(
                    learning_rate=0.001, parameters=model.parameters())
                metrics = paddle.metric.Accuracy(topk=(1, 2))

1683
                engine = auto.Engine(model, loss, optimizer, metrics)
1684 1685 1686 1687 1688
                engine.fit(train_dataset,
                           epochs=1,
                           batch_size=64)
                engine.save("./my_model")
                engine.load("./my_model")
1689

1690 1691 1692
        """
        self._strict = strict
        self._state_dict, self._dist_attr = self._saver.load(
1693 1694
            path, load_optimizer
        )
1695
        return self._state_dict, self._dist_attr
1696

1697
    def cost(self, inputs_spec=None, labels_spec=None, mode=None):
1698 1699 1700 1701 1702 1703 1704 1705 1706 1707
        """
        Get and Print cost, including memory of every rank,
        max memory among all ranks, and the global cost of one step based on
        communication cost(computation cost is 0 by default).
        In the future, the flops information of every rank and global cost including
        computation cost will be added.

        Args:
            inputs_spec(InputSpec): The specification of inputs. Default: None.
            labels_spec(InputSpec): The specification of labels. Default: None.
1708
            mode (str): The engine mode must be in ["train", "predict", "eval"]. Default: None.
1709 1710 1711 1712 1713 1714 1715

        Returns:
            Return the global execution time (ms) and max memory (B).

        """
        # Check parallel mode
        if self._strategy.auto_mode == "full":
1716
            self._logger.info(
1717 1718 1719 1720 1721
                "The cost will be calcudated in the search process when the auto mode is full."
            )
            return

        # Check mode
1722 1723 1724
        mode = mode if mode is not None else self._mode
        assert mode is not None, "Please set mode."
        if mode not in self._has_prepared:
1725 1726
            raise ValueError(
                "The mode {} is not in accepted modes {}".format(
1727
                    mode, list(self._has_prepared.keys())
1728 1729
                )
            )
1730 1731
        self.to_mode(mode)

1732 1733 1734
        if inputs_spec is not None and not self._has_prepared[mode]:
            self._inputs_spec = self._validate_spec(inputs_spec)
            self._labels_spec = self._validate_spec(labels_spec)
1735 1736 1737 1738 1739
            self._build(mode)
            self._plan(mode)
        else:
            if _non_static_mode() or self._dygraph_mode:
                raise ValueError(
1740 1741 1742 1743 1744
                    "Please call `prepare()` or `fit()` or  `evaluate()` or  `predict()` before calling `cost()`."
                )
            else:
                self._logger.info(
                    "The program whose cost to be estimated must be static default program. Otherwise, please call `prepare()`before calling `cost()`."
1745
                )
1746 1747 1748 1749 1750 1751 1752 1753
                program = paddle.static.default_main_program()
                if (
                    not program.global_block().ops
                    or not program.global_block().ops
                ) and not self._has_prepared[mode]:
                    raise ValueError(
                        "Please call `prepare()` or `fit()` or  `evaluate()` or  `predict()` before calling `cost()`."
                    )
1754 1755 1756 1757 1758 1759

        # Estimate the exec cost and max memory
        global_cost, max_memory = get_cost_from_engine(self, mode)

        return global_cost.time, max_memory

1760 1761
    @property
    def main_program(self):
1762
        return self._dist_main_progs[self._mode][self._cur_rank]
1763 1764 1765

    @property
    def startup_program(self):
1766
        return self._dist_startup_progs[self._mode][self._cur_rank]
1767 1768 1769

    @property
    def dist_context(self):
1770
        return self._dist_contexts[self._mode]
1771 1772 1773

    @property
    def serial_main_program(self):
1774
        return self._serial_main_progs[self._mode]
1775 1776 1777

    @property
    def serial_startup_program(self):
1778
        return self._serial_startup_progs[self._mode]
1779 1780 1781

    @property
    def fetch_vars(self):
1782
        return self._fetch_vars[self._mode]
1783 1784 1785

    @property
    def inputs(self):
1786
        return self._inputs
1787 1788 1789

    @property
    def labels(self):
1790
        return self._labels