# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import logging
from collections import defaultdict

import paddle
import paddle.distributed.auto_parallel as auto
from paddle import fluid, static
from paddle.io import Dataset
from paddle.metric import Metric
from paddle.static import InputSpec
from paddle.fluid import core
from paddle.fluid import program_guard
from paddle.fluid.layers.utils import flatten
from paddle.fluid.executor import global_scope
from paddle.fluid.backward import append_backward
from paddle.fluid.framework import Operator
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed import fleet
from paddle.distributed.utils import get_logger
from paddle.distributed.passes import new_pass, PassContext

# from .cluster import Cluster, get_default_cluster
from .planner_v2 import Planner
from .parallelizer_v2 import Parallelizer
from .dist_op import DistributedOperator
from .dist_saver import DistributedSaver
from .dist_loader import NonIterableGeneratorLoader
from .utils import make_data_unshard, set_grad_var_shape
from .utils import print_program_with_dist_attr, to_list
from .process_group import get_all_process_groups, get_world_process_group
from .dist_context import DistributedContext, get_default_distributed_context


class Engine:
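    """
    Engine drives the auto-parallel workflow for static-graph programs: it
    builds a serial program for each mode ('train'/'eval'/'predict'), plans
    and parallelizes it, and then executes the rank-local distributed program.
    A minimal usage sketch (`mlp` and `train_dataset` are hypothetical
    placeholders, not defined in this module):

        import paddle
        from paddle.static import InputSpec

        inputs_spec = InputSpec([None, 784], 'float32', 'x')
        labels_spec = InputSpec([None, 1], 'int64', 'y')
        engine = Engine(mlp, inputs_spec, labels_spec)
        engine.prepare(optimizer=paddle.optimizer.SGD(learning_rate=0.001),
                       loss=paddle.nn.CrossEntropyLoss())
        engine.fit(train_dataset, batch_size=64, epochs=2)
    """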

    def __init__(self,
                 model=None,
                 inputs_spec=None,
                 labels_spec=None,
                 cluster=None,
                 strategy=None):
        self.model = model
        self.inputs_spec = self._validate_spec(inputs_spec)
        self.labels_spec = self._validate_spec(labels_spec)
        self.cluster = cluster
        # if self.cluster is None:
        #     self.cluster = get_default_cluster()
        self.strategy = strategy
        if self.strategy is None:
            self.strategy = fleet.DistributedStrategy()

        self._executor = None
        self._cur_rank = paddle.distributed.get_rank()
        self._nranks = paddle.distributed.get_world_size()
        self._saver = DistributedSaver()
        self._logger = get_logger(logging.INFO)

        self._default_strategy = None
        self._orig_main_prog = static.default_main_program()
        self._orig_startup_prog = static.default_startup_program()
        self._orig_dist_context = get_default_distributed_context()
        self._dist_contexts = {}
        self._serial_main_progs = {}
        self._serial_startup_progs = {}
        self._dist_main_progs = defaultdict(dict)  # dist main programs
        self._dist_startup_progs = defaultdict(dict)  # dist startup programs
        self._feed_vars = {}
        self._fetch_vars = {}

    def prepare(self,
                optimizer=None,
                loss=None,
                gradient_scale=True,
                metrics=None,
                all_ranks=False):
        if optimizer and not isinstance(
                optimizer,
            (paddle.optimizer.Optimizer, paddle.fluid.optimizer.Optimizer)):
            raise TypeError(
                "'optimizer' must be an object of class `paddle.optimizer.Optimizer`"
                " or `paddle.fluid.optimizer.Optimizer`.")
        self._optimizer = optimizer

        if loss and not isinstance(loss,
                                   paddle.nn.Layer) and not callable(loss):
            raise TypeError(
                "'loss' must be a subclass of `paddle.nn.Layer` or a callable function."
            )
        self._loss = loss

        metrics = metrics or []
        for metric in to_list(metrics):
            assert isinstance(metric, Metric), \
                "{} is not a subclass of Metric".format(
                    metric.__class__.__name__)
        self._metrics = to_list(metrics)
        self._gradient_scale = gradient_scale

        self._planned_mode = None
        self._modes = ['train', 'eval', 'predict']

        # Build the forward program for each mode
        self._build()

        # Do the auto-parallel process for each mode
        for mode in self._modes:
            # Do the planning process
            self._plan(mode)
            # Do the parallel process
            self._parallel(mode, all_ranks)
            # Init comm and startup program
            self._initialize(mode)

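    # _build traces the model into cloned serial main/startup programs for each
    # mode; loss and metric ops are appended only for non-'predict' modes.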
    def _build(self):
        for mode in self._modes:
            serial_main_prog = self._serial_main_progs.get(mode, None)
            if serial_main_prog is not None:
                return

            losses = []
            metrics = []
            serial_main_prog = self._orig_main_prog.clone()
            serial_startup_prog = self._orig_startup_prog.clone()
            with static.program_guard(serial_main_prog, serial_startup_prog):
                inputs_spec = self.inputs_spec
                labels_spec = self.labels_spec if self.labels_spec else []
                inputs = [s._create_feed_layer() for s in inputs_spec]
                labels = [s._create_feed_layer() for s in labels_spec]
                outputs = to_list(self.model(*inputs))
                if mode != "predict" and self._loss:
                    losses = to_list(self._loss(*(outputs + labels)))

                if mode != "predict":
                    for metric in self._metrics:
                        metrics.extend(
                            to_list(metric.compute(*(outputs + labels))))

            default_ctx = get_default_distributed_context()
            if not default_ctx.has_annotation or self._default_strategy:
                inputs = [self._set_data_parallel(var) for var in inputs]
                labels = [self._set_data_parallel(var) for var in labels]

            # self._feed_vars[mode] = {"inputs": inputs, "labels": labels}
            feed_vars = {"inputs": inputs, "labels": labels}

            # self._fetch_vars[mode] = {
            #     "outputs": flatten(outputs),
            #     "loss": losses,
            #     "metrics": metrics
            # }
            fetch_vars = {
                "outputs": flatten(outputs),
                "loss": losses,
                "metrics": metrics
            }

            self._dist_contexts[mode] = DistributedContext(
                serial_main_prog, serial_startup_prog, self._optimizer, losses,
                feed_vars, fetch_vars, self.cluster, self.strategy)
            self._dist_contexts[mode].gradient_scale = self._gradient_scale

    def _plan(self, mode):
        if self._planned_mode is None:
            self._planned_mode = mode
        else:
            self._init_dist_context(mode)

        self.planner = Planner(mode, self._dist_contexts[mode])
        self.planner.plan()

    def _parallel(self, mode, all_ranks):
        # Parallelize the program based on the planner's results.
        # For now, the completer has to be passed to the parallelizer,
        # because we may use it to complete the annotation of the backward and update ops.
        parallelizer = Parallelizer(mode, self.planner.completer,
                                    self._dist_contexts[mode])
        if not all_ranks:
            parallelizer.parallel(self._cur_rank)
        else:
            parallelizer.parallel_all()

    def _init_dist_context(self, mode):
        # Init dist_context['mode'] with the first planned dist_context
        # to guarantee that the train/eval/predict modes share the same parallel strategy
        dist_context = self._dist_contexts[mode]
        origin_main_prog = dist_context._original_serial_main_program
        ref_mode = self._planned_mode
        ref_dist_context = self._dist_contexts[ref_mode]
        ref_origin_main_prog = ref_dist_context._original_serial_main_program
        ref_blocks = ref_origin_main_prog.blocks
        for ib, block in enumerate(origin_main_prog.blocks):
            for iop, op in enumerate(block.ops):
                ref_op = ref_blocks[ib].ops[iop]
                assert op.type == ref_op.type, \
                    "'{}' mode op '{}' is different from '{}' mode op '{}'. ".format(mode, op.type, ref_mode, ref_op.type)
                ref_op_dist_attr = ref_dist_context.get_op_dist_attr_for_program(
                    ref_op)
                dist_context.set_op_dist_attr_for_program(op, ref_op_dist_attr)

    def _initialize(self, mode):
        # Get the current content from the distributed context
        self._serial_main_progs[mode] = self._dist_contexts[
            mode].serial_main_program
        self._serial_startup_progs[mode] = self._dist_contexts[
            mode].serial_startup_program
        self._dist_main_progs[mode] = self._dist_contexts[
            mode].dist_main_programs
        self._dist_startup_progs[mode] = self._dist_contexts[
            mode].dist_startup_programs
        self._feed_vars[mode] = self._dist_contexts[mode].serial_feed_vars
        self._fetch_vars[mode] = self._dist_contexts[mode].serial_fetch_vars

        if self._nranks > 1:
            # Traverse the programs of the different ranks and each of their ops,
            # instantiating the communication groups given by the process mapping.
            all_process_groups = get_all_process_groups()
            for process_group in all_process_groups:
                if self._cur_rank not in process_group.ranks:
                    continue
                process_group.instantiate()

        # Initialize the executor and run only the startup ops whose
        # variables are not yet initialized in the global scope.
        self._place = _get_device()
        if isinstance(self._place, fluid.CUDAPlace):
            self._place = fluid.CUDAPlace(ParallelEnv().dev_id)
        if self._executor is None:
            self._executor = paddle.static.Executor(self._place)
            uninitialized = []
            dist_startup_prog = self._dist_startup_progs[mode][self._cur_rank]
            for var in dist_startup_prog.list_vars():
                scope_var = global_scope().find_var(var.name)
                if scope_var and scope_var.get_tensor()._is_initialized():
                    continue
                uninitialized.append(var)
            if uninitialized:
                prune_startup_prog = dist_startup_prog._prune(uninitialized)
                self._executor.run(prune_startup_prog)

    def fit(self,
            train_data,
            batch_size=1,
            epochs=1,
            steps_per_epoch=None,
            use_program_cache=False,
            return_numpy=True):
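        # Runs the rank-local distributed 'train' program step by step; assumes
        # prepare() has been called with an optimizer and a loss.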
        # TODO: callbacks
        # TODO: evaluate after training
        self.mode = 'train'
        assert self.mode in self._dist_main_progs, \
            "train model is not ready, please call `engine.prepare()` first."
        train_dataloader = self._create_dataloader(train_data, batch_size,
                                                   epochs, steps_per_epoch)

        outputs = []
        for epoch in range(epochs):
            for step, data in enumerate(train_dataloader):
                logs, loss = self._train_step(data, use_program_cache,
                                              return_numpy)
                outputs.append(loss)
                train_logs = {
                    "train_" + name: val
                    for name, val in logs.items()
                }
                self._logger.info(train_logs)
        return outputs

    def evaluate(self,
                 eval_data,
                 batch_size=1,
                 use_program_cache=False,
                 return_numpy=True):
        self.mode = 'eval'
        assert self.mode in self._dist_main_progs, \
            "eval model is not ready, please call `engine.prepare()` first."
        eval_dataloader = self._create_dataloader(eval_data, batch_size)

        for step, data in enumerate(eval_dataloader):
            eval_logs = dict()
            outs = self._eval_step(data, use_program_cache, return_numpy)
            eval_logs["eval_loss"] = outs[0] if len(outs) > 0 else []
            for metric in self._metrics:
                results = metric.accumulate()
                for i, res in enumerate(to_list(results)):
                    eval_logs["eval_" + metric.name()[i]] = res
            self._logger.info(eval_logs)
        return eval_logs

    def predict(self,
                test_data,
                batch_size=1,
                use_program_cache=False,
                return_numpy=True):
        self.mode = 'predict'
        assert self.mode in self._dist_main_progs, \
            "predict model is not ready, please call `engine.prepare()` first."
        test_dataloader = self._create_dataloader(test_data, batch_size)

        outputs = []
        for step, data in enumerate(test_dataloader):
            logs, outs = self._predict_step(data, use_program_cache,
                                            return_numpy)
            outputs.append(outs)
            predict_logs = {
                "predict_" + name: val
                for name, val in logs.items()
            }
            self._logger.info(predict_logs)
        return outputs

    def _train_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        fetch_vars = self._fetch_vars[self.mode]["loss"]
        fetch_list = self._fetch_list(fetch_vars)

        loss = self._executor.run(self.main_program,
                                  fetch_list=fetch_list,
                                  use_program_cache=use_program_cache,
                                  return_numpy=return_numpy)
        logs["loss"] = loss
        return logs, loss

    def _eval_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        metrics = self._fetch_vars[self.mode]["metrics"]
        losses = self._fetch_vars[self.mode]["loss"]
        fetch_loss = self._fetch_list(losses)
        fetch_metrics = self._fetch_list(metrics)
        fetch_list = fetch_loss + fetch_metrics

        res = self._executor.run(self.main_program,
                                 fetch_list=fetch_list,
                                 use_program_cache=use_program_cache,
                                 return_numpy=return_numpy)
        if not res[len(fetch_loss):]:
            return res[:len(fetch_loss)]
        for metric in self._metrics:
            metric.update(*res[len(fetch_loss):])
        return res[:len(fetch_loss)]

    def _predict_step(self, data, use_program_cache=False, return_numpy=True):
        logs = {}
        fetch_vars = self._fetch_vars[self.mode]["outputs"]
        fetch_list = self._fetch_list(fetch_vars)

        outs = self._executor.run(self.main_program,
                                  fetch_list=fetch_list,
                                  use_program_cache=use_program_cache,
                                  return_numpy=return_numpy)
        logs["pred"] = outs
        return logs, outs

    def _fetch_list(self, fetch_vars):
        fetch_list = []
        for var in fetch_vars:
            if var.name in self.main_program.global_block().vars:
                fetch_list.append(var.name)
        return fetch_list

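    # A rebuild happens on every _create_dataloader call: reader ops left over
    # from a previous fit/evaluate/predict run are stripped from the front of
    # the program, a fresh dataloader is appended under program_guard, and its
    # ops are then prepended so they run before the computation ops.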
    def _create_dataloader(self,
                           dataset,
                           batch_size,
                           epochs=1,
                           steps_per_epoch=None):
        dist_main_prog = self._dist_main_progs[self.mode][self._cur_rank]
        dist_startup_prog = self._dist_startup_progs[self.mode][self._cur_rank]
        dist_context = self._dist_contexts[self.mode]
        dist_main_block = dist_main_prog.global_block()

        # NOTE: Get feed_list from the dist program, then insert the dataloader op
        # with the sharded var shapes. Because the predict program does not contain
        # the labels var, the dataset's values are filtered by the length of feed_list.
        inputs_var = self._feed_vars[self.mode]["inputs"]
        labels_var = self._feed_vars[self.mode]["labels"]
        feed_list = []
        for var in inputs_var + labels_var:
            if var.name in dist_main_block.vars:
                feed_list.append(dist_main_block.vars[var.name])
        dp_world_size, dp_rank = self._get_data_parallel_info(
            feed_list[0], dist_context)

        # Remove the first three ops if fit/evaluate/predict has run before,
        # since each run inserts its own reader ops at the front of the program
        op_size = len(dist_main_block.ops)
        if dist_main_block.ops[0].type == 'create_py_reader':
            op_size -= 3
            for _ in range(3):
                dist_main_block._remove_op(0, sync=False)

        # Insert the read op at the end of the program
        places = paddle.static.cuda_places()
        with static.program_guard(dist_main_prog, dist_startup_prog):
            dataloader = NonIterableGeneratorLoader(
                dataset,
                feed_list,
                places,
                batch_size,
                epochs,
                steps_per_epoch,
                data_parallel_world_size=dp_world_size,
                data_parallel_rank=dp_rank)

        # Move the read op from the end of the program to its start
        new_op_size = len(dist_main_block.ops)
        for _ in range(new_op_size - 1, op_size - 1, -1):
            op = dist_main_block.ops[new_op_size - 1]
            new_op_desc = dist_main_block.desc._prepend_op()
            new_op_desc.copy_from(op.desc)
            new_op = Operator(dist_main_block,
                              new_op_desc,
                              type=new_op_desc.type())
            dist_main_block.ops.insert(0, new_op)
            dist_op = DistributedOperator(new_op)
            dist_context.add_dist_op_for_program(dist_op)
        for _ in range(new_op_size - op_size):
            dist_main_block._remove_op(new_op_size, sync=False)
        dist_main_block._sync_with_cpp()
        return dataloader

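    # Note: every InputSpec passed to Engine must be named, since the feed
    # layers created in _build() are later looked up by name, e.g. (a
    # hypothetical spec) InputSpec([None, 784], 'float32', name='x').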
    def _validate_spec(self, specs):
        specs = to_list(specs)
        if specs is not None:
            for i, spec in enumerate(specs):
                assert isinstance(spec, InputSpec)
                if spec.name is None:
                    raise ValueError(
                        "Requires Input[{}].name != None, but received `None` with {}."
                        .format(i, spec))
        return specs

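    # dims_mapping convention: entry i names the process-mesh axis that shards
    # tensor axis i, and -1 means that axis is replicated. The 'dp' default
    # therefore shards only the batch (first) axis across all ranks.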
    def _set_data_parallel(self, var):
        if self._nranks == 1:
            self._default_strategy = 'serial'
            auto.shard_tensor(var,
                              dist_attr={
                                  "process_mesh": [0],
                                  "dims_mapping":
                                  [-1 for _ in range(len(var.shape))]
                              })
        else:
            self._default_strategy = 'dp'
            auto.shard_tensor(var,
                              dist_attr={
                                  "process_mesh":
                                  list(range(self._nranks)),
                                  "dims_mapping":
                                  [0] + [-1 for _ in range(len(var.shape) - 1)]
                              })

        return var

    def _get_data_parallel_info(self, var, dist_context):
        # get data parallel world size and current data parallel rank
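        # Example (hypothetical mesh): processes [0, 1, 2, 3] with topology
        # [2, 2] and dims_mapping [0, -1] shard the batch axis over mesh axis 0,
        # so each rank sits in a data-parallel group of size 2 and dp_rank is
        # its index within that group.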
        from .utils import _get_comm_group, _get_corresponding_rank

        tensor_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)
        process_mesh = tensor_dist_attr.process_mesh
        dims_mapping = tensor_dist_attr.dims_mapping

        if self._cur_rank not in process_mesh.processes:
            rank_id = _get_corresponding_rank(dist_context, process_mesh,
                                              self._cur_rank)
        else:
            rank_id = self._cur_rank

        batch_size_axis = dims_mapping[0]
        if batch_size_axis > -1 and process_mesh.topology[batch_size_axis] > 1:
            group_ranks = _get_comm_group(process_mesh.processes,
                                          process_mesh.topology,
                                          batch_size_axis, rank_id)
            return len(group_ranks), group_ranks.index(rank_id)

        return None, None

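    # Saving has two flavors: training=True stores the serial program plus the
    # rank-local distributed program and context so training can resume, while
    # training=False exports an inference model restricted to the given mode's
    # feed/fetch vars.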
    def save(self, path, training=True, mode=None):
        if not mode:
            mode = self.mode

        if training:
            assert 'train' in self._serial_main_progs, \
                "training model is not ready, please call `engine.prepare()` first."
            serial_program = self._serial_main_progs["train"]
            dist_main_prog = self._dist_main_progs["train"][self._cur_rank]
            dist_context = self._dist_contexts["train"]
            self._saver.save(path,
                             serial_program=serial_program,
                             dist_main_program=dist_main_prog,
                             dist_context=dist_context)
        else:
            assert mode, "Please set the 'mode' you want to save."
            feed_vars = self._feed_vars[mode]['inputs']
            fetch_vars = self._fetch_vars[mode]['outputs']
            dist_main_prog = self._dist_main_progs[mode][self._cur_rank]
            self._saver.save_inference_model(path,
                                             feed_vars,
                                             fetch_vars,
                                             self._executor,
                                             program=dist_main_prog)

    def load(self, path, strict=True, load_optimizer=True, mode=None):
        if not mode:
            mode = self.mode
        assert mode, "Please set the 'mode' you want to load."

        dist_main_prog = self._dist_main_progs[mode][self._cur_rank]
        dist_context = self._dist_contexts[mode]
        self._saver.load(path, dist_main_prog, dist_context, strict,
                         load_optimizer)

    @property
    def mode(self):
        return self._mode

    @mode.setter
    def mode(self, mode):
        self._mode = mode

    @property
    def main_program(self):
        return self._dist_main_progs[self.mode][self._cur_rank]

    @property
    def startup_program(self):
        return self._dist_startup_progs[self.mode][self._cur_rank]

    @property
    def dist_context(self):
        return self._dist_contexts[self.mode]

    @property
    def serial_main_program(self):
        return self._serial_main_progs[self.mode]

    @property
    def serial_startup_program(self):
        return self._serial_startup_progs[self.mode]