From a8fec662df01fdd72901fd449dd87513bf87b16f Mon Sep 17 00:00:00 2001
From: lyuwenyu
Date: Tue, 22 Jun 2021 11:42:31 +0800
Subject: [PATCH] fix doc, last iter, and test for amp

---
 python/paddle/hapi/model.py       | 7 +++++--
 python/paddle/tests/test_model.py | 8 ++++++++
 2 files changed, 13 insertions(+), 2 deletions(-)

diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 4ff514483b..7aa5c4f461 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1584,7 +1584,9 @@ class Model(object):
             callbacks (Callback|None): A list of `Callback` instances to apply
                 during training. If None, `ProgBarLogger` and `ModelCheckpoint`
                 are automatically inserted. Default: None.
-            accumulate (int): The number of steps to accumulate gradident during training process before optimizer updates. It can mimic large batch size. Default: 1.
+            accumulate (int): The number of steps to accumulate gradient during
+                the training process before the optimizer updates. It can mimic
+                a large batch size. Default: 1.
 
         Returns:
             None
@@ -2044,7 +2046,8 @@ class Model(object):
 
                 _inputs = [data[:len(self._inputs)], data[len(self._inputs):]]
                 if mode == 'train':
-                    _inputs.append((step + 1) % self._accumulate == 0)
+                    _inputs.append((step + 1) % self._accumulate == 0 or
+                                   step + 1 == len(data_loader))
 
                 outs = getattr(self, mode + '_batch')(*_inputs)
 
diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 789f099e97..904d5732d2 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -727,12 +727,20 @@ class TestModelFunction(unittest.TestCase):
             parameter_list=net.parameters())
         inputs = [InputSpec([None, dim], 'float32', 'x')]
         labels = [InputSpec([None, 1], 'int64', 'label')]
+
         model = Model(net, inputs, labels)
         model.prepare(optim, loss=CrossEntropyLoss(reduction="sum"))
         loss1, = model.train_batch([data], [label], update=False)
         loss2, = model.train_batch([data], [label], update=True)
         np.testing.assert_almost_equal(loss1, loss2, decimal=4)
 
+        model = Model(net, inputs, labels)
+        model.prepare(
+            optim, loss=CrossEntropyLoss(reduction="sum"), amp_configs='O1')
+        loss1, = model.train_batch([data], [label], update=False)
+        loss2, = model.train_batch([data], [label], update=True)
+        np.testing.assert_almost_equal(loss1, loss2, decimal=4)
+
 
 class TestModelWithLRScheduler(unittest.TestCase):
     def test_fit_by_step(self):
-- 
GitLab
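
Note on the "last iter" change: the second hunk in model.py makes the optimizer
update fire on the final batch of an epoch even when the number of batches is
not a multiple of `accumulate`; before this fix, gradients accumulated over a
trailing partial group were never applied. A minimal sketch of that gating
logic in plain Python (no Paddle required; `update_steps` and the example
values are hypothetical, chosen only to illustrate the expression added in the
patch):

    def update_steps(num_batches, accumulate, include_last=True):
        """Return the 0-based batch indices at which the optimizer updates."""
        steps = []
        for step in range(num_batches):
            # Original condition: update only every `accumulate` batches.
            update = (step + 1) % accumulate == 0
            if include_last:
                # The fix: also update on the last batch of the epoch.
                update = update or (step + 1 == num_batches)
            if update:
                steps.append(step)
        return steps

    # 10 batches with accumulate=4:
    print(update_steps(10, 4, include_last=False))  # [3, 7]    batches 8-9 never applied
    print(update_steps(10, 4, include_last=True))   # [3, 7, 9] trailing group applied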