diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 4ff514483bbe3f7f64ebb578fae188e17e1a5712..7aa5c4f4613c7ed6824e2dbe04a05e7f60ab2c3a 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -1584,7 +1584,9 @@ class Model(object):
             callbacks (Callback|None): A list of `Callback` instances to apply
                 during training. If None, `ProgBarLogger` and `ModelCheckpoint`
                 are automatically inserted. Default: None.
-            accumulate (int): The number of steps to accumulate gradident during training process before optimizer updates. It can mimic large batch size. Default: 1.
+            accumulate (int): The number of steps to accumulate gradient during
+                training process before optimizer updates. It can mimic large batch
+                size. Default: 1.
 
         Returns:
             None
@@ -2044,7 +2046,8 @@ class Model(object):
             _inputs = [data[:len(self._inputs)], data[len(self._inputs):]]
 
             if mode == 'train':
-                _inputs.append((step + 1) % self._accumulate == 0)
+                _inputs.append((step + 1) % self._accumulate == 0 or
+                               step + 1 == len(data_loader))
 
             outs = getattr(self, mode + '_batch')(*_inputs)
 
diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 789f099e97880ef01d242c1597295e3b58abe2fa..904d5732d2a53915501efbd70899d168938e311c 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -727,12 +727,20 @@ class TestModelFunction(unittest.TestCase):
             parameter_list=net.parameters())
         inputs = [InputSpec([None, dim], 'float32', 'x')]
         labels = [InputSpec([None, 1], 'int64', 'label')]
+
         model = Model(net, inputs, labels)
         model.prepare(optim, loss=CrossEntropyLoss(reduction="sum"))
         loss1, = model.train_batch([data], [label], update=False)
         loss2, = model.train_batch([data], [label], update=True)
         np.testing.assert_almost_equal(loss1, loss2, decimal=4)
 
+        model = Model(net, inputs, labels)
+        model.prepare(
+            optim, loss=CrossEntropyLoss(reduction="sum"), amp_configs='O1')
+        loss1, = model.train_batch([data], [label], update=False)
+        loss2, = model.train_batch([data], [label], update=True)
+        np.testing.assert_almost_equal(loss1, loss2, decimal=4)
+
 class TestModelWithLRScheduler(unittest.TestCase):

     def test_fit_by_step(self):