diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index 5aa689ca324c099f239a29e2ee21b8283e378341..bba94d56cca8e77735d8921d007248b2e388a5f6 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -731,8 +731,8 @@ class DynamicGraphAdapter(object):
         if not self.model._optimizer or not optim_state:
             return
 
-        # If optimizer performs set_dict when state vars haven't been created,
-        # which would happen when set_dict before minimize, the state would be
+        # If optimizer performs set_state_dict when state vars haven't been created,
+        # which would happen when set_state_dict is called before minimize, the state would be
         # stored in optimizer._accumulators_holder and loaded lazily.
         # To work around this when loading from static-graph saved states, extend
         # state dict to include keys named according to dygraph naming rules.
@@ -776,7 +776,13 @@ class DynamicGraphAdapter(object):
                                         accum_name + "_0")
                     converted_state[dy_state_name] = state_var
 
-        self.model._optimizer.set_dict(converted_state)
+        if not hasattr(self.model._optimizer, 'set_state_dict'):
+            warnings.warn(
+                "paddle.fluid.optimizer is deprecated in API 2.0, please use paddle.optimizer instead"
+            )
+            self.model._optimizer.set_dict(converted_state)
+        else:
+            self.model._optimizer.set_state_dict(converted_state)
 
 
 class Model(object):
diff --git a/python/paddle/tests/test_model.py b/python/paddle/tests/test_model.py
index 7b79b25cbc3e98b802bad87386ad0572ec6ab8d7..e078595dc9551763f2c4fc1b17f5b4220e3b1f6d 100644
--- a/python/paddle/tests/test_model.py
+++ b/python/paddle/tests/test_model.py
@@ -416,6 +416,29 @@ class TestModelFunction(unittest.TestCase):
             shutil.rmtree(path)
             fluid.disable_dygraph() if dynamic else None
 
+    def test_dynamic_load(self):
+        mnist_data = MnistDataset(mode='train')
+        for new_optimizer in [True, False]:
+            path = tempfile.mkdtemp()
+            paddle.disable_static()
+            net = LeNet()
+            inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
+            labels = [InputSpec([None, 1], 'int64', 'label')]
+            if new_optimizer:
+                optim = paddle.optimizer.Adam(
+                    learning_rate=0.001, parameters=net.parameters())
+            else:
+                optim = fluid.optimizer.Adam(
+                    learning_rate=0.001, parameter_list=net.parameters())
+            model = Model(net, inputs, labels)
+            model.prepare(
+                optimizer=optim, loss=CrossEntropyLoss(reduction="sum"))
+            model.fit(mnist_data, batch_size=64, verbose=0)
+            model.save(path + '/test')
+            model.load(path + '/test')
+            shutil.rmtree(path)
+            paddle.enable_static()
+
     def test_dynamic_save_static_load(self):
         path = tempfile.mkdtemp()
         # dynamic saving
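
Note (not part of the patch): the compatibility branch in the first hunk boils down to the standalone pattern sketched below. restore_optimizer_state is a hypothetical helper name used only for illustration; the sketch assumes only that Paddle 2.0 optimizers expose set_state_dict while the deprecated paddle.fluid optimizers expose set_dict.

    import warnings

    def restore_optimizer_state(optimizer, converted_state):
        # Hypothetical helper, for illustration only: prefer the 2.0 API,
        # falling back to the deprecated fluid method when it is absent.
        if hasattr(optimizer, 'set_state_dict'):
            optimizer.set_state_dict(converted_state)
        else:
            warnings.warn(
                "paddle.fluid.optimizer is deprecated in API 2.0, "
                "please use paddle.optimizer instead")
            optimizer.set_dict(converted_state)

Probing with hasattr rather than checking the optimizer's module keeps the adapter working for any optimizer object that implements either method, which is what the new test exercises with both paddle.optimizer.Adam and fluid.optimizer.Adam.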