diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py index f1001bfe9ccb7f5d09f23bec4cc75b6d747fe09b..3a5c43b2bab3ed75dda7c2f0e8daabcb73cc786b 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py @@ -27,6 +27,7 @@ import paddle.fluid as fluid from paddle.fluid.optimizer import Adam import paddle.fluid.framework as framework from test_imperative_base import new_program_scope +from paddle.optimizer.lr import LRScheduler BATCH_SIZE = 16 BATCH_NUM = 4 @@ -262,8 +263,31 @@ class TestSaveLoadAny(unittest.TestCase): def test_paddle_save_load_v2(self): paddle.disable_static() + + class StepDecay(LRScheduler): + def __init__(self, + learning_rate, + step_size, + gamma=0.1, + last_epoch=-1, + verbose=False): + self.step_size = step_size + self.gamma = gamma + super(StepDecay, self).__init__(learning_rate, last_epoch, + verbose) + + def get_lr(self): + i = self.last_epoch // self.step_size + return self.base_lr * (self.gamma**i) + layer = LinearNet() - state_dict = layer.state_dict() + inps = paddle.randn([2, IMAGE_SIZE]) + adam = opt.Adam( + learning_rate=StepDecay(0.1, 1), parameters=layer.parameters()) + y = layer(inps) + y.mean().backward() + adam.step() + state_dict = adam.state_dict() path = 'paddle_save_load_v2/model.pdparams' with self.assertRaises(TypeError): paddle.save(state_dict, path, use_binary_format='False') @@ -274,9 +298,15 @@ class TestSaveLoadAny(unittest.TestCase): paddle.save(state_dict, path) load_dict_np = paddle.framework.io._legacy_load(path) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(v.numpy(), load_dict_tensor[k].numpy())) - self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k])) + if isinstance(v, dict): + self.assertTrue(v == load_dict_tensor[k]) + else: + self.assertTrue( + np.array_equal(v.numpy(), load_dict_tensor[k].numpy())) + if not np.array_equal(v.numpy(), load_dict_np[k]): + print(v.numpy()) + print(load_dict_np[k]) + self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k])) def test_single_pickle_var_dygraph(self): # enable dygraph mode @@ -370,6 +400,366 @@ class TestSaveLoadAny(unittest.TestCase): np.array_equal(tensor.numpy(), np.array(state_dict_param[tensor.name]))) + def test_save_load_complex_object_dygraph_save(self): + paddle.disable_static() + layer = paddle.nn.Linear(3, 4) + state_dict = layer.state_dict() + obj1 = [ + paddle.randn( + [3, 4], dtype='float32'), np.random.randn(5, 6), + ('fake_weight', np.ones( + [7, 8], dtype='float32')) + ] + obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} + obj3 = (paddle.randn( + [5, 4], dtype='float32'), np.ndarray( + [3, 4], dtype="float32"), { + "state_dict": state_dict, + "opt": state_dict + }) + obj4 = (np.random.randn(5, 6), (123, )) + + path1 = "test_save_load_any_complex_object_dygraph/obj1" + path2 = "test_save_load_any_complex_object_dygraph/obj2" + path3 = "test_save_load_any_complex_object_dygraph/obj3" + path4 = "test_save_load_any_complex_object_dygraph/obj4" + paddle.save(obj1, path1) + paddle.save(obj2, path2) + paddle.save(obj3, path3) + paddle.save(obj4, path4) + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(load_tensor1[0].numpy(), obj1[0].numpy())) + 
self.assertTrue(np.array_equal(load_tensor1[1], obj1[1])) + self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(v.numpy(), load_tensor2['k2'][k].numpy())) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue( + np.array_equal(load_tensor3[0].numpy(), obj3[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), + v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_tensor3[2]["opt"][k].numpy(), v.numpy())) + + self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + # static mode + paddle.enable_static() + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), obj1[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue(np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(v.numpy(), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue( + isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal(np.array(load_tensor3[0]), obj3[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["state_dict"][k], + paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["state_dict"][k]), v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], + paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal(np.array(load_tensor3[2]["opt"][k]), v.numpy())) + + self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor) + self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + + load_array1 = 
paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(isinstance(load_array3[0], np.ndarray)) + self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + def test_save_load_complex_object_static_save(self): + paddle.enable_static() + with new_program_scope(): + # create network + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32') + z = paddle.static.nn.fc(x, 10, bias_attr=False) + z = paddle.static.nn.fc(z, 128, bias_attr=False) + loss = fluid.layers.reduce_mean(z) + place = fluid.CPUPlace( + ) if not paddle.fluid.core.is_compiled_with_cuda( + ) else fluid.CUDAPlace(0) + prog = paddle.static.default_main_program() + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + + state_dict = prog.state_dict() + keys = list(state_dict.keys()) + obj1 = [ + state_dict[keys[0]], np.random.randn(5, 6), + ('fake_weight', np.ones( + [7, 8], dtype='float32')) + ] + obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} + obj3 = (state_dict[keys[0]], np.ndarray( + [3, 4], dtype="float32"), { + "state_dict": state_dict, + "opt": state_dict + }) + obj4 = (np.ndarray([3, 4], dtype="float32"), ) + + path1 = "test_save_load_any_complex_object_static/obj1" + path2 = "test_save_load_any_complex_object_static/obj2" + path3 = "test_save_load_any_complex_object_static/obj3" + path4 = "test_save_load_any_complex_object_static/obj4" + paddle.save(obj1, path1) + paddle.save(obj2, path2) + paddle.save(obj3, path3) + paddle.save(obj4, path4) + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue( + np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal( + np.array(v), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor3[0]), obj3[0])) + self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + 
isinstance(load_tensor3[2]["state_dict"][k], + fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["state_dict"][k]), np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["opt"][k]), np.array(v))) + + self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue( + type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(np.array(v), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["opt"][k], np.array(v))) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + # dygraph mode + paddle.disable_static() + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal( + np.array(v), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase)) + self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0])) + self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase)) + self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["state_dict"][k], + fluid.core.VarBase)) + self.assertTrue( + np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), + np.array(v))) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase)) + self.assertTrue( + np.array_equal(load_tensor3[2]["opt"][k].numpy(), + np.array(v))) + + self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase)) + self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) + 
self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue( + type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(np.array(v), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["opt"][k], np.array(v))) + + self.assertTrue(isinstance(load_array4[0], np.ndarray)) + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + def test_varbase_binary_var(self): + paddle.disable_static() + varbase = paddle.randn([3, 2], dtype='float32') + path = 'test_paddle_save_load_varbase_binary_var/varbase' + paddle.save(varbase, path, use_binary_format=True) + load_array = paddle.load(path, return_numpy=True) + load_tensor = paddle.load(path, return_numpy=False) + origin_array = varbase.numpy() + load_tensor_array = load_tensor.numpy() + if paddle.fluid.core.is_compiled_with_cuda(): + fluid.core._cuda_synchronize(paddle.CUDAPlace(0)) + self.assertTrue(np.array_equal(origin_array, load_array)) + self.assertTrue(np.array_equal(origin_array, load_tensor_array)) + class TestSaveLoad(unittest.TestCase): def setUp(self): @@ -431,8 +821,6 @@ class TestSaveLoad(unittest.TestCase): # error test cases, some tests relay base test above # 1. test save obj not dict error test_list = [1, 2, 3] - with self.assertRaises(NotImplementedError): - paddle.save(test_list, "not_dict_error_path") # 2. test save path format error with self.assertRaises(ValueError): @@ -471,5 +859,33 @@ class TestSaveLoadProgram(unittest.TestCase): self.assertTrue(origin_startup == load_startup) +class TestSaveLoadLayer(unittest.TestCase): + def test_save_load_layer(self): + if six.PY2: + return + + paddle.disable_static() + inps = paddle.randn([1, IMAGE_SIZE], dtype='float32') + layer1 = LinearNet() + layer2 = LinearNet() + layer1.eval() + layer2.eval() + origin = (layer1(inps), layer2(inps)) + path = "test_save_load_layer_/layer.pdmodel" + paddle.save((layer1, layer2), path) + + # static + paddle.enable_static() + with self.assertRaises(ValueError): + paddle.load(path) + # dygraph + paddle.disable_static() + + loaded_layer = paddle.load(path) + loaded_result = [l(inps) for l in loaded_layer] + for i in range(len(origin)): + self.assertTrue((origin[i] - loaded_result[i]).abs().max() < 1e-10) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index 8f2f56a6628ed26ca2bdc0f3894df0c15e474cc1..32a62d2461a14b2a68ca92844afad0f6a102424a 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -235,11 +235,6 @@ def _pickle_save(obj, f, protocol): raise ValueError("Expected 1<'protocol'<5, but received protocol={}". format(protocol)) - if not isinstance(obj, (core.LoDTensor, core.VarBase)): - raise NotImplementedError( - "Support 'paddle.Tensor' or 'paddle.core.LoDTensor', but received {}.". 
- format(type(obj))) - def reudce_varbase(self): data = self.numpy() name = self.name @@ -287,11 +282,48 @@ def _pickle_save(obj, f, protocol): pickler.dump(obj) -def _use_legacy(obj): - # TODO(weixin):If `obj` is any object, the judgment condition should be more precise. - if not isinstance(obj, dict): +def _contain_x(obj, condition_func): + if isinstance(obj, core.SelectedRows): + raise NotImplementedError( + "`paddle.save` does not support saving 'SelectedRows'.") + + if condition_func(obj): + return True + elif type(obj) in (dict, collections.OrderedDict, list, tuple): + if type(obj) in (dict, collections.OrderedDict): + keys = list(obj.keys()) + else: + keys = range(len(obj)) + flag = False + for key in keys: + flag |= _contain_x(obj[key], condition_func) + if flag: + return True + return flag + else: return False - return True + + +def _is_state_dict(obj): + if isinstance(obj, dict): + + def condition(obj): + return isinstance(obj, (core.Layer, Program, core.VarBase, + core.LoDTensor, core.SelectedRows)) + + # If the value of a dict is a core.VarBase/LoDTensor or a dict + # that does not contain a paddle type (Layer, Program, VarBase, LoDTensor, SelectedRows), + # the dict is considered to be a state_dict. + for key, value in obj.items(): + if isinstance(value, dict): + for k, v in value.items(): + if _contain_x(v, condition): + return False + elif not isinstance(value, (core.VarBase, core.LoDTensor)): + return False + return True + + return False def _transformed_from_varbase(obj): @@ -348,6 +380,76 @@ def _ndarray_to_tensor(obj, return_numpy): return _to_LodTensor(obj) +def _lod_tensor2varbase(tensor): + return_var = _varbase_creator() + return_var.value().get_tensor().set(tensor, _current_expected_place()) + return return_var + + +def _parse_every_object(obj, condition_func, convert_func): + if condition_func(obj): + return convert_func(obj) + elif type(obj) in (dict, collections.OrderedDict, list): + if type(obj) == list: + keys = range(len(obj)) + else: + keys = list(obj.keys()) + for key in keys: + if condition_func(obj[key]): + obj[key] = convert_func(obj[key]) + else: + obj[key] = _parse_every_object(obj[key], condition_func, + convert_func) + return obj + elif type(obj) == tuple: + return tuple( + _parse_every_object(list(obj), condition_func, convert_func)) + elif type(obj) == set: + return set(_parse_every_object(list(obj), condition_func, convert_func)) + else: + if isinstance(obj, collections.Iterable) and not isinstance(obj, ( + str, np.ndarray, core.VarBase, core.LoDTensor)): + raise NotImplementedError( + "The iterable objects supported are tuple, list, dict, OrderedDict, string. But received {}.". + format(type(obj))) + return obj + + +def _parse_load_result(obj, return_numpy): + def is_layer(obj): + return isinstance(obj, core.Layer) + + def parse_layer(obj): + temp_dict = _parse_load_result(obj.__dict__, False) + obj.__dict__.update(temp_dict) + return obj + + if _contain_x(obj, is_layer): + if not in_dygraph_mode(): + raise ValueError( + "Layer can only be loaded in dynamic graph mode, but now in static graph mode." + ) + + _parse_every_object(obj, is_layer, parse_layer) + + def tuple_to_tensor(obj): + return _tuple_to_tensor(obj, return_numpy=return_numpy) + + def ndarray_to_tensor(obj): + return _ndarray_to_tensor(obj, return_numpy=return_numpy) + + # tuple(name, ndarray) was converted from varbase of paddle2.1, + # and all tuple(name, ndarray) are converted to tensor.
+ if _contain_x(obj, _transformed_from_varbase): + return _parse_every_object(obj, _transformed_from_varbase, + tuple_to_tensor) + # If there is no tuple(name, ndarray), it is considered to be saved by paddle2.0 + # or converted from LoDTensor, and all ndarrays are converted to tensor. + else: + return _parse_every_object(obj, _transformed_from_lodtensor, + ndarray_to_tensor) + + def _save_lod_tensor(tensor, file_name): if not tensor._is_initialized(): raise ValueError("The saved tensor is not initialized.") @@ -383,6 +485,8 @@ def _save_binary_var(obj, path): _save_lod_tensor(obj, path) elif isinstance(obj, core.SelectedRows): _save_selected_rows(obj, path) + elif isinstance(obj, core.VarBase): + _save_lod_tensor(obj.value().get_tensor(), path) else: # Since the concept of 'Tensor' is only exposed to users, the error message can only contain tensor instead of 'LoDTensor' or 'SelectedRows' raise NotImplementedError( @@ -498,32 +602,20 @@ def save(obj, path, protocol=2, **configs): warnings.warn( "'pickle_protocol' is a deprecated argument. Please use 'protocol' instead." ) + if isinstance(obj, Program): obj.desc.flush() with open(path, "wb") as f: f.write(obj.desc.serialize_to_string()) - elif _use_legacy(obj): + + elif _is_state_dict(obj): if in_dygraph_mode(): _legacy_save(obj, path, protocol) else: _legacy_static_save(obj, path, protocol) else: - # `protocol` need to be used, `pickle_protocol` is a deprecated arg. - if config.pickle_protocol is not None: - protocol = config.pickle_protocol - warnings.warn( - "'pickle_protocol' is a deprecated argument. Please use 'protocol' instead." - ) - - if _use_legacy(obj): - if in_dygraph_mode(): - _legacy_save(obj, path, protocol) - else: - _legacy_static_save(obj, path, protocol) - else: - # save single variable - with open(path, 'wb') as f: - _pickle_save(obj, f, protocol) + with open(path, 'wb') as f: + _pickle_save(obj, f, protocol) def _legacy_save(obj, path, protocol=2): @@ -703,8 +795,7 @@ def load(path, **configs): # TODO(weixin):If `obj` is any object, the judgment condition should be more precise. if isinstance(load_result, dict): - if isinstance(load_result, dict): - load_result = _pack_loaded_dict(load_result) + load_result = _pack_loaded_dict(load_result) # paddle2.0: paddle.save/load if "StructuredToParameterName@@" in load_result: @@ -716,23 +807,12 @@ def load(path, **configs): del load_result["StructuredToParameterName@@"] else: # paddle2.1 static.save/load - for key in load_result: - load_result[key] = _ndarray_to_tensor( - load_result[key], config.return_numpy) + load_result = _parse_load_result(load_result, + config.return_numpy) else: - # TODO(weixin): support complex objects such as layer. - # If `obj` is any object, the judgment condition should be more precise. - if _transformed_from_lodtensor(load_result): - load_result = _ndarray_to_tensor(load_result, - config.return_numpy) - elif _transformed_from_varbase(load_result): - load_result = _tuple_to_tensor(load_result, - config.return_numpy) - else: - raise NotImplementedError( - 'Only support tensor and state_dict, but received {}.'.
- format(type(load_result))) + load_result = _parse_load_result(load_result, + config.return_numpy) except exception_type as msg_pickle: try: @@ -741,7 +821,12 @@ def load(path, **configs): except: try: tensor, _ = _load_lod_tensor(path) - return tensor + if config.return_numpy: + return np.array(tensor) + else: + if in_dygraph_mode(): + return _lod_tensor2varbase(tensor) + return tensor except: try: with open(path, "rb") as f:
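For reference, a minimal usage sketch of the behavior the new tests exercise, assuming this patch is applied; the paths, shapes and variable names below are illustrative only and not part of the patch:

import numpy as np
import paddle

paddle.disable_static()

layer = paddle.nn.Linear(3, 4)
# With this patch, nested containers mixing Tensors, numpy arrays and plain
# Python values can be passed to paddle.save directly.
obj = {'state_dict': layer.state_dict(), 'extra': np.random.randn(5, 6), 'epoch': 123}
path = 'demo_save_load/nested_obj'  # illustrative path
paddle.save(obj, path)

# return_numpy=False (the default) restores paddle Tensors,
# return_numpy=True restores numpy arrays instead.
loaded_tensors = paddle.load(path, return_numpy=False)
loaded_arrays = paddle.load(path, return_numpy=True)

# A single Tensor can also be saved in the non-pickle binary format.
t = paddle.randn([3, 2], dtype='float32')
paddle.save(t, 'demo_save_load/tensor', use_binary_format=True)
t_loaded = paddle.load('demo_save_load/tensor', return_numpy=True)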