From 727b28d7f2fc953f8d7078b216f5814b01c21f35 Mon Sep 17 00:00:00 2001 From: WeiXin Date: Sun, 25 Apr 2021 14:27:54 +0800 Subject: [PATCH] paddle.save/load support nested structure and layer (#32446) * support save/load binary format tensor * Fix error when create cudaplace * Fix error when create cudaplace * Fix error when create cudaplace * get devive context from pool. * move define of 'SerializeToStream' and 'DeserializeFromStream' to 'lod_tensor.cc' and 'selected_rows.cc'. * support complex object * improve coverage. * improve coverage * improve coverage. * fix a bug. * polish API * save/load program * paddle.save/load: layer * deal with conflict * if PY2, block test_paddle_save_load.TestSaveLoadLayer * polish code. * polish code * edit unnittest * The condition for object to be identified as state_dict becomes strict * use 'core._cuda_synchronize' --- .../tests/unittests/test_paddle_save_load.py | 428 +++++++++++++++++- python/paddle/framework/io.py | 173 +++++-- .../static_mode_white_list.cpython-37.pyc | Bin 0 -> 20443 bytes 3 files changed, 551 insertions(+), 50 deletions(-) create mode 100644 tools/__pycache__/static_mode_white_list.cpython-37.pyc diff --git a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py index f1001bfe9cc..3a5c43b2bab 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_save_load.py @@ -27,6 +27,7 @@ import paddle.fluid as fluid from paddle.fluid.optimizer import Adam import paddle.fluid.framework as framework from test_imperative_base import new_program_scope +from paddle.optimizer.lr import LRScheduler BATCH_SIZE = 16 BATCH_NUM = 4 @@ -262,8 +263,31 @@ class TestSaveLoadAny(unittest.TestCase): def test_paddle_save_load_v2(self): paddle.disable_static() + + class StepDecay(LRScheduler): + def __init__(self, + learning_rate, + step_size, + gamma=0.1, + last_epoch=-1, + verbose=False): + self.step_size = step_size + self.gamma = gamma + super(StepDecay, self).__init__(learning_rate, last_epoch, + verbose) + + def get_lr(self): + i = self.last_epoch // self.step_size + return self.base_lr * (self.gamma**i) + layer = LinearNet() - state_dict = layer.state_dict() + inps = paddle.randn([2, IMAGE_SIZE]) + adam = opt.Adam( + learning_rate=StepDecay(0.1, 1), parameters=layer.parameters()) + y = layer(inps) + y.mean().backward() + adam.step() + state_dict = adam.state_dict() path = 'paddle_save_load_v2/model.pdparams' with self.assertRaises(TypeError): paddle.save(state_dict, path, use_binary_format='False') @@ -274,9 +298,15 @@ class TestSaveLoadAny(unittest.TestCase): paddle.save(state_dict, path) load_dict_np = paddle.framework.io._legacy_load(path) for k, v in state_dict.items(): - self.assertTrue( - np.array_equal(v.numpy(), load_dict_tensor[k].numpy())) - self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k])) + if isinstance(v, dict): + self.assertTrue(v == load_dict_tensor[k]) + else: + self.assertTrue( + np.array_equal(v.numpy(), load_dict_tensor[k].numpy())) + if not np.array_equal(v.numpy(), load_dict_np[k]): + print(v.numpy()) + print(load_dict_np[k]) + self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k])) def test_single_pickle_var_dygraph(self): # enable dygraph mode @@ -370,6 +400,366 @@ class TestSaveLoadAny(unittest.TestCase): np.array_equal(tensor.numpy(), np.array(state_dict_param[tensor.name]))) + def test_save_load_complex_object_dygraph_save(self): + paddle.disable_static() + layer = 
paddle.nn.Linear(3, 4) + state_dict = layer.state_dict() + obj1 = [ + paddle.randn( + [3, 4], dtype='float32'), np.random.randn(5, 6), + ('fake_weight', np.ones( + [7, 8], dtype='float32')) + ] + obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} + obj3 = (paddle.randn( + [5, 4], dtype='float32'), np.ndarray( + [3, 4], dtype="float32"), { + "state_dict": state_dict, + "opt": state_dict + }) + obj4 = (np.random.randn(5, 6), (123, )) + + path1 = "test_save_load_any_complex_object_dygraph/obj1" + path2 = "test_save_load_any_complex_object_dygraph/obj2" + path3 = "test_save_load_any_complex_object_dygraph/obj3" + path4 = "test_save_load_any_complex_object_dygraph/obj4" + paddle.save(obj1, path1) + paddle.save(obj2, path2) + paddle.save(obj3, path3) + paddle.save(obj4, path4) + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(load_tensor1[0].numpy(), obj1[0].numpy())) + self.assertTrue(np.array_equal(load_tensor1[1], obj1[1])) + self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(v.numpy(), load_tensor2['k2'][k].numpy())) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue( + np.array_equal(load_tensor3[0].numpy(), obj3[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), + v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_tensor3[2]["opt"][k].numpy(), v.numpy())) + + self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + # static mode + paddle.enable_static() + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), obj1[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue(np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + + for i in 
range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(v.numpy(), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue( + isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal(np.array(load_tensor3[0]), obj3[0].numpy())) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["state_dict"][k], + paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["state_dict"][k]), v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], + paddle.fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal(np.array(load_tensor3[2]["opt"][k]), v.numpy())) + + self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor) + self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy())) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(isinstance(load_array3[0], np.ndarray)) + self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy())) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], v.numpy())) + + for k, v in state_dict.items(): + self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy())) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + def test_save_load_complex_object_static_save(self): + paddle.enable_static() + with new_program_scope(): + # create network + x = paddle.static.data( + name="x", shape=[None, IMAGE_SIZE], dtype='float32') + z = paddle.static.nn.fc(x, 10, bias_attr=False) + z = paddle.static.nn.fc(z, 128, bias_attr=False) + loss = fluid.layers.reduce_mean(z) + place = fluid.CPUPlace( + ) if not paddle.fluid.core.is_compiled_with_cuda( + ) else fluid.CUDAPlace(0) + prog = paddle.static.default_main_program() + exe = paddle.static.Executor(place) + exe.run(paddle.static.default_startup_program()) + + state_dict = prog.state_dict() + keys = list(state_dict.keys()) + obj1 = [ + state_dict[keys[0]], np.random.randn(5, 6), + ('fake_weight', np.ones( + [7, 8], dtype='float32')) + ] + obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123} + obj3 = (state_dict[keys[0]], np.ndarray( + [3, 4], dtype="float32"), { + "state_dict": state_dict, + "opt": state_dict + }) + obj4 = (np.ndarray([3, 4], dtype="float32"), ) + + path1 = "test_save_load_any_complex_object_static/obj1" + path2 = "test_save_load_any_complex_object_static/obj2" + path3 = "test_save_load_any_complex_object_static/obj3" + path4 = "test_save_load_any_complex_object_static/obj4" + paddle.save(obj1, path1) + paddle.save(obj2, path2) + paddle.save(obj3, path3) + paddle.save(obj4, path4) + + 
load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue( + np.array_equal(np.array(load_tensor1[2]), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal( + np.array(v), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor3[0]), obj3[0])) + self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["state_dict"][k], + fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["state_dict"][k]), np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor)) + self.assertTrue( + np.array_equal( + np.array(load_tensor3[2]["opt"][k]), np.array(v))) + + self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor)) + self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue( + type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(np.array(v), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["opt"][k], np.array(v))) + + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + # dygraph mode + paddle.disable_static() + + load_tensor1 = paddle.load(path1, return_numpy=False) + load_tensor2 = paddle.load(path2, return_numpy=False) + load_tensor3 = paddle.load(path3, return_numpy=False) + load_tensor4 = paddle.load(path4, return_numpy=False) + + self.assertTrue( + np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0]))) + self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1])) + self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1])) + for i in range(len(load_tensor1)): + self.assertTrue( + type(load_tensor1[i]) == type(load_tensor2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal( + np.array(v), np.array(load_tensor2['k2'][k]))) + self.assertTrue(load_tensor2['epoch'] == 123) + + self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase)) + 
self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0])) + self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase)) + self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["state_dict"][k], + fluid.core.VarBase)) + self.assertTrue( + np.array_equal(load_tensor3[2]["state_dict"][k].numpy(), + np.array(v))) + + for k, v in state_dict.items(): + self.assertTrue( + isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase)) + self.assertTrue( + np.array_equal(load_tensor3[2]["opt"][k].numpy(), + np.array(v))) + + self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase)) + self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0])) + + load_array1 = paddle.load(path1, return_numpy=True) + load_array2 = paddle.load(path2, return_numpy=True) + load_array3 = paddle.load(path3, return_numpy=True) + load_array4 = paddle.load(path4, return_numpy=True) + + self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0]))) + self.assertTrue(np.array_equal(load_array1[1], obj1[1])) + self.assertTrue(np.array_equal(load_array1[2], obj1[2][1])) + for i in range(len(load_array1)): + self.assertTrue( + type(load_array1[i]) == type(load_array2['k1'][i])) + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(np.array(v), load_array2['k2'][k])) + self.assertTrue(load_array2['epoch'] == 123) + + self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0]))) + self.assertTrue(np.array_equal(load_array3[1], obj3[1])) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["state_dict"][k], np.array( + v))) + + for k, v in state_dict.items(): + self.assertTrue( + np.array_equal(load_array3[2]["opt"][k], np.array(v))) + + self.assertTrue(isinstance(load_array4[0], np.ndarray)) + self.assertTrue(np.array_equal(load_array4[0], obj4[0])) + + def test_varbase_binary_var(self): + paddle.disable_static() + varbase = paddle.randn([3, 2], dtype='float32') + path = 'test_paddle_save_load_varbase_binary_var/varbase' + paddle.save(varbase, path, use_binary_format=True) + load_array = paddle.load(path, return_numpy=True) + load_tensor = paddle.load(path, return_numpy=False) + origin_array = varbase.numpy() + load_tensor_array = load_tensor.numpy() + if paddle.fluid.core.is_compiled_with_cuda(): + fluid.core._cuda_synchronize(paddle.CUDAPlace(0)) + self.assertTrue(np.array_equal(origin_array, load_array)) + self.assertTrue(np.array_equal(origin_array, load_tensor_array)) + class TestSaveLoad(unittest.TestCase): def setUp(self): @@ -431,8 +821,6 @@ class TestSaveLoad(unittest.TestCase): # error test cases, some tests relay base test above # 1. test save obj not dict error test_list = [1, 2, 3] - with self.assertRaises(NotImplementedError): - paddle.save(test_list, "not_dict_error_path") # 2. 
test save path format error with self.assertRaises(ValueError): @@ -471,5 +859,33 @@ class TestSaveLoadProgram(unittest.TestCase): self.assertTrue(origin_startup == load_startup) +class TestSaveLoadLayer(unittest.TestCase): + def test_save_load_layer(self): + if six.PY2: + return + + paddle.disable_static() + inps = paddle.randn([1, IMAGE_SIZE], dtype='float32') + layer1 = LinearNet() + layer2 = LinearNet() + layer1.eval() + layer2.eval() + origin = (layer1(inps), layer2(inps)) + path = "test_save_load_layer_/layer.pdmodel" + paddle.save((layer1, layer2), path) + + # static + paddle.enable_static() + with self.assertRaises(ValueError): + paddle.load(path) + # dygraph + paddle.disable_static() + + loaded_layer = paddle.load(path) + loaded_result = [l(inps) for l in loaded_layer] + for i in range(len(origin)): + self.assertTrue((origin[i] - loaded_result[i]).abs().max() < 1e-10) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index 8f2f56a6628..32a62d2461a 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -235,11 +235,6 @@ def _pickle_save(obj, f, protocol): raise ValueError("Expected 1<'protocol'<5, but received protocol={}". format(protocol)) - if not isinstance(obj, (core.LoDTensor, core.VarBase)): - raise NotImplementedError( - "Support 'paddle.Tensor' or 'paddle.core.LoDTensor', but received {}.". - format(type(obj))) - def reudce_varbase(self): data = self.numpy() name = self.name @@ -287,11 +282,48 @@ def _pickle_save(obj, f, protocol): pickler.dump(obj) -def _use_legacy(obj): - # TODO(weixin):If `obj` is any object, the judgment condition should be more precise. - if not isinstance(obj, dict): +def _contain_x(obj, condition_func): + if isinstance(obj, core.SelectedRows): + raise NotImplementedError( + "`paddle.save` do not support saving 'SelectedRows'.") + + if condition_func(obj): + return True + elif type(obj) in (dict, collections.OrderedDict, list, tuple): + if type(obj) in (dict, collections.OrderedDict): + keys = list(obj.keys()) + else: + keys = range(len(obj)) + flag = False + for key in keys: + flag |= _contain_x(obj[key], condition_func) + if flag: + return True + return flag + else: return False - return True + + +def _is_state_dict(obj): + if isinstance(obj, dict): + + def condition(obj): + return isinstance(obj, (core.Layer, Program, core.VarBase, + core.LoDTensor, core.SelectedRows)) + + # If the value of a dict is a core.VarBase/LoDTensor or a dict + # that does not contain a paddle type(Layer, Program, VarBase, LoDTensor, SelectedRows), + # the dict is considered to be a state_ dict. 
+ for key, value in obj.items(): + if isinstance(value, dict): + for k, v in value.items(): + if _contain_x(v, condition): + return False + elif not isinstance(value, (core.VarBase, core.LoDTensor)): + return False + return True + + return False def _transformed_from_varbase(obj): @@ -348,6 +380,76 @@ def _ndarray_to_tensor(obj, return_numpy): return _to_LodTensor(obj) +def _lod_tensor2varbase(tensor): + return_var = _varbase_creator() + return_var.value().get_tensor().set(tensor, _current_expected_place()) + return return_var + + +def _parse_every_object(obj, condition_func, convert_func): + if condition_func(obj): + return convert_func(obj) + elif type(obj) in (dict, collections.OrderedDict, list): + if type(obj) == list: + keys = range(len(obj)) + else: + keys = list(obj.keys()) + for key in keys: + if condition_func(obj[key]): + obj[key] = convert_func(obj[key]) + else: + obj[key] = _parse_every_object(obj[key], condition_func, + convert_func) + return obj + elif type(obj) == tuple: + return tuple( + _parse_every_object(list(obj), condition_func, convert_func)) + elif type(obj) == set: + return set(_parse_every_object(list(obj), condition_func, convert_func)) + else: + if isinstance(obj, collections.Iterable) and not isinstance(obj, ( + str, np.ndarray, core.VarBase, core.LoDTensor)): + raise NotImplementedError( + "The iteratable objects supported are tuple, list, dict, OrderedDict, string. But received {}.". + format(type(obj))) + return obj + + +def _parse_load_result(obj, return_numpy): + def is_layer(obj): + return isinstance(obj, core.Layer) + + def parse_layer(obj): + temp_dict = _parse_load_result(obj.__dict__, False) + obj.__dict__.update(temp_dict) + return obj + + if _contain_x(obj, is_layer): + if not in_dygraph_mode(): + raise ValueError( + "Layer can only be loaded in dynamic graph mode, but now in static graph mode." + ) + + _parse_every_object(obj, is_layer, parse_layer) + + def tuple_to_tensor(obj): + return _tuple_to_tensor(obj, return_numpy=return_numpy) + + def ndarray_to_tensor(obj): + return _ndarray_to_tensor(obj, return_numpy=return_numpy) + + # tuple(name, ndarry) was converted from varbase of paddle2.1, + # and all tuple(name, ndarry) are converted to tensor. + if _contain_x(obj, _transformed_from_varbase): + return _parse_every_object(obj, _transformed_from_varbase, + tuple_to_tensor) + # If there is no tuple(name, ndary), it is considered to be saved by paddle2.0 + # or converted from LoDTensor, and all ndarrays are converted to tensor. + else: + return _parse_every_object(obj, _transformed_from_lodtensor, + ndarray_to_tensor) + + def _save_lod_tensor(tensor, file_name): if not tensor._is_initialized(): raise ValueError("The saved tensor is not initialized.") @@ -383,6 +485,8 @@ def _save_binary_var(obj, path): _save_lod_tensor(obj, path) elif isinstance(obj, core.SelectedRows): _save_selected_rows(obj, path) + elif isinstance(obj, core.VarBase): + _save_lod_tensor(obj.value().get_tensor(), path) else: # Since the concept of 'Tensor' is only exposed to users, the error message can only contain tensor instead of 'LoDTensor' or 'SelectedRows' raise NotImplementedError( @@ -498,32 +602,20 @@ def save(obj, path, protocol=2, **configs): warnings.warn( "'pickle_protocol' is a deprecated argument. Please use 'protocol' instead." 
) + if isinstance(obj, Program): obj.desc.flush() with open(path, "wb") as f: f.write(obj.desc.serialize_to_string()) - elif _use_legacy(obj): + + elif _is_state_dict(obj): if in_dygraph_mode(): _legacy_save(obj, path, protocol) else: _legacy_static_save(obj, path, protocol) else: - # `protocol` need to be used, `pickle_protocol` is a deprecated arg. - if config.pickle_protocol is not None: - protocol = config.pickle_protocol - warnings.warn( - "'pickle_protocol' is a deprecated argument. Please use 'protocol' instead." - ) - - if _use_legacy(obj): - if in_dygraph_mode(): - _legacy_save(obj, path, protocol) - else: - _legacy_static_save(obj, path, protocol) - else: - # save single variable - with open(path, 'wb') as f: - _pickle_save(obj, f, protocol) + with open(path, 'wb') as f: + _pickle_save(obj, f, protocol) def _legacy_save(obj, path, protocol=2): @@ -703,8 +795,7 @@ def load(path, **configs): # TODO(weixin):If `obj` is any object, the judgment condition should be more precise. if isinstance(load_result, dict): - if isinstance(load_result, dict): - load_result = _pack_loaded_dict(load_result) + load_result = _pack_loaded_dict(load_result) # paddle2.0: paddle.save/load if "StructuredToParameterName@@" in load_result: @@ -716,23 +807,12 @@ def load(path, **configs): del load_result["StructuredToParameterName@@"] else: # paddle2.1 static.save/load - for key in load_result: - load_result[key] = _ndarray_to_tensor( - load_result[key], config.return_numpy) + load_result = _parse_load_result(load_result, + config.return_numpy) else: - # TODO(weixin): support complex objects such as layer. - # If `obj` is any object, the judgment condition should be more precise. - if _transformed_from_lodtensor(load_result): - load_result = _ndarray_to_tensor(load_result, - config.return_numpy) - elif _transformed_from_varbase(load_result): - load_result = _tuple_to_tensor(load_result, - config.return_numpy) - else: - raise NotImplementedError( - 'Only support tensor and state_dict, but received {}.'. 
-                        format(type(load_result)))
+                load_result = _parse_load_result(load_result,
+                                                 config.return_numpy)
     except exception_type as msg_pickle:
         try:
@@ -741,7 +821,12 @@ def load(path, **configs):
         except:
             try:
                 tensor, _ = _load_lod_tensor(path)
-                return tensor
+                if config.return_numpy:
+                    return np.array(tensor)
+                else:
+                    if in_dygraph_mode():
+                        return _lod_tensor2varbase(tensor)
+                    return tensor
             except:
                 try:
                     with open(path, "rb") as f:
diff --git a/tools/__pycache__/static_mode_white_list.cpython-37.pyc b/tools/__pycache__/static_mode_white_list.cpython-37.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1e58ce7689c7db6cc0ce4ed18f87752b16d8beb
GIT binary patch
literal 20443
[base85-encoded payload of the committed .pyc cache file omitted]
--
GitLab
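
Usage sketch (illustrative only, mirroring the new unit tests above; file paths and
variable names are examples, not part of the patch). It shows the nested-structure
support this change adds to paddle.save/paddle.load, including the return_numpy option:

    import numpy as np
    import paddle

    paddle.disable_static()
    layer = paddle.nn.Linear(3, 4)

    # Nested containers mixing Tensors, numpy arrays and plain Python values
    # can now be passed to paddle.save directly (as in obj2 of the tests above).
    obj = {
        'k1': [paddle.randn([3, 4], dtype='float32'), np.random.randn(5, 6)],
        'k2': layer.state_dict(),
        'epoch': 123,
    }
    paddle.save(obj, 'example_save_load/obj.pdobj')

    # return_numpy chooses whether tensors come back as paddle Tensors (False)
    # or as numpy ndarrays (True); non-tensor values such as 'epoch' are unchanged.
    load_tensor = paddle.load('example_save_load/obj.pdobj', return_numpy=False)
    load_array = paddle.load('example_save_load/obj.pdobj', return_numpy=True)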
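
A second sketch along the same lines, covering the Layer-tuple and binary-format
tensor paths exercised by TestSaveLoadLayer and test_varbase_binary_var; again,
the paths below are illustrative:

    import paddle

    paddle.disable_static()

    # Layer objects (here a tuple of them) can be pickled with paddle.save and
    # restored with paddle.load, but only in dynamic graph mode; loading a saved
    # Layer in static graph mode raises ValueError per this patch.
    layer1, layer2 = paddle.nn.Linear(8, 8), paddle.nn.Linear(8, 8)
    paddle.save((layer1, layer2), 'example_save_load/layers.pdmodel')
    loaded_layer1, loaded_layer2 = paddle.load('example_save_load/layers.pdmodel')

    # A single Tensor can also be written with use_binary_format=True, which
    # serializes the underlying LoDTensor instead of pickling it.
    t = paddle.randn([3, 2], dtype='float32')
    paddle.save(t, 'example_save_load/tensor', use_binary_format=True)
    t_loaded = paddle.load('example_save_load/tensor', return_numpy=True)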