Unverified · commit 96180fff, authored by Meteor Liu, committed by GitHub

replace varbase to relevant name or notes as per the context (#53431)

Parent 396fe483
@@ -584,7 +584,7 @@ class GroupShardedStage3(nn.Layer):
                 param, "fw_storage"
             ), f"Find {param.name} don't have fw_storage attribute"
-            param.fw_storage = _VarBaseWrapper(param)
+            param.fw_storage = _TensorWrapper(param)
             assert param.fw_storage.grad is None
             param.fw_storage._copy_gradient_from(param.bw_storage)
             update_list.append(param)
@@ -1062,17 +1062,17 @@ def _UnsliceParam(param):
     return param
 
 
-def _VarBaseWrapper(param):
-    varbase = param.fw_storage
+def _TensorWrapper(param):
+    var = param.fw_storage
     tmp_param = EagerParamBase(
-        shape=varbase.shape, dtype=varbase.dtype, name="slice@" + param.name
+        shape=var.shape, dtype=var.dtype, name="slice@" + param.name
     )
-    varbase._share_buffer_to(tmp_param)
+    var._share_buffer_to(tmp_param)
     tmp_param.regularizer = param.regularizer
     tmp_param.optimize_attr['learning_rate'] = param.optimize_attr[
         'learning_rate'
     ]
-    varbase._clear()
+    var._clear()
     return tmp_param
...
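The renamed `_TensorWrapper` rebuilds a sliced parameter around its forward storage without copying: it creates a fresh `EagerParamBase` with the same shape and dtype, shares the storage buffer into it, carries over the regularizer and learning-rate attributes, and finally clears the original handle. A minimal sketch of that buffer-sharing pattern, assuming a Paddle build of roughly this era where `EagerParamBase` is importable from `paddle.fluid.framework` and eager tensors expose the internal `_share_buffer_to` used above:

```python
import paddle
from paddle.fluid.framework import EagerParamBase  # import path assumed for Paddle ~2.5


def wrap_as_param(storage, name):
    """Wrap an existing eager Tensor's storage in a new parameter without copying."""
    wrapped = EagerParamBase(
        shape=storage.shape, dtype=storage.dtype, name="slice@" + name
    )
    storage._share_buffer_to(wrapped)  # internal API, mirrored from _TensorWrapper above
    return wrapped


x = paddle.rand([4, 8])
p = wrap_as_param(x, "demo")
print(p.name, p.shape)  # slice@demo, [4, 8]
```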
@@ -146,12 +146,9 @@ def sync_params_buffers(
     for _, param in model._obtain_parameters_buffers().items():
         if not isinstance(param, core.eager.Tensor):
             raise TypeError(
-                "The data type of '%s' must be Varbase or eager.Tensor"
-                % param.name
+                "The data type of '%s' must be core.eager.Tensor" % param.name
             )
 
-        # is_distributed param not need to sync when in mp mode
-        if isinstance(param, core.eager.Tensor):
-            if is_model_parallel:
-                if hasattr(param, "is_distributed") and param.is_distributed:
-                    continue
+        if is_model_parallel:
+            if hasattr(param, "is_distributed") and param.is_distributed:
+                continue
@@ -160,6 +157,7 @@ def sync_params_buffers(
         # such as moe's expert parameters
         if getattr(param, "no_sync", False):
             continue
         if param.type == core.VarDesc.VarType.VOCAB:
             continue
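The two hunks above tighten the type check to `core.eager.Tensor` and keep the broadcast skip rules: under model parallelism, parameters marked `is_distributed` are left alone, and parameters flagged `no_sync` (for example MoE expert weights) or of `VOCAB` type are never synchronized. A small standalone sketch of just that filtering step (the helper name and signature are illustrative, not Paddle's API):

```python
def params_to_sync(params, is_model_parallel):
    """Return the parameters that should be broadcast from the source rank.

    Mirrors the skip rules in sync_params_buffers: distributed params are skipped
    under model parallelism, and params explicitly marked no_sync are never synced.
    (The real code also skips VOCAB-type parameters.)
    """
    selected = []
    for param in params:
        if is_model_parallel and getattr(param, "is_distributed", False):
            continue
        if getattr(param, "no_sync", False):
            continue
        selected.append(param)
    return selected
```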
@@ -474,14 +472,14 @@ class DataParallel(layers.Layer):
             self.find_unused_parameters,
         )
 
-    def _find_varbase(self, obj):
+    def _find_tensor(self, obj):
         var_type = core.eager.Tensor
         if isinstance(obj, var_type):
             return [obj]
         if isinstance(obj, (list, tuple)):
-            return itertools.chain(*map(self._find_varbase, obj))
+            return itertools.chain(*map(self._find_tensor, obj))
         if isinstance(obj, dict):
-            return itertools.chain(*map(self._find_varbase, obj.values()))
+            return itertools.chain(*map(self._find_tensor, obj.values()))
         return []
 
     @contextmanager
@@ -536,9 +534,7 @@ class DataParallel(layers.Layer):
            and framework._dygraph_tracer()._has_grad
            and self.grad_need_sync
        ):
-            self._reducer.prepare_for_backward(
-                list(self._find_varbase(outputs))
-            )
+            self._reducer.prepare_for_backward(list(self._find_tensor(outputs)))
        return outputs
 
    @deprecated(
...
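`_find_tensor` (formerly `_find_varbase`) flattens whatever the wrapped layer returned — a single tensor, or tensors nested inside lists, tuples, and dicts — into a flat list that the reducer registers before backward. The same recursive pattern as a standalone sketch using the public `paddle.Tensor` type:

```python
import itertools

import paddle


def find_tensors(obj):
    """Collect every paddle.Tensor reachable from obj (tensor, list/tuple, or dict)."""
    if isinstance(obj, paddle.Tensor):
        return [obj]
    if isinstance(obj, (list, tuple)):
        return itertools.chain(*map(find_tensors, obj))
    if isinstance(obj, dict):
        return itertools.chain(*map(find_tensors, obj.values()))
    return []


outputs = {"logits": paddle.rand([2, 3]), "aux": [paddle.rand([2]), 1.0]}
print(len(list(find_tensors(outputs))))  # 2
```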
@@ -125,7 +125,7 @@ def param_guard(parameters):
 def _convert_into_variable(tensor):
     """
-    Convert Varbase into Variable.
+    Convert Tensor into Variable.
     """
     if isinstance(tensor, core.eager.Tensor):
         # Check whether has been created before.
...
@@ -57,15 +57,14 @@ def _append_backward_desc(main_program, outs):
 def _create_out(var):
     assert isinstance(var, Variable)
     var_desc = var.desc
-    varbase = None
-    var_base = core.eager.Tensor(
+    out = core.eager.Tensor(
         var_desc.dtype(),
         var_desc.shape(),
         var_desc.name(),
         var_desc.type(),
         False,
     )
-    return var_base
+    return out
 
 
 @switch_to_static_graph
...
@@ -20,7 +20,7 @@ from paddle.fluid.framework import in_dygraph_mode
 class TestDataParallelGroup(unittest.TestCase):
-    def create_varbase(self, dtype, shape):
+    def _create_var(self, dtype, shape):
         return paddle.rand(shape=shape, dtype=dtype)
 
     def assign_group_by_size(self, *args):
@@ -30,10 +30,10 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group0(self):
         # one dtype & one limit capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 100]))
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 100]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False], [400]
         )
@@ -42,12 +42,12 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group1(self):
         # multi dtype & one limit capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False, False, False], [400]
         )
@@ -56,10 +56,10 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group2(self):
         # one dtype & multi limit capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 50]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False], [400, 800]
         )
@@ -68,12 +68,12 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group3(self):
         # multi dtype & multi limit capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False, False, False], [200, 400]
         )
@@ -82,12 +82,12 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group4(self):
         # multi dtype & zero limit capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False, False, False], [0]
         )
@@ -96,12 +96,12 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group5(self):
         # multi dtype & infinite capability
         var_list = []
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False, False, False], [10000]
         )
@@ -111,16 +111,16 @@ class TestDataParallelGroup(unittest.TestCase):
         # multi dtype & limit capability & multi tensor type
         var_list = []
         var_list.append(
-            self.create_varbase(
+            self._create_var(
                 "float32",
                 [1, 50],
             )
         )
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [True, False, False, False, False, True], [400]
         )
@@ -129,12 +129,12 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group7(self):
         # multi dtype & multi limit capability & multi tensor type
         var_list = []
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
-        var_list.append(self.create_varbase("float32", [1, 50]))
-        var_list.append(self.create_varbase("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
+        var_list.append(self._create_var("float32", [1, 50]))
+        var_list.append(self._create_var("float64", [1, 25]))
         res = self.assign_group_by_size(
             var_list, [True, False, False, False, False, True], [200, 400]
         )
@@ -143,10 +143,10 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group8(self):
         # one dtype & one limit capability & have tensor_indices
         var_list = []
-        var_list.append(self.create_varbase("float32", [2, 25]))
-        var_list.append(self.create_varbase("float32", [2, 100]))
-        var_list.append(self.create_varbase("float32", [2, 50]))
-        var_list.append(self.create_varbase("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 100]))
+        var_list.append(self._create_var("float32", [2, 50]))
+        var_list.append(self._create_var("float32", [2, 25]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, False], [400], [3, 0, 1, 2]
         )
@@ -155,10 +155,10 @@ class TestDataParallelGroup(unittest.TestCase):
     def test_construct_group9(self):
         # one dtype & one limit capability & have tensor_indices
         var_list = []
-        var_list.append(self.create_varbase("float32", [2, 25]))
-        var_list.append(self.create_varbase("float32", [2, 25]))
-        var_list.append(self.create_varbase("float32", [2, 25]))
-        var_list.append(self.create_varbase("float32", [2, 1000]))
+        var_list.append(self._create_var("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 25]))
+        var_list.append(self._create_var("float32", [2, 1000]))
         res = self.assign_group_by_size(
             var_list, [False, False, False, True], [300], [1, 0, 2, 3]
         )
...
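These tests exercise `assign_group_by_size`, which packs gradient tensors into fusion buckets: tensors of the same dtype accumulate into a group until the group's byte size reaches the current limit, multiple limits are consumed in order, and `tensor_indices` can impose a visiting order. The production logic lives in C++; the sketch below only illustrates the greedy grouping idea in plain Python and makes no claim to match Paddle's exact bucket boundaries:

```python
DTYPE_BYTES = {"float32": 4, "float64": 8}


def assign_group_by_size_sketch(shapes, dtypes, limits):
    """Greedy bucketing: close a group on dtype change or when its byte size
    reaches the current limit; the last limit is reused once the list runs out."""
    groups, current, current_bytes, current_dtype = [], [], 0, None
    limit_idx = 0
    for idx, (shape, dtype) in enumerate(zip(shapes, dtypes)):
        numel = 1
        for dim in shape:
            numel *= dim
        if current and dtype != current_dtype:
            groups.append(current)
            current, current_bytes = [], 0
        current.append(idx)
        current_bytes += numel * DTYPE_BYTES[dtype]
        current_dtype = dtype
        if current_bytes >= limits[min(limit_idx, len(limits) - 1)]:
            groups.append(current)
            current, current_bytes, current_dtype = [], 0, None
            limit_idx += 1
    if current:
        groups.append(current)
    return groups


# The shapes from test_construct_group0: four float32 tensors, 400-byte limit.
print(
    assign_group_by_size_sketch(
        [[2, 50], [2, 100], [2, 50], [2, 25]], ["float32"] * 4, [400]
    )
)
```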
@@ -21,8 +21,8 @@ import paddle
 # NOTE(pangyoki): Tensor View Strategy.
 # Refer to `op_function_generator.py`.
-# For view op, a new output varbase will be created, and this varbase will
-# reuse the input varbase's allocation.
+# For view op, a new output tensor will be created, and this tensor will
+# reuse the input tensor's allocation.
 # View APIs include: `squeeze`, `unsqueeze`, `reshape`, `flatten`, `detach`
 class TestDygraphViewReuseAllocation(unittest.TestCase):
     def setUp(self):
...
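The reworded note describes Paddle's view strategy: view APIs such as `squeeze`, `unsqueeze`, `reshape`, `flatten`, and `detach` return a new tensor object that can reuse the input's allocation instead of copying it. A tiny illustration of the listed APIs (whether the allocation is actually shared depends on the Paddle version and on running in dygraph mode):

```python
import paddle

x = paddle.ones([2, 3])

# Each call below is a view API per the note above: a new Tensor is returned,
# and in dygraph it may reuse x's allocation rather than allocate a copy.
y = x.reshape([6])
z = paddle.squeeze(x.unsqueeze(0), axis=0)
d = x.detach()

print(y.shape, z.shape, d.shape)  # [6] [2, 3] [2, 3]
```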
@@ -913,27 +913,26 @@ class PartialProgramLayer:
             input_vars.append(var)
 
         # mapping from name(string) -> Tensor
-        out_varbase_map = {}
+        out_tensor_map = {}
 
         def create_out(var_id):
             var = self._outputs[var_id]
             assert isinstance(var, framework.Variable)
             var_desc = var.desc
-            varbase = None
 
-            if var_desc.name() in out_varbase_map:
-                return out_varbase_map[var_desc.name()]
+            if var_desc.name() in out_tensor_map:
+                return out_tensor_map[var_desc.name()]
 
-            var_base = core.eager.Tensor(
+            out = core.eager.Tensor(
                 var_desc.dtype(),
                 var_desc.shape(),
                 var_desc.name(),
                 var_desc.type(),
                 False,
             )
-            var_base.stop_gradient = var.stop_gradient
-            out_varbase_map[var_desc.name()] = var_base
-            return var_base
+            out.stop_gradient = var.stop_gradient
+            out_tensor_map[var_desc.name()] = out
+            return out
 
         # Create Tensor to receive output data.
         out_vars = list(map(create_out, self._outputs.var_ids))
...
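`create_out` builds an empty `core.eager.Tensor` for each output variable of the static program and memoizes it by variable name in `out_tensor_map`, so output slots that resolve to the same variable share one receiving tensor. The caching pattern itself is ordinary Python; a generic sketch (the `make_placeholder` factory here is illustrative, not Paddle's API):

```python
def memoized_output_factory(make_placeholder):
    """Return a create(name, spec) function that reuses one placeholder per name."""
    cache = {}

    def create(name, spec):
        if name in cache:
            return cache[name]
        out = make_placeholder(spec)
        cache[name] = out
        return out

    return create


# Usage: two output slots with the same variable name get the same object.
create = memoized_output_factory(lambda spec: list(spec))  # stand-in for core.eager.Tensor(...)
a = create("fc_0.tmp_1", [2, 3])
b = create("fc_0.tmp_1", [2, 3])
assert a is b
```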
@@ -644,7 +644,7 @@ class _ProgramHolder:
     # and executor, executes this program. Key points:
     #
     # 1. Data Sharing:
-    #   The varBase of the dynamic graph is not in the scope, so before the op
+    #   The variable/parameter of the dynamic graph is not in the scope, so before the op
     #   executes the program internally, create persistent variables with the
     #   same name as feed, parameters, and fetch in the scope, and share the
     #   LoDTensor of the op input.
@@ -668,7 +668,7 @@ def _load_persistable_vars_by_program(
     for each_var in persistable_vars:
         orig_each_name = program_holder._suffix_varname_dict[each_var.name()]
         if _is_parameter(each_var, program_holder.infer_program):
-            # create output varbase
+            # create output param
             new_var = framework.EagerParamBase(
                 shape=each_var.shape(),
                 dtype=each_var.dtype(),
@@ -755,7 +755,7 @@ def _load_persistable_vars(
         )
         # get suffix var name, see [why need to append suffix to persistable vars]
         new_name = inv_suffix_varname_dict[name]
-        # create output varbase
+        # create output var or param
         if extra_var_info[name].get('trainable', None) is not None:
             # use default shape and dtype
             new_var = framework.EagerParamBase(
...
@@ -228,7 +228,7 @@ class TestListWithoutControlFlow(unittest.TestCase):
             test_list_pop_without_control_flow_2,
         ]
 
-    def varbase_to_numpy(self, res):
+    def result_to_numpy(self, res):
         if isinstance(res, (list, tuple)):
             res = paddle.utils.map_structure(lambda x: x.numpy(), res)
         else:
@@ -248,7 +248,7 @@ class TestListWithoutControlFlow(unittest.TestCase):
             res = paddle.jit.to_static(self.dygraph_func)(self.input)
         else:
             res = self.dygraph_func(self.input)
-        return self.varbase_to_numpy(res)
+        return self.result_to_numpy(res)
 
     def test_transformed_static_result(self):
         for dyfunc in self.all_dygraph_funcs:
@@ -294,7 +294,7 @@ class TestListInWhileLoop(TestListWithoutControlFlow):
             )
         else:
             res = self.dygraph_func(self.input, self.iter_num)
-        return self.varbase_to_numpy(res)
+        return self.result_to_numpy(res)
 
 
 class TestListInWhileLoopWithStack(TestListInWhileLoop):
...