diff --git a/python/paddle/common_ops_import.py b/python/paddle/common_ops_import.py
index 2638afa3a27332b28029d901cadd717f4ec26b1a..62a55884f0277413ed4c797a5b54dc638e695ed6 100644
--- a/python/paddle/common_ops_import.py
+++ b/python/paddle/common_ops_import.py
@@ -23,9 +23,9 @@ from paddle.fluid.data_feeder import (  # noqa: F401
 from paddle.fluid.framework import (  # noqa: F401
     OpProtoHolder,
     Variable,
+    _create_tensor,
     _dygraph_tracer,
     _non_static_mode,
-    _varbase_creator,
     convert_np_dtype_to_dtype_,
     default_main_program,
     device_guard,
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
index 884af3a441431861bc9757ea8d91c5c7997f880a..9ad7b29ec7e33408037259824a714e9864156dcf 100644
--- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
+++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py
@@ -17,7 +17,7 @@ from paddle import _legacy_C_ops
 from paddle.distributed import collective
 from paddle.fluid import core
 from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
-from paddle.framework import LayerHelper, _varbase_creator, in_dygraph_mode
+from paddle.framework import LayerHelper, _create_tensor, in_dygraph_mode
 from paddle.nn import Layer
 from paddle.nn.utils import dygraph_utils
 
@@ -447,7 +447,7 @@ def _linear(x, weight, bias=None, name=None):
     Fuction Linear
     """
     if in_dygraph_mode():
-        pre_bias = _varbase_creator(dtype=x.dtype)
+        pre_bias = _create_tensor(dtype=x.dtype)
         _legacy_C_ops.matmul(
             x,
             weight,
diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py
index 2046cf08eb06ec88f1dcbc6e946ee00c3f72b676..c016f9d743c7ff91c14610319ec52a9877a72306 100644
--- a/python/paddle/distributed/parallel.py
+++ b/python/paddle/distributed/parallel.py
@@ -89,7 +89,7 @@ def _coalesce_tensors(var_groups):
 
 @framework.dygraph_only
 def _reshape_inplace(x, shape):
-    x_shape = framework._varbase_creator(dtype=x.dtype)
+    x_shape = framework._create_tensor(dtype=x.dtype)
     framework._dygraph_tracer().trace_op(
         type="reshape2",
         inputs={'X': x},
diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 472cd2c1ba41cb0cfae6715f70acde1aaf92d9a4..47d345112294312c99431932abbcb8ac45d1ed9f 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -16,9 +16,9 @@ from .. import core
 from ..framework import (
     Variable,
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
     in_dygraph_mode,
 )
+from ..framework import _create_tensor as framework_create_tensor
 from ..layers.layer_function_generator import OpProtoHolder
 from . import no_grad
 from .. import framework
@@ -78,7 +78,7 @@ def monkey_patch_math_varbase():
                 shape, value, dtype, framework._current_expected_place()
             )
         else:
-            out = _varbase_creator(dtype=dtype)
+            out = framework_create_tensor(dtype=dtype)
             out = _legacy_C_ops.fill_constant(
                 out,
                 'dtype',
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 966f5f650093e6b16ce47abf15cddbf8b516edc0..2893aba2b35e1a43d3128c5f16cdcc02d13109a2 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1148,7 +1148,7 @@ def _debug_string_(proto, throw_on_error=True):
     return proto.__str__()
 
 
-def _varbase_creator(
+def _create_tensor(
     type=core.VarDesc.VarType.LOD_TENSOR,
     name=None,
     shape=None,
@@ -3836,7 +3836,7 @@ class Block:
 
     def create_var(self, *args, **kwargs):
         if _non_static_mode():
-            var = _varbase_creator(*args, **kwargs)
+            var = _create_tensor(*args, **kwargs)
         else:
             var = Variable(block=self, *args, **kwargs)
         if 'initializer' in kwargs:
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d3c65da6def203dd58574c19c59e49fff60905a8..9e4e715507b3713f285805af843432e48ae07680 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -28,7 +28,7 @@ from ..framework import (
     dygraph_only,
     _dygraph_tracer,
     default_main_program,
-    _varbase_creator,
+    _create_tensor,
     static_only,
     _global_flags,
     in_dygraph_mode,
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 0617847fdbfabc450e1631ab47e94a2ac73cec91..06cfbf1cecb390401576025a50ef14f475d8e785 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -20,7 +20,7 @@ from ..layer_helper import LayerHelper
 from ..framework import (
     _current_expected_place,
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
+    _create_tensor,
     in_dygraph_mode,
 )
 from ..framework import Variable
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index db483b151e4eb2d5c895eb546781fba307a22228..e8b48bfc18601c40a7d10c32818e2b5724a73477 100755
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -232,7 +232,7 @@ class Optimizer:
 
         if not isinstance(self._learning_rate, _LearningRateEpochDecay):
             var_tmp = None
-            var_temp = framework._varbase_creator(
+            var_temp = framework._create_tensor(
                 None, name='global_step', dtype='int32'
             )
 
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index 84bfa351c962db527c72d68dd77ad89b4e855336..6ef5f3a5f5986d224bf3b5ab6bdfaa188a79f877 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -15,7 +15,7 @@
 import logging
 
 from . import framework
-from .framework import _non_static_mode, _varbase_creator, in_dygraph_mode
+from .framework import _non_static_mode, in_dygraph_mode
 from . import core
 
 from paddle import _C_ops, _legacy_C_ops
diff --git a/python/paddle/fluid/tests/unittests/test_run_program_op.py b/python/paddle/fluid/tests/unittests/test_run_program_op.py
index c54ab122a95b54a9db97bd0a853f299ebde81a6c..b780bf493397ce8e93a134a637844fed10ab00fd 100644
--- a/python/paddle/fluid/tests/unittests/test_run_program_op.py
+++ b/python/paddle/fluid/tests/unittests/test_run_program_op.py
@@ -197,7 +197,7 @@ class RunProgramOpTest(unittest.TestCase):
 
     def prepare_dygraph_output(self):
         def create_var_base(is_input, name):
-            var = framework._varbase_creator(dtype=None, shape=None, name=name)
+            var = framework._create_tensor(dtype=None, shape=None, name=name)
             var.stop_gradient = False
             return var
 
@@ -210,7 +210,7 @@ class RunProgramOpTest(unittest.TestCase):
         if global_var._in_eager_mode_:
             outputs['OutScope'] = [core.Scope()]
         else:
-            outputs['OutScope'] = framework._varbase_creator(
+            outputs['OutScope'] = framework._create_tensor(
                 type=core.VarDesc.VarType.STEP_SCOPES,
                 name="program_out_scope",
                 persistable=True,
diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py
index 1aca2973e344e47ea41db0eddb6687ecce72aedc..2b9449b7c0902944df7dd90ddc4b1f31c76c7d9b 100755
--- a/python/paddle/framework/__init__.py
+++ b/python/paddle/framework/__init__.py
@@ -64,7 +64,7 @@ from ..fluid.framework import dygraph_only  # noqa: F401
 from ..fluid.framework import dygraph_not_support  # noqa: F401
 from ..fluid.framework import (
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
+    _create_tensor,
     OpProtoHolder,
 )  # noqa: F401
 from ..fluid.framework import _dygraph_tracer  # noqa: F401
diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py
index ef440ac253b59f6dfca149e1b9dc6bc7a3bc4d8b..a482d8bed4150630ca3d637cfc9226acd6f84993 100644
--- a/python/paddle/framework/io.py
+++ b/python/paddle/framework/io.py
@@ -31,10 +31,10 @@ from paddle.fluid.framework import (
     EagerParamBase,
     Program,
     Variable,
+    _create_tensor,
     _current_expected_place,
     _dygraph_tracer,
     _non_static_mode,
-    _varbase_creator,
 )
 
 from .io_utils import (
@@ -133,7 +133,7 @@ def _load_state_dict_from_save_params(model_path):
     # 2. create and load Tensor
     with fluid.dygraph.guard():
         for name in var_name_list:
-            new_var = _varbase_creator(name=name, persistable=True)
+            new_var = _create_tensor(name=name, persistable=True)
             _dygraph_tracer().trace_op(
                 type='load',
                 inputs={},
@@ -458,7 +458,7 @@ def _ndarray_to_tensor(obj, return_numpy):
 
 
 def _lod_tensor2varbase(tensor):
-    return_var = _varbase_creator()
+    return_var = _create_tensor()
     return_var.value().get_tensor().set(tensor, _current_expected_place())
     return return_var
 
diff --git a/python/paddle/jit/translated_layer.py b/python/paddle/jit/translated_layer.py
index 4eba9a31dbc2b9f4f20d9b9441850b0219c68b35..c56608d6bb66e50e9afda9b5a1968b842800ff88 100644
--- a/python/paddle/jit/translated_layer.py
+++ b/python/paddle/jit/translated_layer.py
@@ -650,7 +650,7 @@ def _load_persistable_vars_by_program(
                 persistable=True,
             )
         else:
-            new_var = framework._varbase_creator(
+            new_var = framework._create_tensor(
                 type=each_var.type(),
                 name=each_var.name(),
                 shape=each_var.shape(),
@@ -738,9 +738,7 @@ def _load_persistable_vars(
                 persistable=True,
             )
         else:
-            new_var = framework._varbase_creator(
-                name=new_name, persistable=True
-            )
+            new_var = framework._create_tensor(name=new_name, persistable=True)
         new_var.stop_gradient = extra_var_info[name]['stop_gradient']
         load_var_dict[new_name] = new_var
 
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index 13a916abec592c8d251d476df889800c8313d03b..9372d7e0f63f10d41ac6a42aec0d9b1c2201fff7 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -20,7 +20,7 @@ import paddle
 from paddle import _legacy_C_ops
 
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _non_static_mode, _varbase_creator
+from ..fluid.framework import _create_tensor, _non_static_mode
 from ..fluid.layer_helper import LayerHelper
 
 __all__ = []
@@ -804,9 +804,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
     label = paddle.cast(label, paddle.int64)
     if _non_static_mode():
         if correct is None:
-            correct = _varbase_creator(dtype="int32")
+            correct = _create_tensor(dtype="int32")
         if total is None:
-            total = _varbase_creator(dtype="int32")
+            total = _create_tensor(dtype="int32")
 
         topk_out, topk_indices = paddle.topk(input, k=k)
         _acc, _, _ = _legacy_C_ops.accuracy(
diff --git a/python/paddle/nn/initializer/dirac.py b/python/paddle/nn/initializer/dirac.py
index 3abcc300bc64e164d879088163670b3a0d2d90a6..163c3fbb709bf4806bf0f5d23694c74020c21d3d 100644
--- a/python/paddle/nn/initializer/dirac.py
+++ b/python/paddle/nn/initializer/dirac.py
@@ -205,7 +205,7 @@ class Dirac(Initializer):
 
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = framework._varbase_creator()
+                tmp_tensor = framework._create_tensor()
                 _C_ops.assign_value_(
                     tmp_tensor,
                     [len(idx_list)],
@@ -234,7 +234,7 @@ class Dirac(Initializer):
 
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = framework._varbase_creator()
+                tmp_tensor = framework._create_tensor()
                 _C_ops.assign_value_(
                     tmp_tensor,
                     [len(value_list)],
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index c928a5c5f3140f1ba86684569fa430ab6a4c8dbc..ed8f1efe8f3d4df4eb2b77fa3ff3002c1272045a 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -17,7 +17,7 @@ import logging
 import paddle
 from paddle import _legacy_C_ops, in_dynamic_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _varbase_creator
+from paddle.fluid.framework import _create_tensor
 from paddle.fluid.log_helper import get_logger
 from paddle.framework import ParamAttr, core
 from paddle.nn import Layer
@@ -87,7 +87,7 @@ class FakeQuantAbsMax(Layer):
     def forward(self, input):
         if in_dynamic_mode():
             attrs = ('bit_length', self._quant_bits)
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -101,7 +101,7 @@ class FakeQuantAbsMax(Layer):
             )
 
             if not out_scale:
-                out_scale = _varbase_creator(
+                out_scale = _create_tensor(
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     name=self._scale_name,
                     shape=[1],
@@ -210,7 +210,7 @@ class FakeQuantMovingAverageAbsMax(Layer):
                 'is_test',
                 not self.training,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -322,7 +322,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
                 'quant_axis',
                 self._quant_axis,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -336,7 +336,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
                     out_scale, op=paddle.distributed.ReduceOp.MAX
                 )
             if out_scale is None:
-                out_scale = _varbase_creator(
+                out_scale = _create_tensor(
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     name=self._scale_name,
                     shape=[self._channel_num],
@@ -441,7 +441,7 @@ class MovingAverageAbsMaxScale(Layer):
                 not self.training,
             )
 
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.tmp",
                 shape=input.shape,
diff --git a/python/paddle/nn/utils/transform_parameters.py b/python/paddle/nn/utils/transform_parameters.py
index 3696ad96090598513f9e1a44311d134750c84d51..0a329ae12ada33c75bef35dace1508a2cd2afe41 100644
--- a/python/paddle/nn/utils/transform_parameters.py
+++ b/python/paddle/nn/utils/transform_parameters.py
@@ -17,8 +17,8 @@ from functools import reduce
 import paddle
 from paddle import _C_ops
 from paddle.fluid.framework import (
+    _create_tensor,
     _dygraph_tracer,
-    _varbase_creator,
     dygraph_only,
     in_dygraph_mode,
 )
@@ -26,7 +26,7 @@ from paddle.fluid.framework import (
 
 # input==output, inplace strategy of reshape has no cost almostly
 def _inplace_reshape_dygraph(x, shape):
-    x_shape = _varbase_creator(dtype='int64')
+    x_shape = _create_tensor(dtype='int64')
     if in_dygraph_mode():
         with paddle.fluid.dygraph.no_grad():
             tmp_out = _C_ops.reshape(x, shape)
@@ -104,7 +104,7 @@ def parameters_to_vector(parameters, name=None):
         origin_shapes.append(param.shape)
         _inplace_reshape_dygraph(param, [-1])
 
-    out = _varbase_creator(dtype=dtype)
+    out = _create_tensor(dtype=dtype)
     if in_dygraph_mode():
         with paddle.fluid.dygraph.no_grad():
             tmp = _C_ops.concat(parameters, 0)
diff --git a/python/paddle/quantization/quanters/abs_max.py b/python/paddle/quantization/quanters/abs_max.py
index 58a47bb9896bda514b8267e84423e860133a1753..ce1e0233e2ff66a004a3042be56df123681563d0 100644
--- a/python/paddle/quantization/quanters/abs_max.py
+++ b/python/paddle/quantization/quanters/abs_max.py
@@ -15,7 +15,7 @@
 import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _varbase_creator
+from paddle.fluid.framework import _create_tensor
 from paddle.framework import ParamAttr, core
 from paddle.nn.initializer import Constant
 from paddle.utils import unique_name
@@ -147,7 +147,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter):
                 'is_test',
                 not self.training,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py
index a9a81a2f1cf6edfc64319de6b1a254b2ec5f7f1c..54377d824dae6cdcff1b71a612bb5a1d72dd62be 100644
--- a/python/paddle/static/nn/metric.py
+++ b/python/paddle/static/nn/metric.py
@@ -19,7 +19,7 @@ import numpy as np
 
 import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import Variable, _non_static_mode, _varbase_creator
+from paddle.fluid.framework import Variable, _create_tensor, _non_static_mode
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn.initializer import ConstantInitializer
@@ -74,9 +74,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     if _non_static_mode():
        if correct is None:
-            correct = _varbase_creator(dtype="int32")
+            correct = _create_tensor(dtype="int32")
        if total is None:
-            total = _varbase_creator(dtype="int32")
+            total = _create_tensor(dtype="int32")
 
         _k = np.array(k).item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(