未验证 提交 240e13a2 编写于 作者: M Meteor Liu 提交者: GitHub

rename _varbase_creator to _create_tensor (#52938)

* rename _varbase_creator to _create_tensor

* rename _varbase_creator to _create_tensor
上级 a7155c5c
...@@ -23,9 +23,9 @@ from paddle.fluid.data_feeder import ( # noqa: F401 ...@@ -23,9 +23,9 @@ from paddle.fluid.data_feeder import ( # noqa: F401
from paddle.fluid.framework import ( # noqa: F401 from paddle.fluid.framework import ( # noqa: F401
OpProtoHolder, OpProtoHolder,
Variable, Variable,
_create_tensor,
_dygraph_tracer, _dygraph_tracer,
_non_static_mode, _non_static_mode,
_varbase_creator,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
default_main_program, default_main_program,
device_guard, device_guard,
......
...@@ -17,7 +17,7 @@ from paddle import _legacy_C_ops ...@@ -17,7 +17,7 @@ from paddle import _legacy_C_ops
from paddle.distributed import collective from paddle.distributed import collective
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
from paddle.framework import LayerHelper, _varbase_creator, in_dygraph_mode from paddle.framework import LayerHelper, _create_tensor, in_dygraph_mode
from paddle.nn import Layer from paddle.nn import Layer
from paddle.nn.utils import dygraph_utils from paddle.nn.utils import dygraph_utils
...@@ -447,7 +447,7 @@ def _linear(x, weight, bias=None, name=None): ...@@ -447,7 +447,7 @@ def _linear(x, weight, bias=None, name=None):
Fuction Linear Fuction Linear
""" """
if in_dygraph_mode(): if in_dygraph_mode():
pre_bias = _varbase_creator(dtype=x.dtype) pre_bias = _create_tensor(dtype=x.dtype)
_legacy_C_ops.matmul( _legacy_C_ops.matmul(
x, x,
weight, weight,
......
...@@ -89,7 +89,7 @@ def _coalesce_tensors(var_groups): ...@@ -89,7 +89,7 @@ def _coalesce_tensors(var_groups):
@framework.dygraph_only @framework.dygraph_only
def _reshape_inplace(x, shape): def _reshape_inplace(x, shape):
x_shape = framework._varbase_creator(dtype=x.dtype) x_shape = framework._create_tensor(dtype=x.dtype)
framework._dygraph_tracer().trace_op( framework._dygraph_tracer().trace_op(
type="reshape2", type="reshape2",
inputs={'X': x}, inputs={'X': x},
......
...@@ -16,9 +16,9 @@ from .. import core ...@@ -16,9 +16,9 @@ from .. import core
from ..framework import ( from ..framework import (
Variable, Variable,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
_varbase_creator,
in_dygraph_mode, in_dygraph_mode,
) )
from ..framework import _create_tensor as framework_create_tensor
from ..layers.layer_function_generator import OpProtoHolder from ..layers.layer_function_generator import OpProtoHolder
from . import no_grad from . import no_grad
from .. import framework from .. import framework
...@@ -78,7 +78,7 @@ def monkey_patch_math_varbase(): ...@@ -78,7 +78,7 @@ def monkey_patch_math_varbase():
shape, value, dtype, framework._current_expected_place() shape, value, dtype, framework._current_expected_place()
) )
else: else:
out = _varbase_creator(dtype=dtype) out = framework_create_tensor(dtype=dtype)
out = _legacy_C_ops.fill_constant( out = _legacy_C_ops.fill_constant(
out, out,
'dtype', 'dtype',
......
...@@ -1148,7 +1148,7 @@ def _debug_string_(proto, throw_on_error=True): ...@@ -1148,7 +1148,7 @@ def _debug_string_(proto, throw_on_error=True):
return proto.__str__() return proto.__str__()
def _varbase_creator( def _create_tensor(
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
name=None, name=None,
shape=None, shape=None,
...@@ -3836,7 +3836,7 @@ class Block: ...@@ -3836,7 +3836,7 @@ class Block:
def create_var(self, *args, **kwargs): def create_var(self, *args, **kwargs):
if _non_static_mode(): if _non_static_mode():
var = _varbase_creator(*args, **kwargs) var = _create_tensor(*args, **kwargs)
else: else:
var = Variable(block=self, *args, **kwargs) var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs: if 'initializer' in kwargs:
......
...@@ -28,7 +28,7 @@ from ..framework import ( ...@@ -28,7 +28,7 @@ from ..framework import (
dygraph_only, dygraph_only,
_dygraph_tracer, _dygraph_tracer,
default_main_program, default_main_program,
_varbase_creator, _create_tensor,
static_only, static_only,
_global_flags, _global_flags,
in_dygraph_mode, in_dygraph_mode,
......
...@@ -20,7 +20,7 @@ from ..layer_helper import LayerHelper ...@@ -20,7 +20,7 @@ from ..layer_helper import LayerHelper
from ..framework import ( from ..framework import (
_current_expected_place, _current_expected_place,
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
_varbase_creator, _create_tensor,
in_dygraph_mode, in_dygraph_mode,
) )
from ..framework import Variable from ..framework import Variable
......
...@@ -232,7 +232,7 @@ class Optimizer: ...@@ -232,7 +232,7 @@ class Optimizer:
if not isinstance(self._learning_rate, _LearningRateEpochDecay): if not isinstance(self._learning_rate, _LearningRateEpochDecay):
var_tmp = None var_tmp = None
var_temp = framework._varbase_creator( var_temp = framework._create_tensor(
None, name='global_step', dtype='int32' None, name='global_step', dtype='int32'
) )
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import logging import logging
from . import framework from . import framework
from .framework import _non_static_mode, _varbase_creator, in_dygraph_mode from .framework import _non_static_mode, in_dygraph_mode
from . import core from . import core
from paddle import _C_ops, _legacy_C_ops from paddle import _C_ops, _legacy_C_ops
......
...@@ -197,7 +197,7 @@ class RunProgramOpTest(unittest.TestCase): ...@@ -197,7 +197,7 @@ class RunProgramOpTest(unittest.TestCase):
def prepare_dygraph_output(self): def prepare_dygraph_output(self):
def create_var_base(is_input, name): def create_var_base(is_input, name):
var = framework._varbase_creator(dtype=None, shape=None, name=name) var = framework._create_tensor(dtype=None, shape=None, name=name)
var.stop_gradient = False var.stop_gradient = False
return var return var
...@@ -210,7 +210,7 @@ class RunProgramOpTest(unittest.TestCase): ...@@ -210,7 +210,7 @@ class RunProgramOpTest(unittest.TestCase):
if global_var._in_eager_mode_: if global_var._in_eager_mode_:
outputs['OutScope'] = [core.Scope()] outputs['OutScope'] = [core.Scope()]
else: else:
outputs['OutScope'] = framework._varbase_creator( outputs['OutScope'] = framework._create_tensor(
type=core.VarDesc.VarType.STEP_SCOPES, type=core.VarDesc.VarType.STEP_SCOPES,
name="program_out_scope", name="program_out_scope",
persistable=True, persistable=True,
......
...@@ -64,7 +64,7 @@ from ..fluid.framework import dygraph_only # noqa: F401 ...@@ -64,7 +64,7 @@ from ..fluid.framework import dygraph_only # noqa: F401
from ..fluid.framework import dygraph_not_support # noqa: F401 from ..fluid.framework import dygraph_not_support # noqa: F401
from ..fluid.framework import ( from ..fluid.framework import (
convert_np_dtype_to_dtype_, convert_np_dtype_to_dtype_,
_varbase_creator, _create_tensor,
OpProtoHolder, OpProtoHolder,
) # noqa: F401 ) # noqa: F401
from ..fluid.framework import _dygraph_tracer # noqa: F401 from ..fluid.framework import _dygraph_tracer # noqa: F401
......
...@@ -31,10 +31,10 @@ from paddle.fluid.framework import ( ...@@ -31,10 +31,10 @@ from paddle.fluid.framework import (
EagerParamBase, EagerParamBase,
Program, Program,
Variable, Variable,
_create_tensor,
_current_expected_place, _current_expected_place,
_dygraph_tracer, _dygraph_tracer,
_non_static_mode, _non_static_mode,
_varbase_creator,
) )
from .io_utils import ( from .io_utils import (
...@@ -133,7 +133,7 @@ def _load_state_dict_from_save_params(model_path): ...@@ -133,7 +133,7 @@ def _load_state_dict_from_save_params(model_path):
# 2. create and load Tensor # 2. create and load Tensor
with fluid.dygraph.guard(): with fluid.dygraph.guard():
for name in var_name_list: for name in var_name_list:
new_var = _varbase_creator(name=name, persistable=True) new_var = _create_tensor(name=name, persistable=True)
_dygraph_tracer().trace_op( _dygraph_tracer().trace_op(
type='load', type='load',
inputs={}, inputs={},
...@@ -458,7 +458,7 @@ def _ndarray_to_tensor(obj, return_numpy): ...@@ -458,7 +458,7 @@ def _ndarray_to_tensor(obj, return_numpy):
def _lod_tensor2varbase(tensor): def _lod_tensor2varbase(tensor):
return_var = _varbase_creator() return_var = _create_tensor()
return_var.value().get_tensor().set(tensor, _current_expected_place()) return_var.value().get_tensor().set(tensor, _current_expected_place())
return return_var return return_var
......
...@@ -650,7 +650,7 @@ def _load_persistable_vars_by_program( ...@@ -650,7 +650,7 @@ def _load_persistable_vars_by_program(
persistable=True, persistable=True,
) )
else: else:
new_var = framework._varbase_creator( new_var = framework._create_tensor(
type=each_var.type(), type=each_var.type(),
name=each_var.name(), name=each_var.name(),
shape=each_var.shape(), shape=each_var.shape(),
...@@ -738,9 +738,7 @@ def _load_persistable_vars( ...@@ -738,9 +738,7 @@ def _load_persistable_vars(
persistable=True, persistable=True,
) )
else: else:
new_var = framework._varbase_creator( new_var = framework._create_tensor(name=new_name, persistable=True)
name=new_name, persistable=True
)
new_var.stop_gradient = extra_var_info[name]['stop_gradient'] new_var.stop_gradient = extra_var_info[name]['stop_gradient']
load_var_dict[new_name] = new_var load_var_dict[new_name] = new_var
......
...@@ -20,7 +20,7 @@ import paddle ...@@ -20,7 +20,7 @@ import paddle
from paddle import _legacy_C_ops from paddle import _legacy_C_ops
from ..fluid.data_feeder import check_variable_and_dtype from ..fluid.data_feeder import check_variable_and_dtype
from ..fluid.framework import _non_static_mode, _varbase_creator from ..fluid.framework import _create_tensor, _non_static_mode
from ..fluid.layer_helper import LayerHelper from ..fluid.layer_helper import LayerHelper
__all__ = [] __all__ = []
...@@ -804,9 +804,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None): ...@@ -804,9 +804,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
label = paddle.cast(label, paddle.int64) label = paddle.cast(label, paddle.int64)
if _non_static_mode(): if _non_static_mode():
if correct is None: if correct is None:
correct = _varbase_creator(dtype="int32") correct = _create_tensor(dtype="int32")
if total is None: if total is None:
total = _varbase_creator(dtype="int32") total = _create_tensor(dtype="int32")
topk_out, topk_indices = paddle.topk(input, k=k) topk_out, topk_indices = paddle.topk(input, k=k)
_acc, _, _ = _legacy_C_ops.accuracy( _acc, _, _ = _legacy_C_ops.accuracy(
......
...@@ -205,7 +205,7 @@ class Dirac(Initializer): ...@@ -205,7 +205,7 @@ class Dirac(Initializer):
if framework.in_dygraph_mode(): if framework.in_dygraph_mode():
with fluid.dygraph.no_grad(): with fluid.dygraph.no_grad():
tmp_tensor = framework._varbase_creator() tmp_tensor = framework._create_tensor()
_C_ops.assign_value_( _C_ops.assign_value_(
tmp_tensor, tmp_tensor,
[len(idx_list)], [len(idx_list)],
...@@ -234,7 +234,7 @@ class Dirac(Initializer): ...@@ -234,7 +234,7 @@ class Dirac(Initializer):
if framework.in_dygraph_mode(): if framework.in_dygraph_mode():
with fluid.dygraph.no_grad(): with fluid.dygraph.no_grad():
tmp_tensor = framework._varbase_creator() tmp_tensor = framework._create_tensor()
_C_ops.assign_value_( _C_ops.assign_value_(
tmp_tensor, tmp_tensor,
[len(value_list)], [len(value_list)],
......
...@@ -17,7 +17,7 @@ import logging ...@@ -17,7 +17,7 @@ import logging
import paddle import paddle
from paddle import _legacy_C_ops, in_dynamic_mode from paddle import _legacy_C_ops, in_dynamic_mode
from paddle.fluid.data_feeder import check_variable_and_dtype from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _varbase_creator from paddle.fluid.framework import _create_tensor
from paddle.fluid.log_helper import get_logger from paddle.fluid.log_helper import get_logger
from paddle.framework import ParamAttr, core from paddle.framework import ParamAttr, core
from paddle.nn import Layer from paddle.nn import Layer
...@@ -87,7 +87,7 @@ class FakeQuantAbsMax(Layer): ...@@ -87,7 +87,7 @@ class FakeQuantAbsMax(Layer):
def forward(self, input): def forward(self, input):
if in_dynamic_mode(): if in_dynamic_mode():
attrs = ('bit_length', self._quant_bits) attrs = ('bit_length', self._quant_bits)
quant_out = _varbase_creator( quant_out = _create_tensor(
type=input.type, type=input.type,
name=f"{input.name}.quantized.dequantized", name=f"{input.name}.quantized.dequantized",
shape=input.shape, shape=input.shape,
...@@ -101,7 +101,7 @@ class FakeQuantAbsMax(Layer): ...@@ -101,7 +101,7 @@ class FakeQuantAbsMax(Layer):
) )
if not out_scale: if not out_scale:
out_scale = _varbase_creator( out_scale = _create_tensor(
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
name=self._scale_name, name=self._scale_name,
shape=[1], shape=[1],
...@@ -210,7 +210,7 @@ class FakeQuantMovingAverageAbsMax(Layer): ...@@ -210,7 +210,7 @@ class FakeQuantMovingAverageAbsMax(Layer):
'is_test', 'is_test',
not self.training, not self.training,
) )
quant_out = _varbase_creator( quant_out = _create_tensor(
type=input.type, type=input.type,
name=f"{input.name}.quantized.dequantized", name=f"{input.name}.quantized.dequantized",
shape=input.shape, shape=input.shape,
...@@ -322,7 +322,7 @@ class FakeQuantChannelWiseAbsMax(Layer): ...@@ -322,7 +322,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
'quant_axis', 'quant_axis',
self._quant_axis, self._quant_axis,
) )
quant_out = _varbase_creator( quant_out = _create_tensor(
type=input.type, type=input.type,
name=f"{input.name}.quantized.dequantized", name=f"{input.name}.quantized.dequantized",
shape=input.shape, shape=input.shape,
...@@ -336,7 +336,7 @@ class FakeQuantChannelWiseAbsMax(Layer): ...@@ -336,7 +336,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
out_scale, op=paddle.distributed.ReduceOp.MAX out_scale, op=paddle.distributed.ReduceOp.MAX
) )
if out_scale is None: if out_scale is None:
out_scale = _varbase_creator( out_scale = _create_tensor(
type=core.VarDesc.VarType.LOD_TENSOR, type=core.VarDesc.VarType.LOD_TENSOR,
name=self._scale_name, name=self._scale_name,
shape=[self._channel_num], shape=[self._channel_num],
...@@ -441,7 +441,7 @@ class MovingAverageAbsMaxScale(Layer): ...@@ -441,7 +441,7 @@ class MovingAverageAbsMaxScale(Layer):
not self.training, not self.training,
) )
quant_out = _varbase_creator( quant_out = _create_tensor(
type=input.type, type=input.type,
name=f"{input.name}.tmp", name=f"{input.name}.tmp",
shape=input.shape, shape=input.shape,
......
...@@ -17,8 +17,8 @@ from functools import reduce ...@@ -17,8 +17,8 @@ from functools import reduce
import paddle import paddle
from paddle import _C_ops from paddle import _C_ops
from paddle.fluid.framework import ( from paddle.fluid.framework import (
_create_tensor,
_dygraph_tracer, _dygraph_tracer,
_varbase_creator,
dygraph_only, dygraph_only,
in_dygraph_mode, in_dygraph_mode,
) )
...@@ -26,7 +26,7 @@ from paddle.fluid.framework import ( ...@@ -26,7 +26,7 @@ from paddle.fluid.framework import (
# input==output, inplace strategy of reshape has no cost almostly # input==output, inplace strategy of reshape has no cost almostly
def _inplace_reshape_dygraph(x, shape): def _inplace_reshape_dygraph(x, shape):
x_shape = _varbase_creator(dtype='int64') x_shape = _create_tensor(dtype='int64')
if in_dygraph_mode(): if in_dygraph_mode():
with paddle.fluid.dygraph.no_grad(): with paddle.fluid.dygraph.no_grad():
tmp_out = _C_ops.reshape(x, shape) tmp_out = _C_ops.reshape(x, shape)
...@@ -104,7 +104,7 @@ def parameters_to_vector(parameters, name=None): ...@@ -104,7 +104,7 @@ def parameters_to_vector(parameters, name=None):
origin_shapes.append(param.shape) origin_shapes.append(param.shape)
_inplace_reshape_dygraph(param, [-1]) _inplace_reshape_dygraph(param, [-1])
out = _varbase_creator(dtype=dtype) out = _create_tensor(dtype=dtype)
if in_dygraph_mode(): if in_dygraph_mode():
with paddle.fluid.dygraph.no_grad(): with paddle.fluid.dygraph.no_grad():
tmp = _C_ops.concat(parameters, 0) tmp = _C_ops.concat(parameters, 0)
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import paddle import paddle
from paddle import _legacy_C_ops from paddle import _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _varbase_creator from paddle.fluid.framework import _create_tensor
from paddle.framework import ParamAttr, core from paddle.framework import ParamAttr, core
from paddle.nn.initializer import Constant from paddle.nn.initializer import Constant
from paddle.utils import unique_name from paddle.utils import unique_name
...@@ -147,7 +147,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter): ...@@ -147,7 +147,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter):
'is_test', 'is_test',
not self.training, not self.training,
) )
quant_out = _varbase_creator( quant_out = _create_tensor(
type=input.type, type=input.type,
name=f"{input.name}.quantized.dequantized", name=f"{input.name}.quantized.dequantized",
shape=input.shape, shape=input.shape,
......
...@@ -19,7 +19,7 @@ import numpy as np ...@@ -19,7 +19,7 @@ import numpy as np
import paddle import paddle
from paddle import _legacy_C_ops from paddle import _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import Variable, _non_static_mode, _varbase_creator from paddle.fluid.framework import Variable, _create_tensor, _non_static_mode
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from paddle.nn.initializer import ConstantInitializer from paddle.nn.initializer import ConstantInitializer
...@@ -74,9 +74,9 @@ def accuracy(input, label, k=1, correct=None, total=None): ...@@ -74,9 +74,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
""" """
if _non_static_mode(): if _non_static_mode():
if correct is None: if correct is None:
correct = _varbase_creator(dtype="int32") correct = _create_tensor(dtype="int32")
if total is None: if total is None:
total = _varbase_creator(dtype="int32") total = _create_tensor(dtype="int32")
_k = np.array(k).item(0) if isinstance(k, Variable) else k _k = np.array(k).item(0) if isinstance(k, Variable) else k
topk_out, topk_indices = _legacy_C_ops.top_k_v2( topk_out, topk_indices = _legacy_C_ops.top_k_v2(
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册