Unverified commit 240e13a2, authored by Meteor Liu, committed by GitHub

rename _varbase_creator as _create_tensor (#52938)

* rename _varbase_creator as _create_tensor
Parent: a7155c5c
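For context: this is a pure rename with no behavior change. Every internal call site that previously constructed an uninitialized dygraph tensor via `_varbase_creator` now calls `_create_tensor`, with the same signature. A minimal before/after sketch of a typical call site (illustrative only; `_create_tensor` is a private framework helper, and the import path follows the hunks below):

```python
# Before this change:
# from paddle.fluid.framework import _varbase_creator
# out = _varbase_creator(dtype='float32')

# After:
from paddle.fluid.framework import _create_tensor

# Create an empty dygraph tensor to be filled in by a legacy C++ op,
# the pattern shared by the call sites in this diff.
out = _create_tensor(dtype='float32')
```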
@@ -23,9 +23,9 @@ from paddle.fluid.data_feeder import ( # noqa: F401
 from paddle.fluid.framework import ( # noqa: F401
     OpProtoHolder,
     Variable,
+    _create_tensor,
     _dygraph_tracer,
     _non_static_mode,
-    _varbase_creator,
     convert_np_dtype_to_dtype_,
     default_main_program,
     device_guard,
...
@@ -17,7 +17,7 @@ from paddle import _legacy_C_ops
 from paddle.distributed import collective
 from paddle.fluid import core
 from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
-from paddle.framework import LayerHelper, _varbase_creator, in_dygraph_mode
+from paddle.framework import LayerHelper, _create_tensor, in_dygraph_mode
 from paddle.nn import Layer
 from paddle.nn.utils import dygraph_utils
@@ -447,7 +447,7 @@ def _linear(x, weight, bias=None, name=None):
     Function Linear
     """
     if in_dygraph_mode():
-        pre_bias = _varbase_creator(dtype=x.dtype)
+        pre_bias = _create_tensor(dtype=x.dtype)
         _legacy_C_ops.matmul(
             x,
             weight,
...
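The `_linear` hunk above shows the pattern most call sites in this PR share: pre-allocate an empty output tensor, then hand it to a `_legacy_C_ops` kernel that writes into it. A hedged sketch of that flow (the attribute arguments after `pre_bias` are assumptions for illustration, since the hunk is truncated):

```python
from paddle import _legacy_C_ops
from paddle.fluid.framework import _create_tensor, in_dygraph_mode

def linear_sketch(x, weight):
    """Dygraph-only sketch of _linear's matmul step."""
    assert in_dygraph_mode()
    # Output buffer for the legacy kernel; dtype mirrors the input.
    pre_bias = _create_tensor(dtype=x.dtype)
    # The transpose_X/transpose_Y attrs are assumed, not taken from the diff.
    _legacy_C_ops.matmul(
        x, weight, pre_bias, 'transpose_X', False, 'transpose_Y', False
    )
    return pre_bias
```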
@@ -89,7 +89,7 @@ def _coalesce_tensors(var_groups):
 @framework.dygraph_only
 def _reshape_inplace(x, shape):
-    x_shape = framework._varbase_creator(dtype=x.dtype)
+    x_shape = framework._create_tensor(dtype=x.dtype)
     framework._dygraph_tracer().trace_op(
         type="reshape2",
         inputs={'X': x},
...
@@ -16,9 +16,9 @@ from .. import core
 from ..framework import (
     Variable,
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
     in_dygraph_mode,
 )
+from ..framework import _create_tensor as framework_create_tensor
 from ..layers.layer_function_generator import OpProtoHolder
 from . import no_grad
 from .. import framework
@@ -78,7 +78,7 @@ def monkey_patch_math_varbase():
                 shape, value, dtype, framework._current_expected_place()
             )
         else:
-            out = _varbase_creator(dtype=dtype)
+            out = framework_create_tensor(dtype=dtype)
             out = _legacy_C_ops.fill_constant(
                 out,
                 'dtype',
...
@@ -1148,7 +1148,7 @@ def _debug_string_(proto, throw_on_error=True):
     return proto.__str__()


-def _varbase_creator(
+def _create_tensor(
     type=core.VarDesc.VarType.LOD_TENSOR,
     name=None,
     shape=None,
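Only the first parameters of the renamed helper are visible above (`type`, defaulting to `LOD_TENSOR`, plus `name` and `shape`); the rest of the signature is truncated. A usage sketch grounded in the `RunProgramOpTest` hunk further down, which passes exactly these keywords:

```python
from paddle.fluid import core
from paddle.fluid.framework import _create_tensor

# Create a persistable STEP_SCOPES variable, as the RunProgramOpTest hunk
# below does; the remaining keywords are left at their defaults.
scope_var = _create_tensor(
    type=core.VarDesc.VarType.STEP_SCOPES,
    name="program_out_scope",
    persistable=True,
)
```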
@@ -3836,7 +3836,7 @@ class Block:
     def create_var(self, *args, **kwargs):
         if _non_static_mode():
-            var = _varbase_creator(*args, **kwargs)
+            var = _create_tensor(*args, **kwargs)
         else:
             var = Variable(block=self, *args, **kwargs)
         if 'initializer' in kwargs:
...
@@ -28,7 +28,7 @@ from ..framework import (
     dygraph_only,
     _dygraph_tracer,
     default_main_program,
-    _varbase_creator,
+    _create_tensor,
     static_only,
     _global_flags,
     in_dygraph_mode,
...
@@ -20,7 +20,7 @@ from ..layer_helper import LayerHelper
 from ..framework import (
     _current_expected_place,
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
+    _create_tensor,
     in_dygraph_mode,
 )
 from ..framework import Variable
...
@@ -232,7 +232,7 @@ class Optimizer:
         if not isinstance(self._learning_rate, _LearningRateEpochDecay):
             var_tmp = None
-            var_temp = framework._varbase_creator(
+            var_temp = framework._create_tensor(
                 None, name='global_step', dtype='int32'
             )
...
@@ -15,7 +15,7 @@
 import logging

 from . import framework
-from .framework import _non_static_mode, _varbase_creator, in_dygraph_mode
+from .framework import _non_static_mode, in_dygraph_mode
 from . import core
 from paddle import _C_ops, _legacy_C_ops
...
@@ -197,7 +197,7 @@ class RunProgramOpTest(unittest.TestCase):
     def prepare_dygraph_output(self):
         def create_var_base(is_input, name):
-            var = framework._varbase_creator(dtype=None, shape=None, name=name)
+            var = framework._create_tensor(dtype=None, shape=None, name=name)
             var.stop_gradient = False
             return var
@@ -210,7 +210,7 @@ class RunProgramOpTest(unittest.TestCase):
         if global_var._in_eager_mode_:
             outputs['OutScope'] = [core.Scope()]
         else:
-            outputs['OutScope'] = framework._varbase_creator(
+            outputs['OutScope'] = framework._create_tensor(
                 type=core.VarDesc.VarType.STEP_SCOPES,
                 name="program_out_scope",
                 persistable=True,
...
@@ -64,7 +64,7 @@ from ..fluid.framework import dygraph_only # noqa: F401
 from ..fluid.framework import dygraph_not_support # noqa: F401
 from ..fluid.framework import (
     convert_np_dtype_to_dtype_,
-    _varbase_creator,
+    _create_tensor,
     OpProtoHolder,
 ) # noqa: F401
 from ..fluid.framework import _dygraph_tracer # noqa: F401
...
@@ -31,10 +31,10 @@ from paddle.fluid.framework import (
     EagerParamBase,
     Program,
     Variable,
+    _create_tensor,
     _current_expected_place,
     _dygraph_tracer,
     _non_static_mode,
-    _varbase_creator,
 )

 from .io_utils import (
@@ -133,7 +133,7 @@ def _load_state_dict_from_save_params(model_path):
     # 2. create and load Tensor
     with fluid.dygraph.guard():
         for name in var_name_list:
-            new_var = _varbase_creator(name=name, persistable=True)
+            new_var = _create_tensor(name=name, persistable=True)
             _dygraph_tracer().trace_op(
                 type='load',
                 inputs={},
@@ -458,7 +458,7 @@ def _ndarray_to_tensor(obj, return_numpy):


 def _lod_tensor2varbase(tensor):
-    return_var = _varbase_creator()
+    return_var = _create_tensor()
     return_var.value().get_tensor().set(tensor, _current_expected_place())
     return return_var
...
@@ -650,7 +650,7 @@ def _load_persistable_vars_by_program(
                 persistable=True,
             )
         else:
-            new_var = framework._varbase_creator(
+            new_var = framework._create_tensor(
                 type=each_var.type(),
                 name=each_var.name(),
                 shape=each_var.shape(),
@@ -738,9 +738,7 @@ def _load_persistable_vars(
                 persistable=True,
             )
         else:
-            new_var = framework._varbase_creator(
-                name=new_name, persistable=True
-            )
+            new_var = framework._create_tensor(name=new_name, persistable=True)
             new_var.stop_gradient = extra_var_info[name]['stop_gradient']
             load_var_dict[new_name] = new_var
...
@@ -20,7 +20,7 @@ import paddle
 from paddle import _legacy_C_ops

 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _non_static_mode, _varbase_creator
+from ..fluid.framework import _create_tensor, _non_static_mode
 from ..fluid.layer_helper import LayerHelper

 __all__ = []
@@ -804,9 +804,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
     label = paddle.cast(label, paddle.int64)
     if _non_static_mode():
         if correct is None:
-            correct = _varbase_creator(dtype="int32")
+            correct = _create_tensor(dtype="int32")
         if total is None:
-            total = _varbase_creator(dtype="int32")
+            total = _create_tensor(dtype="int32")
         topk_out, topk_indices = paddle.topk(input, k=k)
         _acc, _, _ = _legacy_C_ops.accuracy(
...
@@ -205,7 +205,7 @@ class Dirac(Initializer):
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = framework._varbase_creator()
+                tmp_tensor = framework._create_tensor()
                 _C_ops.assign_value_(
                     tmp_tensor,
                     [len(idx_list)],
@@ -234,7 +234,7 @@ class Dirac(Initializer):
         if framework.in_dygraph_mode():
             with fluid.dygraph.no_grad():
-                tmp_tensor = framework._varbase_creator()
+                tmp_tensor = framework._create_tensor()
                 _C_ops.assign_value_(
                     tmp_tensor,
                     [len(value_list)],
...
@@ -17,7 +17,7 @@ import logging
 import paddle
 from paddle import _legacy_C_ops, in_dynamic_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _varbase_creator
+from paddle.fluid.framework import _create_tensor
 from paddle.fluid.log_helper import get_logger
 from paddle.framework import ParamAttr, core
 from paddle.nn import Layer
@@ -87,7 +87,7 @@ class FakeQuantAbsMax(Layer):
     def forward(self, input):
         if in_dynamic_mode():
             attrs = ('bit_length', self._quant_bits)
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -101,7 +101,7 @@ class FakeQuantAbsMax(Layer):
             )

             if not out_scale:
-                out_scale = _varbase_creator(
+                out_scale = _create_tensor(
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     name=self._scale_name,
                     shape=[1],
@@ -210,7 +210,7 @@ class FakeQuantMovingAverageAbsMax(Layer):
                 'is_test',
                 not self.training,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -322,7 +322,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
                 'quant_axis',
                 self._quant_axis,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
@@ -336,7 +336,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
                     out_scale, op=paddle.distributed.ReduceOp.MAX
                 )
             if out_scale is None:
-                out_scale = _varbase_creator(
+                out_scale = _create_tensor(
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     name=self._scale_name,
                     shape=[self._channel_num],
@@ -441,7 +441,7 @@ class MovingAverageAbsMaxScale(Layer):
                 not self.training,
             )

-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.tmp",
                 shape=input.shape,
...
@@ -17,8 +17,8 @@ from functools import reduce
 import paddle
 from paddle import _C_ops
 from paddle.fluid.framework import (
+    _create_tensor,
     _dygraph_tracer,
-    _varbase_creator,
     dygraph_only,
     in_dygraph_mode,
 )
@@ -26,7 +26,7 @@ from paddle.fluid.framework import (

 # input == output: the inplace strategy of reshape has almost no cost
 def _inplace_reshape_dygraph(x, shape):
-    x_shape = _varbase_creator(dtype='int64')
+    x_shape = _create_tensor(dtype='int64')
     if in_dygraph_mode():
         with paddle.fluid.dygraph.no_grad():
             tmp_out = _C_ops.reshape(x, shape)
@@ -104,7 +104,7 @@ def parameters_to_vector(parameters, name=None):
         origin_shapes.append(param.shape)
         _inplace_reshape_dygraph(param, [-1])

-    out = _varbase_creator(dtype=dtype)
+    out = _create_tensor(dtype=dtype)
     if in_dygraph_mode():
         with paddle.fluid.dygraph.no_grad():
             tmp = _C_ops.concat(parameters, 0)
...
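These hunks sit behind the public `paddle.nn.utils.parameters_to_vector` helper: each parameter is reshaped to 1-D in place via the traced reshape op, then concatenated into the freshly created `out` tensor. A quick usage sketch of the public API (layer sizes are illustrative):

```python
import paddle
from paddle.nn.utils import parameters_to_vector

linear = paddle.nn.Linear(4, 3)  # 4*3 weight entries + 3 bias entries
vec = parameters_to_vector(linear.parameters())
print(vec.shape)  # [15]
```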
@@ -15,7 +15,7 @@
 import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _varbase_creator
+from paddle.fluid.framework import _create_tensor
 from paddle.framework import ParamAttr, core
 from paddle.nn.initializer import Constant
 from paddle.utils import unique_name
@@ -147,7 +147,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter):
                 'is_test',
                 not self.training,
             )
-            quant_out = _varbase_creator(
+            quant_out = _create_tensor(
                 type=input.type,
                 name=f"{input.name}.quantized.dequantized",
                 shape=input.shape,
...
@@ -19,7 +19,7 @@ import numpy as np

 import paddle
 from paddle import _legacy_C_ops
 from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import Variable, _non_static_mode, _varbase_creator
+from paddle.fluid.framework import Variable, _create_tensor, _non_static_mode
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.nn.initializer import ConstantInitializer
@@ -74,9 +74,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     if _non_static_mode():
         if correct is None:
-            correct = _varbase_creator(dtype="int32")
+            correct = _create_tensor(dtype="int32")
         if total is None:
-            total = _varbase_creator(dtype="int32")
+            total = _create_tensor(dtype="int32")
         _k = np.array(k).item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _legacy_C_ops.top_k_v2(
...