Commit 6b7bb6b5 authored by Tao Luo, committed by GitHub (unverified)

change check_type_and_dtype to check_variable_and_dtype (#22465)

Parent 17f2c089
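For orientation: besides the rename, the new helper drops the explicit `expected_type` argument, because it always checks against `Variable`. A minimal before/after sketch of a call site (the wrapper `my_op` and its dtype list are hypothetical; the helper signatures follow the first hunk below):

```python
from paddle.fluid.data_feeder import check_variable_and_dtype


def my_op(x):
    # Hypothetical op wrapper, for illustration only.
    # Before this commit, callers named the expected type explicitly:
    #     check_type_and_dtype(x, 'x', Variable, ['float32', 'float64'], 'my_op')
    # After it, Variable is hard-wired inside the helper, so the type
    # argument disappears from every call site:
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
```

Since every call site in this diff passed `Variable` anyway, the rename is behavior-preserving; it only makes the Variable check implicit.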
@@ -71,13 +71,12 @@ def convert_dtype(dtype):
         "int32, int64, uint8]")
-def check_type_and_dtype(input,
+def check_variable_and_dtype(input,
                              input_name,
-                             expected_type,
                              expected_dtype,
                              op_name,
                              extra_message=''):
-    check_type(input, input_name, expected_type, op_name, extra_message)
+    check_type(input, input_name, Variable, op_name, extra_message)
     check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message)
@@ -16,7 +16,7 @@ from __future__ import print_function
 import warnings
 from .framework import Variable, in_dygraph_mode
 from .layer_helper import LayerHelper
-from .data_feeder import check_type_and_dtype, check_dtype
+from .data_feeder import check_variable_and_dtype, check_dtype
 __all__ = ['one_hot', 'embedding']
@@ -233,7 +233,7 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'], 'fluid.embedding')
+    check_variable_and_dtype(input, 'input', ['int64'], 'fluid.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.embedding')
     remote_prefetch = is_sparse and (not is_distributed)
@@ -26,7 +26,7 @@ import numpy
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_type_and_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
@@ -257,7 +257,7 @@ def Print(input,
            data: 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
     '''
-    check_type_and_dtype(input, 'input', Variable,
+    check_variable_and_dtype(input, 'input',
                              ['float32', 'float64', 'int32', 'int64', 'bool'],
                              'fluid.layers.Print')
@@ -22,7 +22,7 @@ from six.moves import cStringIO
 from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
 from ..layer_helper import LayerHelper
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 __all__ = [
     'deprecated', 'generate_layer_fn', 'generate_activation_fn', 'autodoc',
@@ -258,8 +258,8 @@ def generate_activation_fn(op_type):
             outs = op(inputs)
             return outs['Out'][0]
-        check_type_and_dtype(x, 'x', Variable,
-                             ['float16', 'float32', 'float64'], op_type)
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                 op_type)
         helper = LayerHelper(op_type, **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -21,7 +21,7 @@ from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
 from ..framework import Variable, in_dygraph_mode
 from .. import core
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core
@@ -245,8 +245,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
         outs = core.ops.cross_entropy(inputs, attrs)
         return outs['Y'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy')
     helper = LayerHelper('cross_entropy', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
@@ -262,8 +262,8 @@ def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
         outs = core.ops.cross_entropy2(inputs, attrs)
         return outs['Y'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy2')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy2')
     helper = LayerHelper('cross_entropy2', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     xshape = helper.create_variable_for_type_inference(dtype=input.dtype)
@@ -717,9 +717,8 @@ def nce(input,
                       custom_dist=dist)
     """
     helper = LayerHelper('nce', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['float32', 'float64'],
-                         'nce')
-    check_type_and_dtype(label, 'label', Variable, ['int64'], 'nce')
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'nce')
+    check_variable_and_dtype(label, 'label', ['int64'], 'nce')
     dim = input.shape[1]
     num_true_class = label.shape[1]
@@ -24,7 +24,7 @@ from ..framework import Variable, in_dygraph_mode, _varbase_creator
 from .. import core
 from ..param_attr import ParamAttr
 from . import nn
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 __all__ = ['accuracy', 'auc']
@@ -94,8 +94,8 @@ def accuracy(input, label, k=1, correct=None, total=None):
         return outs['Accuracy'][0]
     helper = LayerHelper("accuracy", **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'accuracy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'accuracy')
     topk_out, topk_indices = nn.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
@@ -33,7 +33,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
-from ..data_feeder import convert_dtype, check_type_and_dtype, check_type, check_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 __all__ = [
     'fc',
@@ -472,7 +472,7 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'],
+    check_variable_and_dtype(input, 'input', ['int64'],
                              'fluid.layers.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.layers.embedding')
@@ -840,7 +840,7 @@ def dropout(x,
         return outs['Out'][0]
     helper = LayerHelper('dropout', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'dropout')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1124,8 +1124,8 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
         return outs['Out'][0]
     helper = LayerHelper('softmax', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'softmax')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'softmax')
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -1280,8 +1280,8 @@ def conv2d(input,
           conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
     """
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'conv2d')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'conv2d')
     num_channels = input.shape[1]
     if not isinstance(use_cudnn, bool):
         raise ValueError("Attr(use_cudnn) should be True or False. Received "
@@ -2555,8 +2555,8 @@ def batch_norm(input,
     assert bias_attr is not False, "bias_attr should not be False in batch_norm."
     helper = LayerHelper('batch_norm', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'batch_norm')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'batch_norm')
     dtype = helper.input_dtype()
     has_reserve_space = False
@@ -3896,8 +3896,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
         outs = core.ops.reduce_sum(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
     helper = LayerHelper('reduce_sum', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -3971,9 +3971,8 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         outs = core.ops.reduce_mean(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
-                         'reduce_mean')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
     helper = LayerHelper('reduce_mean', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -4601,8 +4600,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
         for name, val in var_names.items():
-            check_type_and_dtype(val, name, Variable,
-                                 ['float16', 'float32', 'float64'], 'matmul')
+            check_variable_and_dtype(
+                val, name, ['float16', 'float32', 'float64'], 'matmul')
         x_shape = list(x.shape)
         y_shape = list(y.shape)
         if len(x_shape) == 1:
@@ -4962,8 +4961,8 @@ def transpose(x, perm, name=None):
         outs = core.ops.transpose2(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
         'transpose')
     check_type(perm, 'perm', list, 'transpose')
@@ -5589,9 +5588,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
             out = outs['Out'][0]
             return dygraph_utils._append_activation_in_dygraph(out, act)
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         'reshape')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
     check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
     check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
@@ -5719,7 +5717,7 @@ def squeeze(input, axes, name=None):
     """
     helper = LayerHelper("squeeze", **locals())
-    check_type_and_dtype(input, 'input', Variable,
+    check_variable_and_dtype(input, 'input',
                              ['float32', 'float64', 'int8', 'int32', 'int64'],
                              'squeeze')
     check_type(axes, 'axes', list, 'squeeze')
@@ -8228,8 +8226,7 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
     """
     helper = LayerHelper('crop_tensor', **locals())
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'crop_tensor')
     check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
     check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
@@ -8523,8 +8520,7 @@ def elu(x, alpha=1.0, name=None):
             # [ 1. 15.6 ]]
     """
     helper = LayerHelper('elu', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'elu')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',
@@ -9342,9 +9338,8 @@ def expand(x, expand_times, name=None):
         outs = core.ops.expand(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(x, 'x', Variable,
-                         ['bool', 'float32', 'float64', 'int32', 'int64'],
-                         'expand')
+    check_variable_and_dtype(
+        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
     check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
     if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
         raise ValueError(
@@ -10277,12 +10272,10 @@ def _elementwise_op(helper):
     assert x is not None, 'x cannot be None in {}'.format(op_type)
     assert y is not None, 'y cannot be None in {}'.format(op_type)
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
-    check_type_and_dtype(y, 'y', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
+    check_variable_and_dtype(
+        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
@@ -11338,8 +11331,7 @@ def mean(x, name=None):
         return outs['Out'][0]
     helper = LayerHelper("mean", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mean')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
@@ -11425,10 +11417,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
         return outs['Out'][0]
     helper = LayerHelper("mul", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
-    check_type_and_dtype(y, 'y', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
+    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
@@ -23,7 +23,7 @@ from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
 from . import utils
-from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 import numpy
 import warnings
@@ -193,8 +193,8 @@ def cast(x, dtype):
             # [ 0 4]] int32
     """
     helper = LayerHelper('cast', **locals())
-    check_type_and_dtype(
-        x, 'x', Variable,
+    check_variable_and_dtype(
+        x, 'x',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'cast')
     out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -269,8 +269,8 @@ def concat(input, axis=0, name=None):
                 (type(input)))
         input = [input]
     for id, x in enumerate(input):
-        check_type_and_dtype(
-            x, 'input[' + str(id) + ']', Variable,
+        check_variable_and_dtype(
+            x, 'input[' + str(id) + ']',
             ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
     check_type(axis, 'axis', (int, Variable), 'concat')
     inputs = {'X': input}
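As a usage note, these checks run at graph-construction time, so an invalid input now fails fast with the op name attached. A hedged sketch of what a caller would see (the exception behavior described in the comments is illustrative, not quoted from Paddle):

```python
import numpy as np
import paddle.fluid as fluid

# A numpy array is not a fluid Variable, so check_variable_and_dtype
# rejects it before any op is appended to the program.
x = np.random.rand(3, 4).astype('float32')
try:
    fluid.layers.softmax(x)
except TypeError as e:
    print(e)  # expected to name the op ('softmax') and the bad argument
```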