Unverified commit 6b7bb6b5, authored by Tao Luo, committed by GitHub

change check_type_and_dtype to check_variable_and_dtype (#22465)

Parent: 17f2c089
@@ -71,13 +71,12 @@ def convert_dtype(dtype):
         "int32, int64, uint8]")
-def check_type_and_dtype(input,
-                         input_name,
-                         expected_type,
-                         expected_dtype,
-                         op_name,
-                         extra_message=''):
-    check_type(input, input_name, expected_type, op_name, extra_message)
+def check_variable_and_dtype(input,
+                             input_name,
+                             expected_dtype,
+                             op_name,
+                             extra_message=''):
+    check_type(input, input_name, Variable, op_name, extra_message)
     check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message)
...
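In short, the renamed helper drops the expected_type parameter and always enforces Variable, so callers only pass the dtype whitelist. A minimal before/after sketch of a call site (the op name 'my_op' is hypothetical; the real call sites follow below):

    # before: the caller spelled out the expected type
    check_type_and_dtype(x, 'x', Variable, ['float32', 'float64'], 'my_op')

    # after: Variable is implied by the new helper; only the dtype list remains
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')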
@@ -16,7 +16,7 @@ from __future__ import print_function
 import warnings
 from .framework import Variable, in_dygraph_mode
 from .layer_helper import LayerHelper
-from .data_feeder import check_type_and_dtype, check_dtype
+from .data_feeder import check_variable_and_dtype, check_dtype
 __all__ = ['one_hot', 'embedding']
@@ -233,7 +233,7 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'], 'fluid.embedding')
+    check_variable_and_dtype(input, 'input', ['int64'], 'fluid.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.embedding')
     remote_prefetch = is_sparse and (not is_distributed)
...
@@ -26,7 +26,7 @@ import numpy
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_type_and_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
@@ -257,7 +257,7 @@ def Print(input,
             data: 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
     '''
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64', 'bool'],
-                         'fluid.layers.Print')
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64', 'bool'],
+                             'fluid.layers.Print')
...
@@ -22,7 +22,7 @@ from six.moves import cStringIO
 from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
 from ..layer_helper import LayerHelper
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 __all__ = [
     'deprecated', 'generate_layer_fn', 'generate_activation_fn', 'autodoc',
@@ -258,8 +258,8 @@ def generate_activation_fn(op_type):
             outs = op(inputs)
             return outs['Out'][0]
-        check_type_and_dtype(x, 'x', Variable,
-                             ['float16', 'float32', 'float64'], op_type)
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                 op_type)
         helper = LayerHelper(op_type, **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -21,7 +21,7 @@ from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
 from ..framework import Variable, in_dygraph_mode
 from .. import core
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core
@@ -245,8 +245,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
         outs = core.ops.cross_entropy(inputs, attrs)
         return outs['Y'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy')
     helper = LayerHelper('cross_entropy', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
@@ -262,8 +262,8 @@ def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
         outs = core.ops.cross_entropy2(inputs, attrs)
         return outs['Y'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy2')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy2')
     helper = LayerHelper('cross_entropy2', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     xshape = helper.create_variable_for_type_inference(dtype=input.dtype)
@@ -717,9 +717,8 @@ def nce(input,
                 custom_dist=dist)
     """
     helper = LayerHelper('nce', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['float32', 'float64'],
-                         'nce')
-    check_type_and_dtype(label, 'label', Variable, ['int64'], 'nce')
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'nce')
+    check_variable_and_dtype(label, 'label', ['int64'], 'nce')
     dim = input.shape[1]
     num_true_class = label.shape[1]
...
@@ -24,7 +24,7 @@ from ..framework import Variable, in_dygraph_mode, _varbase_creator
 from .. import core
 from ..param_attr import ParamAttr
 from . import nn
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 __all__ = ['accuracy', 'auc']
@@ -94,8 +94,8 @@ def accuracy(input, label, k=1, correct=None, total=None):
         return outs['Accuracy'][0]
     helper = LayerHelper("accuracy", **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'accuracy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'accuracy')
     topk_out, topk_indices = nn.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
...
@@ -33,7 +33,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
-from ..data_feeder import convert_dtype, check_type_and_dtype, check_type, check_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 __all__ = [
     'fc',
@@ -472,7 +472,7 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'],
-                         'fluid.layers.embedding')
+    check_variable_and_dtype(input, 'input', ['int64'],
+                             'fluid.layers.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.layers.embedding')
@@ -840,7 +840,7 @@ def dropout(x,
         return outs['Out'][0]
     helper = LayerHelper('dropout', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'dropout')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'dropout')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -1124,8 +1124,8 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
         return outs['Out'][0]
     helper = LayerHelper('softmax', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'softmax')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'softmax')
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)
@@ -1280,8 +1280,8 @@ def conv2d(input,
           conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
     """
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'conv2d')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'conv2d')
     num_channels = input.shape[1]
     if not isinstance(use_cudnn, bool):
         raise ValueError("Attr(use_cudnn) should be True or False. Received "
@@ -2555,8 +2555,8 @@ def batch_norm(input,
     assert bias_attr is not False, "bias_attr should not be False in batch_norm."
     helper = LayerHelper('batch_norm', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'batch_norm')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'batch_norm')
     dtype = helper.input_dtype()
     has_reserve_space = False
@@ -3896,8 +3896,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
         outs = core.ops.reduce_sum(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
     helper = LayerHelper('reduce_sum', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -3971,9 +3971,8 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         outs = core.ops.reduce_mean(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
-                         'reduce_mean')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
     helper = LayerHelper('reduce_mean', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
@@ -4601,8 +4600,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
         for name, val in var_names.items():
-            check_type_and_dtype(val, name, Variable,
-                                 ['float16', 'float32', 'float64'], 'matmul')
+            check_variable_and_dtype(
+                val, name, ['float16', 'float32', 'float64'], 'matmul')
         x_shape = list(x.shape)
         y_shape = list(y.shape)
         if len(x_shape) == 1:
@@ -4962,8 +4961,8 @@ def transpose(x, perm, name=None):
         outs = core.ops.transpose2(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         'transpose')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'transpose')
     check_type(perm, 'perm', list, 'transpose')
@@ -5589,9 +5588,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
         out = outs['Out'][0]
         return dygraph_utils._append_activation_in_dygraph(out, act)
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         'reshape')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
     check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
     check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
@@ -5719,7 +5717,7 @@ def squeeze(input, axes, name=None):
     """
     helper = LayerHelper("squeeze", **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int8', 'int32', 'int64'],
-                         'squeeze')
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int8', 'int32', 'int64'],
+                             'squeeze')
     check_type(axes, 'axes', list, 'squeeze')
@@ -8228,8 +8226,7 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
     """
     helper = LayerHelper('crop_tensor', **locals())
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
-                         'crop_tensor')
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'crop_tensor')
     check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
     check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
@@ -8523,8 +8520,7 @@ def elu(x, alpha=1.0, name=None):
         # [ 1. 15.6 ]]
     """
     helper = LayerHelper('elu', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'elu')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',
@@ -9342,9 +9338,8 @@ def expand(x, expand_times, name=None):
         outs = core.ops.expand(inputs, attrs)
         return outs['Out'][0]
-    check_type_and_dtype(x, 'x', Variable,
-                         ['bool', 'float32', 'float64', 'int32', 'int64'],
-                         'expand')
+    check_variable_and_dtype(
+        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
     check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
     if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
         raise ValueError(
@@ -10277,12 +10272,10 @@ def _elementwise_op(helper):
     assert x is not None, 'x cannot be None in {}'.format(op_type)
     assert y is not None, 'y cannot be None in {}'.format(op_type)
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
-    check_type_and_dtype(y, 'y', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
+    check_variable_and_dtype(
+        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
@@ -11338,8 +11331,7 @@ def mean(x, name=None):
         return outs['Out'][0]
     helper = LayerHelper("mean", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mean')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
@@ -11425,10 +11417,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
         return outs['Out'][0]
     helper = LayerHelper("mul", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
-    check_type_and_dtype(y, 'y', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
+    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
    if name is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
    else:
...
@@ -23,7 +23,7 @@ from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
 from . import utils
-from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 import numpy
 import warnings
@@ -193,8 +193,8 @@ def cast(x, dtype):
        # [ 0 4]] int32
     """
     helper = LayerHelper('cast', **locals())
-    check_type_and_dtype(
-        x, 'x', Variable,
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
-        'cast')
+    check_variable_and_dtype(
+        x, 'x',
+        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
+        'cast')
     out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -269,8 +269,8 @@ def concat(input, axis=0, name=None):
                 (type(input)))
         input = [input]
     for id, x in enumerate(input):
-        check_type_and_dtype(
-            x, 'input[' + str(id) + ']', Variable,
-            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
+        check_variable_and_dtype(
+            x, 'input[' + str(id) + ']',
+            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
     check_type(axis, 'axis', (int, Variable), 'concat')
     inputs = {'X': input}
...
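What the tightened check buys at the layer entry points: a non-Variable input is rejected immediately instead of failing deep inside op inference. A minimal sketch of the observable static-graph behavior under the fluid 1.7-era API (the exception type comes from check_type inside data_feeder; treat the exact message text as an assumption):

    import numpy as np
    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[None, 10], dtype='float32')
    out = fluid.layers.softmax(x)  # OK: x is a Variable with an allowed dtype

    try:
        fluid.layers.softmax(np.ones((2, 10)))  # a numpy array, not a Variable
    except TypeError as e:
        # check_variable_and_dtype -> check_type rejects the input up front
        print(e)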