BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle)
Commit 6b7bb6b5 (unverified)
Authored on Feb 06, 2020 by Tao Luo; committed by GitHub on Feb 06, 2020

change check_type_and_dtype to check_variable_and_dtype (#22465)

Parent: 17f2c089
Showing 8 changed files with 67 additions and 79 deletions (+67 −79):
python/paddle/fluid/data_feeder.py (+6 −7)
python/paddle/fluid/input.py (+2 −2)
python/paddle/fluid/layers/control_flow.py (+4 −4)
python/paddle/fluid/layers/layer_function_generator.py (+3 −3)
python/paddle/fluid/layers/loss.py (+7 −8)
python/paddle/fluid/layers/metric_op.py (+3 −3)
python/paddle/fluid/layers/nn.py (+37 −47)
python/paddle/fluid/layers/tensor.py (+5 −5)
python/paddle/fluid/data_feeder.py

@@ -71,13 +71,12 @@ def convert_dtype(dtype):
         "int32, int64, uint8]"
     )


-def check_type_and_dtype(input,
-                         input_name,
-                         expected_type,
-                         expected_dtype,
-                         op_name,
-                         extra_message=''):
-    check_type(input, input_name, expected_type, op_name, extra_message)
+def check_variable_and_dtype(input,
+                             input_name,
+                             expected_dtype,
+                             op_name,
+                             extra_message=''):
+    check_type(input, input_name, Variable, op_name, extra_message)
     check_dtype(input.dtype, input_name, expected_dtype, op_name, extra_message)
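For reference, the renamed helper simply bundles the two lower-level checks visible in the hunk above: a check_type against Variable followed by a check_dtype on the input's dtype. A minimal usage sketch, assuming the paddle.fluid 1.7-era module paths shown in this diff and a hypothetical op name 'my_op' used only for illustration:

    # Sketch: what a caller gets from check_variable_and_dtype, spelled out.
    from paddle.fluid.framework import Variable
    from paddle.fluid.data_feeder import (check_type, check_dtype,
                                          check_variable_and_dtype)

    def validate_input(x):
        # One call replaces the old check_type_and_dtype(x, 'x', Variable, ...):
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
        # ...and is equivalent to running both lower-level checks by hand:
        check_type(x, 'x', Variable, 'my_op')                       # x must be a Variable
        check_dtype(x.dtype, 'x', ['float32', 'float64'], 'my_op')  # dtype must be allowed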
python/paddle/fluid/input.py

@@ -16,7 +16,7 @@ from __future__ import print_function
 import warnings
 from .framework import Variable, in_dygraph_mode
 from .layer_helper import LayerHelper
-from .data_feeder import check_type_and_dtype, check_dtype
+from .data_feeder import check_variable_and_dtype, check_dtype

 __all__ = ['one_hot', 'embedding']

@@ -233,7 +233,7 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'], 'fluid.embedding')
+    check_variable_and_dtype(input, 'input', ['int64'], 'fluid.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.embedding')
     remote_prefetch = is_sparse and (not is_distributed)
python/paddle/fluid/layers/control_flow.py

@@ -26,7 +26,7 @@ import numpy
 import warnings
 import six
 from functools import reduce, partial
-from ..data_feeder import convert_dtype, check_type_and_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_

@@ -257,9 +257,9 @@ def Print(input,
            data: 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
     '''
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64', 'bool'],
-                         'fluid.layers.Print')
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int32', 'int64', 'bool'],
+                             'fluid.layers.Print')
     helper = LayerHelper('print' + "_" + input.name, **locals())
     output = helper.create_variable_for_type_inference(input.dtype)
python/paddle/fluid/layers/layer_function_generator.py

@@ -22,7 +22,7 @@ from six.moves import cStringIO
 from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
 from ..layer_helper import LayerHelper
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype

 __all__ = [
     'deprecated', 'generate_layer_fn', 'generate_activation_fn', 'autodoc',

@@ -258,8 +258,8 @@ def generate_activation_fn(op_type):
            outs = op(inputs)
            return outs['Out'][0]

-        check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                             op_type)
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                                 op_type)
        helper = LayerHelper(op_type, **locals())
        output = helper.create_variable_for_type_inference(dtype=x.dtype)
python/paddle/fluid/layers/loss.py

@@ -21,7 +21,7 @@ from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
 from ..framework import Variable, in_dygraph_mode
 from .. import core
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype
 from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core

@@ -245,8 +245,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
            outs = core.ops.cross_entropy(inputs, attrs)
            return outs['Y'][0]

-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy')
     helper = LayerHelper('cross_entropy', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(

@@ -262,8 +262,8 @@ def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
            outs = core.ops.cross_entropy2(inputs, attrs)
            return outs['Y'][0]

-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'cross_entropy2')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'cross_entropy2')
     helper = LayerHelper('cross_entropy2', **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     xshape = helper.create_variable_for_type_inference(dtype=input.dtype)

@@ -717,9 +717,8 @@ def nce(input,
                               custom_dist=dist)
     """
     helper = LayerHelper('nce', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['float32', 'float64'],
-                         'nce')
-    check_type_and_dtype(label, 'label', Variable, ['int64'], 'nce')
+    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'nce')
+    check_variable_and_dtype(label, 'label', ['int64'], 'nce')
     dim = input.shape[1]
     num_true_class = label.shape[1]
python/paddle/fluid/layers/metric_op.py

@@ -24,7 +24,7 @@ from ..framework import Variable, in_dygraph_mode, _varbase_creator
 from .. import core
 from ..param_attr import ParamAttr
 from . import nn
-from ..data_feeder import check_type_and_dtype
+from ..data_feeder import check_variable_and_dtype

 __all__ = ['accuracy', 'auc']

@@ -94,8 +94,8 @@ def accuracy(input, label, k=1, correct=None, total=None):
            return outs['Accuracy'][0]

     helper = LayerHelper("accuracy", **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'accuracy')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'accuracy')
     topk_out, topk_indices = nn.topk(input, k=k)
     acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
python/paddle/fluid/layers/nn.py

@@ -33,7 +33,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
-from ..data_feeder import convert_dtype, check_type_and_dtype, check_type, check_dtype
+from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

 __all__ = [
     'fc',

@@ -472,8 +472,8 @@ def embedding(input,
     """
     helper = LayerHelper('embedding', **locals())
-    check_type_and_dtype(input, 'input', Variable, ['int64'],
-                         'fluid.layers.embedding')
+    check_variable_and_dtype(input, 'input', ['int64'],
+                             'fluid.layers.embedding')
     check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'],
                 'fluid.layers.embedding')
     remote_prefetch = is_sparse and (not is_distributed)

@@ -840,8 +840,8 @@ def dropout(x,
            return outs['Out'][0]

     helper = LayerHelper('dropout', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'dropout')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
+                             'dropout')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     mask = helper.create_variable_for_type_inference(

@@ -1124,8 +1124,8 @@ def softmax(input, use_cudnn=False, name=None, axis=-1):
            return outs['Out'][0]

     helper = LayerHelper('softmax', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'softmax')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'softmax')
     dtype = helper.input_dtype()
     softmax_out = helper.create_variable_for_type_inference(dtype)

@@ -1280,8 +1280,8 @@ def conv2d(input,
          conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
     """

-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'conv2d')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'conv2d')
     num_channels = input.shape[1]
     if not isinstance(use_cudnn, bool):
         raise ValueError("Attr(use_cudnn) should be True or False. Received "

@@ -2555,8 +2555,8 @@ def batch_norm(input,
     assert bias_attr is not False, "bias_attr should not be False in batch_norm."
     helper = LayerHelper('batch_norm', **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float16', 'float32', 'float64'], 'batch_norm')
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             'batch_norm')
     dtype = helper.input_dtype()

     has_reserve_space = False

@@ -3896,8 +3896,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
            outs = core.ops.reduce_sum(inputs, attrs)
            return outs['Out'][0]

-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_sum')
     helper = LayerHelper('reduce_sum', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(

@@ -3971,9 +3971,8 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
            outs = core.ops.reduce_mean(inputs, attrs)
            return outs['Out'][0]

-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
-                         'reduce_mean')
+    check_variable_and_dtype(
+        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_mean')
     helper = LayerHelper('reduce_mean', **locals())
     out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(

@@ -4601,8 +4600,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     def __check_input(x, y):
         var_names = {'x': x, 'y': y}
         for name, val in var_names.items():
-            check_type_and_dtype(val, name, Variable,
-                                 ['float16', 'float32', 'float64'], 'matmul')
+            check_variable_and_dtype(
+                val, name, ['float16', 'float32', 'float64'], 'matmul')
         x_shape = list(x.shape)
         y_shape = list(y.shape)
         if len(x_shape) == 1:

@@ -4962,9 +4961,9 @@ def transpose(x, perm, name=None):
            outs = core.ops.transpose2(inputs, attrs)
            return outs['Out'][0]

-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         'transpose')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'],
+        'transpose')
     check_type(perm, 'perm', list, 'transpose')
     if len(perm) != len(x.shape):

@@ -5589,9 +5588,8 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
            out = outs['Out'][0]
            return dygraph_utils._append_activation_in_dygraph(out, act)

-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         'reshape')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'reshape')
     check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
     check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')

@@ -5719,9 +5717,9 @@ def squeeze(input, axes, name=None):
     """
     helper = LayerHelper("squeeze", **locals())
-    check_type_and_dtype(input, 'input', Variable,
-                         ['float32', 'float64', 'int8', 'int32', 'int64'],
-                         'squeeze')
+    check_variable_and_dtype(input, 'input',
+                             ['float32', 'float64', 'int8', 'int32', 'int64'],
+                             'squeeze')
     check_type(axes, 'axes', list, 'squeeze')
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)

@@ -8228,9 +8226,8 @@ def crop_tensor(x, shape=None, offsets=None, name=None):
     """
     helper = LayerHelper('crop_tensor', **locals())
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float32', 'float64', 'int32', 'int64'],
-                         'crop_tensor')
+    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
+                             'crop_tensor')
     check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
     check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
                'crop_tensor')

@@ -8523,8 +8520,7 @@ def elu(x, alpha=1.0, name=None):
          # [ 1.          15.6      ]]
     """
     helper = LayerHelper('elu', **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'elu')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',

@@ -9342,9 +9338,8 @@ def expand(x, expand_times, name=None):
            outs = core.ops.expand(inputs, attrs)
            return outs['Out'][0]

-    check_type_and_dtype(x, 'x', Variable,
-                         ['bool', 'float32', 'float64', 'int32', 'int64'],
-                         'expand')
+    check_variable_and_dtype(
+        x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand')
     check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
     if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
         raise ValueError(

@@ -10277,12 +10272,10 @@ def _elementwise_op(helper):
     assert x is not None, 'x cannot be None in {}'.format(op_type)
     assert y is not None, 'y cannot be None in {}'.format(op_type)
-    check_type_and_dtype(x, 'x', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
-    check_type_and_dtype(y, 'y', Variable,
-                         ['float16', 'float32', 'float64', 'int32', 'int64'],
-                         op_type)
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
+    check_variable_and_dtype(
+        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], op_type)
     axis = helper.kwargs.get('axis', -1)
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)

@@ -11338,8 +11331,7 @@ def mean(x, name=None):
            return outs['Out'][0]

     helper = LayerHelper("mean", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mean')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:

@@ -11425,10 +11417,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
            return outs['Out'][0]

     helper = LayerHelper("mul", **locals())
-    check_type_and_dtype(x, 'x', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
-    check_type_and_dtype(y, 'y', Variable, ['float16', 'float32', 'float64'],
-                         'mul')
+    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
+    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
     if name is None:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
python/paddle/fluid/layers/tensor.py

@@ -23,7 +23,7 @@ from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc
 from . import utils
-from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
+from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 import numpy
 import warnings

@@ -193,8 +193,8 @@ def cast(x, dtype):
        #  [ 0  4]] int32
     """
     helper = LayerHelper('cast', **locals())
-    check_type_and_dtype(
-        x, 'x', Variable,
+    check_variable_and_dtype(
+        x, 'x',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'cast')
     out = helper.create_variable_for_type_inference(dtype=dtype)

@@ -269,8 +269,8 @@ def concat(input, axis=0, name=None):
                             (type(input)))
         input = [input]
     for id, x in enumerate(input):
-        check_type_and_dtype(
-            x, 'input[' + str(id) + ']', Variable,
+        check_variable_and_dtype(
+            x, 'input[' + str(id) + ']',
             ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
     check_type(axis, 'axis', (int, Variable), 'concat')
     inputs = {'X': input}
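Across the seven call-site files the migration is mechanical: the explicit Variable argument disappears because the renamed helper asserts it internally, leaving only the allowed dtype list and the op name. A small sketch of how the check surfaces to a user after this commit, assuming a Paddle 1.7-era static-graph program; the allowed dtypes are taken from the mean() hunk above, and the exact error wording is not guaranteed here:

    import paddle.fluid as fluid

    x = fluid.data(name='x', shape=[-1, 4], dtype='float32')
    out = fluid.layers.mean(x)      # passes: float32 is in ['float16', 'float32', 'float64']

    y = fluid.data(name='y', shape=[-1, 4], dtype='int32')
    try:
        fluid.layers.mean(y)        # int32 is not an allowed dtype for 'mean'
    except TypeError as err:        # check_dtype reports the op name and the received dtype
        print(err)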