Repository: Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit 96e9b658 (unverified)
Authored Oct 23, 2018 by Xin Pan; committed via GitHub on Oct 23, 2018.

Merge pull request #13941 from panyx0718/fix2

handle var type inference

Parents: 5d6783f8, c1383744
Showing 11 changed files with 288 additions and 251 deletions (+288, −251).
  paddle/fluid/framework/op_desc.cc                         +5    −11
  python/paddle/fluid/layer_helper.py                       +12   −3
  python/paddle/fluid/layers/control_flow.py                +17   −16
  python/paddle/fluid/layers/detection.py                   +38   −27
  python/paddle/fluid/layers/io.py                          +1    −1
  python/paddle/fluid/layers/layer_function_generator.py    +5    −3
  python/paddle/fluid/layers/metric_op.py                   +5    −5
  python/paddle/fluid/layers/nn.py                          +186  −168
  python/paddle/fluid/layers/tensor.py                      +17   −14
  python/paddle/fluid/regularizer.py                        +2    −2
  python/paddle/fluid/tests/unittests/test_slice_var.py     +0    −1
paddle/fluid/framework/op_desc.cc

@@ -515,20 +515,14 @@ void OpDesc::InferShape(const BlockDesc &block) const {
 }
 
 void OpDesc::InferVarType(BlockDesc *block) const {
+  // There are a few places that var type can be set.
+  // When VarDesc is created, default set to LOD_TENSOR.
+  // When output variable is created, default is defaut set to LOD_TENSOR.
+  // We limit here to be the only place that operator defines its customized
+  // var type inference. Hence, we don't do any "default" setting here.
   auto &info = OpInfoMap::Instance().Get(this->Type());
   if (info.infer_var_type_) {
     info.infer_var_type_(*this, block);
-  } else {
-    // all output type is LoDTensor by default
-    VLOG(10) << this->Type()
-             << " has not registered InferVarType. Set output variables to "
-                "LOD_TENSOR";
-    for (auto &out_pair : this->outputs_) {
-      for (auto &out_var_name : out_pair.second) {
-        block->FindRecursiveOrCreateVar(out_var_name)
-            .SetType(proto::VarType::LOD_TENSOR);
-      }
-    }
   }
 }
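The deleted else-branch removes the framework-side fallback that stamped every output of an operator without a registered InferVarType as LOD_TENSOR; per the new comment, that default now comes from how the output variable is created on the Python side (see layer_helper.py below). The following is a minimal sketch of that creation-time default, not part of the commit, assuming a paddle.fluid 1.x installation matching the code in this diff; the variable name tmp_for_type_inference is made up for the example.

import paddle.fluid as fluid
from paddle.fluid import core

# A VarDesc created without an explicit type defaults to LOD_TENSOR; an
# operator's registered VarTypeInference may later overwrite it when
# OpDesc::InferVarType runs.
block = fluid.Program().global_block()
v = block.create_var(name='tmp_for_type_inference', dtype='float32')
print(v.type == core.VarDesc.VarType.LOD_TENSOR)  # expected: True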
python/paddle/fluid/layer_helper.py

@@ -324,10 +324,19 @@ class LayerHelper(object):
             raise ValueError("no Parameter name %s found" % name)
         return param
 
-    def create_tmp_variable(self, dtype, stop_gradient=False):
+    def create_variable_for_type_inference(self, dtype, stop_gradient=False):
+        """Create a temporary variable that should be type inferred layer.
+
+        Note:
+            The default type will be set to LOD_TENSOR. However, when
+            the var is used as operator output, its type will be updated
+            based on operator's `VarTypeInference` implementation in
+            infer_var_type.
+        """
         return self.main_program.current_block().create_var(
             name=unique_name.generate(".".join([self.name, 'tmp'])),
             dtype=dtype,
             type=core.VarDesc.VarType.LOD_TENSOR,
             persistable=False,
             stop_gradient=stop_gradient)

@@ -388,7 +397,7 @@ class LayerHelper(object):
         b = self.create_parameter(
             attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True)
-        tmp = self.create_tmp_variable(dtype=input_var.dtype)
+        tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type='elementwise_add',
             inputs={'X': [input_var],

@@ -414,7 +423,7 @@ class LayerHelper(object):
             tmp = input_var
         # NOTE(dzhwinter): some activation support inplace compution.
         if not core.IsInplace(act_type):
-            tmp = self.create_tmp_variable(dtype=input_var.dtype)
+            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
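To see how the renamed helper is used by a layer, here is a minimal sketch (not part of the commit) assuming paddle.fluid 1.x; it mirrors the cast layer from layers/tensor.py as changed further below, and my_cast is a hypothetical wrapper name.

from paddle.fluid.layer_helper import LayerHelper

def my_cast(x, dtype):
    helper = LayerHelper('cast', **locals())
    # The output variable starts as LOD_TENSOR; if the 'cast' op registers a
    # VarTypeInference, that decides the final type during OpDesc::InferVarType,
    # otherwise the variable simply stays LOD_TENSOR.
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype})
    return out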
python/paddle/fluid/layers/control_flow.py

@@ -80,8 +80,8 @@ def split_lod_tensor(input, mask, level=0):
     """
     helper = LayerHelper('split_lod_tensor', **locals())
-    out_true = helper.create_tmp_variable(dtype=input.dtype)
-    out_false = helper.create_tmp_variable(dtype=input.dtype)
+    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='split_lod_tensor',
         inputs={

@@ -131,7 +131,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
                 in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
     """
     helper = LayerHelper('merge_lod_tensor', **locals())
-    out = helper.create_tmp_variable(dtype=in_true.dtype)
+    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
     helper.append_op(
         type='merge_lod_tensor',
         inputs={'X': x,

@@ -524,7 +524,7 @@ class StaticRNN(object):
         if not isinstance(o, Variable):
             raise TypeError("step output takes a Variable")
 
-        tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
+        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
         self.helper.append_op(
             type='rnn_memory_helper',
             inputs={'X': [o]},

@@ -606,7 +606,8 @@ class StaticRNN(object):
             pre_memories.append(mem.pre_mem.name)
             mem_var = rnn_block.var(mem.mem.name)
             assert isinstance(mem_var, Variable)
-            new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
+            new_mem = self.helper.create_variable_for_type_inference(
+                dtype=mem_var.dtype)
             rnn_block.append_op(
                 type='rnn_memory_helper',

@@ -813,7 +814,7 @@ def max_sequence_len(rank_table):
         ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
-    res = helper.create_tmp_variable(dtype="int64")
+    res = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="max_sequence_len",
         inputs={"RankTable": rank_table},

@@ -884,7 +885,7 @@ def array_to_lod_tensor(x, table):
          lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
     """
     helper = LayerHelper("array_to_lod_tensor", **locals())
-    tmp = helper.create_tmp_variable(dtype=x.dtype)
+    tmp = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="array_to_lod_tensor",
         inputs={'X': x,

@@ -915,7 +916,7 @@ def increment(x, value=1.0, in_place=True):
     """
     helper = LayerHelper("increment", **locals())
     if not in_place:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = x
     helper.append_op(

@@ -1012,7 +1013,7 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored):
     """
     helper = LayerHelper("less_than", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
 
     attrs = dict()

@@ -1051,7 +1052,7 @@ def equal(x, y, cond=None, **ignored):
     """
     helper = LayerHelper("equal", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     helper.append_op(

@@ -1098,7 +1099,7 @@ def array_read(array, i):
             array,
             Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
         raise TypeError("array should be tensor array vairable")
-    out = helper.create_tmp_variable(dtype=array.dtype)
+    out = helper.create_variable_for_type_inference(dtype=array.dtype)
     helper.append_op(
         type='read_from_array',
         inputs={'X': [array],

@@ -1133,7 +1134,7 @@ def shrink_memory(x, i, table):
         usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
         inputs={'X': [x],

@@ -1170,7 +1171,7 @@ def array_length(array):
     """
     helper = LayerHelper('array_length', **locals())
-    tmp = helper.create_tmp_variable(dtype='int64')
+    tmp = helper.create_variable_for_type_inference(dtype='int64')
     tmp.stop_gradient = True
     helper.append_op(
         type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})

@@ -1590,7 +1591,7 @@ class DynamicRNN(object):
         self.mem_dict = dict()
         self.output_array = []
         self.outputs = []
-        self.cond = self.helper.create_tmp_variable(dtype='bool')
+        self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
         self.cond.stop_gradient = False
         self.while_op = While(self.cond)
         self.input_array = []

@@ -1924,7 +1925,7 @@ def reorder_lod_tensor_by_rank(x, rank_table):
     helper.is_instance('x', Variable)
     helper.is_instance('rank_table', Variable)
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reorder_lod_tensor_by_rank',
         inputs={'X': [x],

@@ -1958,7 +1959,7 @@ def is_empty(x, cond=None, **ignored):
     """
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     elif not isinstance(cond, Variable):
         raise TypeError("cond takes a variable")
python/paddle/fluid/layers/detection.py

@@ -147,10 +147,11 @@ def rpn_target_assign(bbox_pred,
     helper = LayerHelper('rpn_target_assign', **locals())
     # Assign target label to anchors
-    loc_index = helper.create_tmp_variable(dtype='int32')
-    score_index = helper.create_tmp_variable(dtype='int32')
-    target_label = helper.create_tmp_variable(dtype='int32')
-    target_bbox = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    loc_index = helper.create_variable_for_type_inference(dtype='int32')
+    score_index = helper.create_variable_for_type_inference(dtype='int32')
+    target_label = helper.create_variable_for_type_inference(dtype='int32')
+    target_bbox = helper.create_variable_for_type_inference(
+        dtype=anchor_box.dtype)
     helper.append_op(
         type="rpn_target_assign",
         inputs={

@@ -282,7 +283,8 @@ def detection_output(loc,
     scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)
     scores = nn.transpose(scores, perm=[0, 2, 1])
     scores.stop_gradient = True
-    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
+    nmsed_outs = helper.create_variable_for_type_inference(
+        dtype=decoded_box.dtype)
     helper.append_op(
         type="multiclass_nms",
         inputs={'Scores': scores,

@@ -314,7 +316,7 @@ def iou_similarity(x, y, name=None):
     """
     helper = LayerHelper("iou_similarity", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)

@@ -351,7 +353,8 @@ def box_coder(prior_box,
     helper = LayerHelper("box_coder", **locals())
     if name is None:
-        output_box = helper.create_tmp_variable(dtype=prior_box.dtype)
+        output_box = helper.create_variable_for_type_inference(
+            dtype=prior_box.dtype)
     else:
         output_box = helper.create_variable(
             name=name, dtype=prior_box.dtype, persistable=False)

@@ -382,7 +385,7 @@ def polygon_box_transform(input, name=None):
     """
     helper = LayerHelper("polygon_box_transform", **locals())
     if name is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     else:
         output = helper.create_variable(
             name=name, dtype=prior_box.input, persistable=False)

@@ -450,7 +453,7 @@ def detection_map(detect_res,
     helper = LayerHelper("detection_map", **locals())
 
     def __create_var(type):
-        return helper.create_tmp_variable(dtype=type)
+        return helper.create_variable_for_type_inference(dtype=type)
 
     map_out = __create_var('float32')
     accum_pos_count_out = out_states[0] if out_states else __create_var('int32')

@@ -557,8 +560,9 @@ def bipartite_match(dist_matrix,
             >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
     """
     helper = LayerHelper('bipartite_match', **locals())
-    match_indices = helper.create_tmp_variable(dtype='int32')
-    match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype)
+    match_indices = helper.create_variable_for_type_inference(dtype='int32')
+    match_distance = helper.create_variable_for_type_inference(
+        dtype=dist_matrix.dtype)
     helper.append_op(
         type='bipartite_match',
         inputs={'DistMat': dist_matrix,

@@ -644,8 +648,8 @@ def target_assign(input,
                 gt, matched_indices, mismatch_value=0)
     """
     helper = LayerHelper('target_assign', **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
-    out_weight = helper.create_tmp_variable(dtype='float32')
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_weight = helper.create_variable_for_type_inference(dtype='float32')
     helper.append_op(
         type='target_assign',
         inputs={

@@ -816,9 +820,10 @@ def ssd_loss(location,
     conf_loss = nn.reshape(
         x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
     conf_loss.stop_gradient = True
-    neg_indices = helper.create_tmp_variable(dtype='int32')
+    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
     dtype = matched_indices.dtype
-    updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
+    updated_matched_indices = helper.create_variable_for_type_inference(
+        dtype=dtype)
     helper.append_op(
         type='mine_hard_examples',
         inputs={

@@ -998,8 +1003,8 @@ def prior_box(input,
         max_sizes = [max_sizes]
         attrs['max_sizes'] = max_sizes
-    box = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    box = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="prior_box",
         inputs={"Input": input,

@@ -1337,8 +1342,8 @@ def anchor_generator(input,
         'offset': offset
     }
-    anchor = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    anchor = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="anchor_generator",
         inputs={"Input": input},

@@ -1384,7 +1389,7 @@ def roi_perspective_transform(input,
     """
     helper = LayerHelper('roi_perspective_transform', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="roi_perspective_transform",
         inputs={"X": input,

@@ -1418,11 +1423,15 @@ def generate_proposal_labels(rpn_rois,
     helper = LayerHelper('generate_proposal_labels', **locals())
-    rois = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    labels_int32 = helper.create_tmp_variable(dtype=gt_classes.dtype)
-    bbox_targets = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_inside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_outside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
+    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
+    labels_int32 = helper.create_variable_for_type_inference(
+        dtype=gt_classes.dtype)
+    bbox_targets = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_inside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_outside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
     helper.append_op(
         type="generate_proposal_labels",

@@ -1504,8 +1513,10 @@ def generate_proposals(scores,
     """
     helper = LayerHelper('generate_proposals', **locals())
-    rpn_rois = helper.create_tmp_variable(dtype=bbox_deltas.dtype)
-    rpn_roi_probs = helper.create_tmp_variable(dtype=scores.dtype)
+    rpn_rois = helper.create_variable_for_type_inference(
+        dtype=bbox_deltas.dtype)
+    rpn_roi_probs = helper.create_variable_for_type_inference(
+        dtype=scores.dtype)
     helper.append_op(
         type="generate_proposals",
         inputs={
python/paddle/fluid/layers/io.py

@@ -954,7 +954,7 @@ def read_file(reader):
     """
     helper = LayerHelper('read_file')
     out = [
-        helper.create_tmp_variable(
+        helper.create_variable_for_type_inference(
             stop_gradient=True, dtype='float32')
         for _ in range(len(reader.desc.shapes()))
     ]
python/paddle/fluid/layers/layer_function_generator.py

@@ -202,10 +202,12 @@ def generate_layer_fn(op_type):
            out_var = out[0] if (isinstance(out, list) or
                                 isinstance(out, tuple)) else out
        else:
-            out_var = helper.create_tmp_variable(dtype=dtype)
+            out_var = helper.create_variable_for_type_inference(dtype=dtype)
        outputs[o_name] = [out_var]
        for name in intermediate_output_names:
-            outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
+            outputs[name] = [
+                helper.create_variable_for_type_inference(dtype=dtype)
+            ]
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
        return helper.append_activation(out_var)

@@ -229,7 +231,7 @@ def generate_layer_fn_noattr(op_type):
    def func(x, name=None):
        helper = LayerHelper(op_type, **locals())
-        output = helper.create_tmp_variable(dtype=x.dtype)
+        output = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
        return output
python/paddle/fluid/layers/metric_op.py

@@ -58,11 +58,11 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     helper = LayerHelper("accuracy", **locals())
     topk_out, topk_indices = nn.topk(input, k=k)
-    acc_out = helper.create_tmp_variable(dtype="float32")
+    acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
-        correct = helper.create_tmp_variable(dtype="int64")
+        correct = helper.create_variable_for_type_inference(dtype="int64")
     if total is None:
-        total = helper.create_tmp_variable(dtype="int64")
+        total = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={

@@ -124,8 +124,8 @@ def auc(input,
         auc_out=fluid.layers.auc(input=prediction, label=label)
     """
     helper = LayerHelper("auc", **locals())
-    auc_out = helper.create_tmp_variable(dtype="float64")
-    batch_auc_out = helper.create_tmp_variable(dtype="float64")
+    auc_out = helper.create_variable_for_type_inference(dtype="float64")
+    batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
 
     # for batch auc
python/paddle/fluid/layers/nn.py

(Diff collapsed on the original page and not captured: +186 additions, −168 deletions.)
python/paddle/fluid/layers/tensor.py

@@ -152,7 +152,7 @@ def cast(x, dtype):
         result = fluid.layers.cast(x=data, dtype='float64')
     """
     helper = LayerHelper('cast', **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='cast',
         inputs={'X': [x]},

@@ -184,7 +184,7 @@ def concat(input, axis=0, name=None):
         out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
     """
     helper = LayerHelper('concat', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='concat',
         inputs={'X': input},

@@ -221,7 +221,8 @@ def sums(input, out=None):
     """
     helper = LayerHelper('sum', **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype())
     helper.append_op(
         type='sum',
         inputs={'X': input},

@@ -252,7 +253,7 @@ def assign(input, output=None):
     """
     helper = LayerHelper('assign', **locals())
     if output is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     if isinstance(input, Variable):
         helper.append_op(
             type='assign', inputs={'X': [input]}, outputs={'Out': [output]})

@@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
     helper = LayerHelper("fill_constant", **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=dtype)
+        out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant',
         inputs={},

@@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input,
         ${out_comment}.
     """
     helper = LayerHelper("fill_constant_batch_size_like", **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant_batch_size_like',
         inputs={'Input': input},

@@ -396,7 +397,7 @@ def argmin(x, axis=0):
         out = fluid.layers.argmin(x=in, axis=-1)
     """
     helper = LayerHelper("arg_min", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_min',
         inputs={'X': x},

@@ -427,7 +428,7 @@ def argmax(x, axis=0):
         out = fluid.layers.argmax(x=in, axis=-1)
     """
     helper = LayerHelper("arg_max", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_max',
         inputs={'X': x},

@@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None):
         out, indices = fluid.layers.argsort(input, axis=0)
     """
     helper = LayerHelper("argsort", **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
-    ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
+    out = helper.create_variable_for_type_inference(
+        dtype=input.dtype, stop_gradient=True)
+    ids = helper.create_variable_for_type_inference(
+        VarDesc.VarType.INT64, stop_gradient=True)
     helper.append_op(
         type='argsort',
         inputs={'X': input},

@@ -562,7 +565,7 @@ def reverse(x, axis):
     if isinstance(axis, int):
         axis = [axis]
     helper = LayerHelper("reverse", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reverse',
         inputs={'Input': x},

@@ -654,7 +657,7 @@ def has_inf(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isinf", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -670,7 +673,7 @@ def has_nan(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isnan", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
     return out

@@ -687,6 +690,6 @@ def isfinite(x):
         Variable: The tensor variable storing the output, contains a bool value.
     """
     helper = LayerHelper("isfinite", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
     return out
python/paddle/fluid/regularizer.py

@@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(

@@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
python/paddle/fluid/tests/unittests/test_slice_var.py

@@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase):
             var = program.global_block().create_var(
                 name=str(random.randint(10000, 99999)),
                 persistable=True,
-                # dtype=core.VarDesc.VarType.LOD_TENSOR,
                 shape=shape)
             var_list.append(var)
         blocks = slice_variable(var_list, 10, min_size)