s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 3099a8f3

Authored Oct 23, 2018 by guosheng

    Merge branch 'develop' of https://github.com/PaddlePaddle/paddle into add-reshape-reuse-input

    test=develop

Parents: 6447b69a, 96e9b658
Showing 11 changed files with 289 additions and 251 deletions (+289 −251)
    paddle/fluid/framework/op_desc.cc                         +5    -11
    python/paddle/fluid/layer_helper.py                       +12   -3
    python/paddle/fluid/layers/control_flow.py                +17   -16
    python/paddle/fluid/layers/detection.py                   +38   -27
    python/paddle/fluid/layers/io.py                          +1    -1
    python/paddle/fluid/layers/layer_function_generator.py    +5    -3
    python/paddle/fluid/layers/metric_op.py                   +5    -5
    python/paddle/fluid/layers/nn.py                          +187  -168
    python/paddle/fluid/layers/tensor.py                      +17   -14
    python/paddle/fluid/regularizer.py                        +2    -2
    python/paddle/fluid/tests/unittests/test_slice_var.py     +0    -1
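Every Python hunk below applies the same mechanical rename: LayerHelper.create_tmp_variable becomes LayerHelper.create_variable_for_type_inference. The new name records that a freshly created variable's type is only a LOD_TENSOR default until the owning operator's VarTypeInference runs. The representative before/after shape of each change (variable names and dtypes differ per layer):

    # before
    tmp = helper.create_tmp_variable(dtype=x.dtype)
    # after
    tmp = helper.create_variable_for_type_inference(dtype=x.dtype)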
paddle/fluid/framework/op_desc.cc

@@ -515,20 +515,14 @@ void OpDesc::InferShape(const BlockDesc &block) const {
 }
 
 void OpDesc::InferVarType(BlockDesc *block) const {
+  // There are a few places that var type can be set.
+  // When VarDesc is created, default set to LOD_TENSOR.
+  // When output variable is created, default is defaut set to LOD_TENSOR.
+  // We limit here to be the only place that operator defines its customized
+  // var type inference. Hence, we don't do any "default" setting here.
   auto &info = OpInfoMap::Instance().Get(this->Type());
   if (info.infer_var_type_) {
     info.infer_var_type_(*this, block);
-  } else {
-    // all output type is LoDTensor by default
-    VLOG(10) << this->Type()
-             << " has not registered InferVarType. Set output variables to "
-                "LOD_TENSOR";
-    for (auto &out_pair : this->outputs_) {
-      for (auto &out_var_name : out_pair.second) {
-        block->FindRecursiveOrCreateVar(out_var_name)
-            .SetType(proto::VarType::LOD_TENSOR);
-      }
-    }
   }
 }
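The hunk above removes the fallback that stamped every output of an operator without a registered InferVarType as LOD_TENSOR at inference time; after this commit that default is applied once, when the variable is created (see the layer_helper.py change below), and InferVarType only dispatches to a registered inference function. A minimal runnable sketch of that contract — Block, this create_variable_for_type_inference signature, and infer_fn are illustrative stand-ins, not Paddle's actual classes:

    LOD_TENSOR = "LOD_TENSOR"

    class Block(object):
        """Toy stand-in for a framework block, not Paddle's class."""

        def __init__(self):
            self.vars = {}

        def create_variable_for_type_inference(self, name, dtype):
            # The LOD_TENSOR default is set exactly once, at creation.
            self.vars[name] = {"dtype": dtype, "type": LOD_TENSOR}

        def infer_var_type(self, out_name, infer_fn=None):
            # Mirrors the new OpDesc::InferVarType: no "default" setting
            # here; only a registered inference function may change a type.
            if infer_fn is not None:
                infer_fn(self.vars[out_name])

    block = Block()
    block.create_variable_for_type_inference("out", dtype="float32")
    block.infer_var_type("out")  # op without InferVarType: stays LOD_TENSOR
    block.infer_var_type("out", lambda v: v.update(type="LOD_TENSOR_ARRAY"))
    print(block.vars["out"])  # {'dtype': 'float32', 'type': 'LOD_TENSOR_ARRAY'}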
python/paddle/fluid/layer_helper.py

@@ -324,10 +324,19 @@ class LayerHelper(object):
             raise ValueError("no Parameter name %s found" % name)
         return param
 
-    def create_tmp_variable(self, dtype, stop_gradient=False):
+    def create_variable_for_type_inference(self, dtype, stop_gradient=False):
+        """Create a temporary variable that should be type inferred layer.
+
+        Note:
+            The default type will be set to LOD_TENSOR. However, when
+            the var is used as operator output, its type will be updated
+            based on operator's `VarTypeInference` implementation in
+            infer_var_type.
+        """
         return self.main_program.current_block().create_var(
             name=unique_name.generate(".".join([self.name, 'tmp'])),
             dtype=dtype,
+            type=core.VarDesc.VarType.LOD_TENSOR,
             persistable=False,
             stop_gradient=stop_gradient)

@@ -388,7 +397,7 @@ class LayerHelper(object):
         b = self.create_parameter(
             attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True)
-        tmp = self.create_tmp_variable(dtype=input_var.dtype)
+        tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type='elementwise_add',
             inputs={'X': [input_var],

@@ -414,7 +423,7 @@ class LayerHelper(object):
             tmp = input_var
         # NOTE(dzhwinter): some activation support inplace compution.
         if not core.IsInplace(act_type):
-            tmp = self.create_tmp_variable(dtype=input_var.dtype)
+            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
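All of the layer changes that follow repeat the pattern this file establishes: allocate each output with create_variable_for_type_inference, then let the operator appended by append_op supply the real type. A hedged sketch of a layer written against the renamed helper; 'my_op' is a placeholder operator name, not a real Paddle op:

    def my_layer(x):
        # Sketch only: LayerHelper as used throughout this diff; the op
        # type 'my_op' is hypothetical.
        helper = LayerHelper('my_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='my_op', inputs={'X': [x]}, outputs={'Out': [out]})
        return out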
python/paddle/fluid/layers/control_flow.py

@@ -80,8 +80,8 @@ def split_lod_tensor(input, mask, level=0):
     """
     helper = LayerHelper('split_lod_tensor', **locals())
-    out_true = helper.create_tmp_variable(dtype=input.dtype)
-    out_false = helper.create_tmp_variable(dtype=input.dtype)
+    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='split_lod_tensor',
         inputs={

@@ -131,7 +131,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
                 in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
     """
     helper = LayerHelper('merge_lod_tensor', **locals())
-    out = helper.create_tmp_variable(dtype=in_true.dtype)
+    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
     helper.append_op(
         type='merge_lod_tensor',
         inputs={'X': x,

@@ -524,7 +524,7 @@ class StaticRNN(object):
         if not isinstance(o, Variable):
             raise TypeError("step output takes a Variable")
-        tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
+        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
         self.helper.append_op(
             type='rnn_memory_helper',
             inputs={'X': [o]},

@@ -606,7 +606,8 @@ class StaticRNN(object):
             pre_memories.append(mem.pre_mem.name)
             mem_var = rnn_block.var(mem.mem.name)
             assert isinstance(mem_var, Variable)
-            new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
+            new_mem = self.helper.create_variable_for_type_inference(
+                dtype=mem_var.dtype)
             rnn_block.append_op(
                 type='rnn_memory_helper',

@@ -813,7 +814,7 @@ def max_sequence_len(rank_table):
         ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
-    res = helper.create_tmp_variable(dtype="int64")
+    res = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="max_sequence_len",
         inputs={"RankTable": rank_table},

@@ -884,7 +885,7 @@ def array_to_lod_tensor(x, table):
          lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
     """
     helper = LayerHelper("array_to_lod_tensor", **locals())
-    tmp = helper.create_tmp_variable(dtype=x.dtype)
+    tmp = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="array_to_lod_tensor",
         inputs={'X': x,

@@ -915,7 +916,7 @@ def increment(x, value=1.0, in_place=True):
     """
     helper = LayerHelper("increment", **locals())
     if not in_place:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = x
     helper.append_op(

@@ -1012,7 +1013,7 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored):
     """
     helper = LayerHelper("less_than", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     attrs = dict()

@@ -1051,7 +1052,7 @@ def equal(x, y, cond=None, **ignored):
     """
     helper = LayerHelper("equal", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     helper.append_op(

@@ -1098,7 +1099,7 @@ def array_read(array, i):
             array,
             Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
         raise TypeError("array should be tensor array vairable")
-    out = helper.create_tmp_variable(dtype=array.dtype)
+    out = helper.create_variable_for_type_inference(dtype=array.dtype)
     helper.append_op(
         type='read_from_array',
         inputs={'X': [array],

@@ -1133,7 +1134,7 @@ def shrink_memory(x, i, table):
         usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
         inputs={'X': [x],

@@ -1170,7 +1171,7 @@ def array_length(array):
     """
     helper = LayerHelper('array_length', **locals())
-    tmp = helper.create_tmp_variable(dtype='int64')
+    tmp = helper.create_variable_for_type_inference(dtype='int64')
     tmp.stop_gradient = True
     helper.append_op(
         type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})

@@ -1590,7 +1591,7 @@ class DynamicRNN(object):
         self.mem_dict = dict()
         self.output_array = []
         self.outputs = []
-        self.cond = self.helper.create_tmp_variable(dtype='bool')
+        self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
         self.cond.stop_gradient = False
         self.while_op = While(self.cond)
         self.input_array = []

@@ -1924,7 +1925,7 @@ def reorder_lod_tensor_by_rank(x, rank_table):
     helper.is_instance('x', Variable)
     helper.is_instance('rank_table', Variable)
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reorder_lod_tensor_by_rank',
         inputs={'X': [x],

@@ -1958,7 +1959,7 @@ def is_empty(x, cond=None, **ignored):
     """
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     elif not isinstance(cond, Variable):
         raise TypeError("cond takes a variable")
python/paddle/fluid/layers/detection.py

@@ -147,10 +147,11 @@ def rpn_target_assign(bbox_pred,
     helper = LayerHelper('rpn_target_assign', **locals())
     # Assign target label to anchors
-    loc_index = helper.create_tmp_variable(dtype='int32')
-    score_index = helper.create_tmp_variable(dtype='int32')
-    target_label = helper.create_tmp_variable(dtype='int32')
-    target_bbox = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    loc_index = helper.create_variable_for_type_inference(dtype='int32')
+    score_index = helper.create_variable_for_type_inference(dtype='int32')
+    target_label = helper.create_variable_for_type_inference(dtype='int32')
+    target_bbox = helper.create_variable_for_type_inference(
+        dtype=anchor_box.dtype)
     helper.append_op(
         type="rpn_target_assign",
         inputs={

@@ -282,7 +283,8 @@ def detection_output(loc,
     scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)
     scores = nn.transpose(scores, perm=[0, 2, 1])
     scores.stop_gradient = True
-    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
+    nmsed_outs = helper.create_variable_for_type_inference(
+        dtype=decoded_box.dtype)
     helper.append_op(
         type="multiclass_nms",
         inputs={'Scores': scores,

@@ -314,7 +316,7 @@ def iou_similarity(x, y, name=None):
     """
     helper = LayerHelper("iou_similarity", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)

@@ -351,7 +353,8 @@ def box_coder(prior_box,
     helper = LayerHelper("box_coder", **locals())
     if name is None:
-        output_box = helper.create_tmp_variable(dtype=prior_box.dtype)
+        output_box = helper.create_variable_for_type_inference(
+            dtype=prior_box.dtype)
     else:
         output_box = helper.create_variable(
             name=name, dtype=prior_box.dtype, persistable=False)

@@ -382,7 +385,7 @@ def polygon_box_transform(input, name=None):
     """
     helper = LayerHelper("polygon_box_transform", **locals())
     if name is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     else:
         output = helper.create_variable(
             name=name, dtype=prior_box.input, persistable=False)

@@ -450,7 +453,7 @@ def detection_map(detect_res,
     helper = LayerHelper("detection_map", **locals())

     def __create_var(type):
-        return helper.create_tmp_variable(dtype=type)
+        return helper.create_variable_for_type_inference(dtype=type)

     map_out = __create_var('float32')
     accum_pos_count_out = out_states[0] if out_states else __create_var('int32')

@@ -557,8 +560,9 @@ def bipartite_match(dist_matrix,
         >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
     """
     helper = LayerHelper('bipartite_match', **locals())
-    match_indices = helper.create_tmp_variable(dtype='int32')
-    match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype)
+    match_indices = helper.create_variable_for_type_inference(dtype='int32')
+    match_distance = helper.create_variable_for_type_inference(
+        dtype=dist_matrix.dtype)
     helper.append_op(
         type='bipartite_match',
         inputs={'DistMat': dist_matrix},

@@ -644,8 +648,8 @@ def target_assign(input,
                gt, matched_indices, mismatch_value=0)
     """
     helper = LayerHelper('target_assign', **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
-    out_weight = helper.create_tmp_variable(dtype='float32')
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_weight = helper.create_variable_for_type_inference(dtype='float32')
     helper.append_op(
         type='target_assign',
         inputs={

@@ -816,9 +820,10 @@ def ssd_loss(location,
     conf_loss = nn.reshape(
         x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
     conf_loss.stop_gradient = True
-    neg_indices = helper.create_tmp_variable(dtype='int32')
+    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
     dtype = matched_indices.dtype
-    updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
+    updated_matched_indices = helper.create_variable_for_type_inference(
+        dtype=dtype)
     helper.append_op(
         type='mine_hard_examples',
         inputs={

@@ -998,8 +1003,8 @@ def prior_box(input,
             max_sizes = [max_sizes]
         attrs['max_sizes'] = max_sizes
-    box = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    box = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="prior_box",
         inputs={"Input": input,

@@ -1337,8 +1342,8 @@ def anchor_generator(input,
         'offset': offset
     }
-    anchor = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    anchor = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="anchor_generator",
         inputs={"Input": input},

@@ -1384,7 +1389,7 @@ def roi_perspective_transform(input,
     """
     helper = LayerHelper('roi_perspective_transform', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="roi_perspective_transform",
         inputs={"X": input,

@@ -1418,11 +1423,15 @@ def generate_proposal_labels(rpn_rois,
     helper = LayerHelper('generate_proposal_labels', **locals())
-    rois = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    labels_int32 = helper.create_tmp_variable(dtype=gt_classes.dtype)
-    bbox_targets = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_inside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_outside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
+    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
+    labels_int32 = helper.create_variable_for_type_inference(
+        dtype=gt_classes.dtype)
+    bbox_targets = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_inside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_outside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
     helper.append_op(
         type="generate_proposal_labels",

@@ -1504,8 +1513,10 @@ def generate_proposals(scores,
     """
     helper = LayerHelper('generate_proposals', **locals())
-    rpn_rois = helper.create_tmp_variable(dtype=bbox_deltas.dtype)
-    rpn_roi_probs = helper.create_tmp_variable(dtype=scores.dtype)
+    rpn_rois = helper.create_variable_for_type_inference(
+        dtype=bbox_deltas.dtype)
+    rpn_roi_probs = helper.create_variable_for_type_inference(
+        dtype=scores.dtype)
     helper.append_op(
         type="generate_proposals",
         inputs={
python/paddle/fluid/layers/io.py

@@ -954,7 +954,7 @@ def read_file(reader):
     """
     helper = LayerHelper('read_file')
     out = [
-        helper.create_tmp_variable(
+        helper.create_variable_for_type_inference(
             stop_gradient=True, dtype='float32')
         for _ in range(len(reader.desc.shapes()))
     ]
python/paddle/fluid/layers/layer_function_generator.py

@@ -202,10 +202,12 @@ def generate_layer_fn(op_type):
             out_var = out[0] if (isinstance(out, list) or
                                  isinstance(out, tuple)) else out
         else:
-            out_var = helper.create_tmp_variable(dtype=dtype)
+            out_var = helper.create_variable_for_type_inference(dtype=dtype)
         outputs[o_name] = [out_var]
         for name in intermediate_output_names:
-            outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
+            outputs[name] = [
+                helper.create_variable_for_type_inference(dtype=dtype)
+            ]
         helper.append_op(
             type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
         return helper.append_activation(out_var)

@@ -229,7 +231,7 @@ def generate_layer_fn_noattr(op_type):
     def func(x, name=None):
         helper = LayerHelper(op_type, **locals())
-        output = helper.create_tmp_variable(dtype=x.dtype)
+        output = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
         return output
python/paddle/fluid/layers/metric_op.py

@@ -58,11 +58,11 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     helper = LayerHelper("accuracy", **locals())
     topk_out, topk_indices = nn.topk(input, k=k)
-    acc_out = helper.create_tmp_variable(dtype="float32")
+    acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
-        correct = helper.create_tmp_variable(dtype="int64")
+        correct = helper.create_variable_for_type_inference(dtype="int64")
     if total is None:
-        total = helper.create_tmp_variable(dtype="int64")
+        total = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={

@@ -124,8 +124,8 @@ def auc(input,
         auc_out=fluid.layers.auc(input=prediction, label=label)
     """
     helper = LayerHelper("auc", **locals())
-    auc_out = helper.create_tmp_variable(dtype="float64")
-    batch_auc_out = helper.create_tmp_variable(dtype="float64")
+    auc_out = helper.create_variable_for_type_inference(dtype="float64")
+    batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
     # for batch auc
python/paddle/fluid/layers/nn.py
浏览文件 @
3099a8f3
...
...
@@ -242,7 +242,7 @@ def fc(input,
w
=
helper
.
create_parameter
(
attr
=
param_attr
,
shape
=
param_shape
,
dtype
=
dtype
,
is_bias
=
False
)
tmp
=
helper
.
create_
tmp_variabl
e
(
dtype
)
tmp
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"mul"
,
inputs
=
{
"X"
:
input_var
,
...
...
@@ -255,7 +255,7 @@ def fc(input,
if
len
(
mul_results
)
==
1
:
pre_bias
=
mul_results
[
0
]
else
:
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"sum"
,
inputs
=
{
"X"
:
mul_results
},
...
...
@@ -314,7 +314,7 @@ def embedding(input,
helper
=
LayerHelper
(
'embedding'
,
**
locals
())
w
=
helper
.
create_parameter
(
attr
=
helper
.
param_attr
,
shape
=
size
,
dtype
=
dtype
,
is_bias
=
False
)
tmp
=
helper
.
create_
tmp_variabl
e
(
dtype
)
tmp
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
padding_idx
=
-
1
if
padding_idx
is
None
else
padding_idx
if
padding_idx
>=
0
else
(
size
[
0
]
+
padding_idx
)
helper
.
append_op
(
...
...
@@ -418,10 +418,10 @@ def dynamic_lstm(input,
bias
=
helper
.
create_parameter
(
attr
=
helper
.
bias_attr
,
shape
=
bias_size
,
dtype
=
dtype
,
is_bias
=
True
)
hidden
=
helper
.
create_
tmp_variabl
e
(
dtype
)
cell
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_gate
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_cell_pre_act
=
helper
.
create_
tmp_variabl
e
(
dtype
)
hidden
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
cell
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_gate
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_cell_pre_act
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
inputs
=
{
'Input'
:
input
,
'Weight'
:
weight
,
'Bias'
:
bias
}
batch_size
=
input
.
shape
[
0
]
if
h_0
:
...
...
@@ -621,12 +621,12 @@ def dynamic_lstmp(input,
bias
=
helper
.
create_parameter
(
attr
=
helper
.
bias_attr
,
shape
=
bias_size
,
dtype
=
dtype
,
is_bias
=
True
)
projection
=
helper
.
create_
tmp_variabl
e
(
dtype
)
cell
=
helper
.
create_
tmp_variabl
e
(
dtype
)
ordered_proj0
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_hidden
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_gate
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_cell_pre_act
=
helper
.
create_
tmp_variabl
e
(
dtype
)
projection
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
cell
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
ordered_proj0
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_hidden
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_gate
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_cell_pre_act
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'lstmp'
,
...
...
@@ -751,10 +751,10 @@ def dynamic_gru(input,
),
'The shape of h0 should be(batch_size, %d)'
%
size
inputs
[
'H0'
]
=
h_0
hidden
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_gate
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_reset_hidden_prev
=
helper
.
create_
tmp_variabl
e
(
dtype
)
batch_hidden
=
helper
.
create_
tmp_variabl
e
(
dtype
)
hidden
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_gate
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_reset_hidden_prev
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
batch_hidden
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'gru'
,
...
...
@@ -844,9 +844,9 @@ def gru_unit(input,
weight
=
helper
.
create_parameter
(
attr
=
helper
.
param_attr
,
shape
=
[
size
,
3
*
size
],
dtype
=
dtype
)
gate
=
helper
.
create_
tmp_variabl
e
(
dtype
)
reset_hidden_pre
=
helper
.
create_
tmp_variabl
e
(
dtype
)
updated_hidden
=
helper
.
create_
tmp_variabl
e
(
dtype
)
gate
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
reset_hidden_pre
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
updated_hidden
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
inputs
=
{
'Input'
:
input
,
'HiddenPrev'
:
hidden
,
'Weight'
:
weight
}
# create bias
if
helper
.
bias_attr
:
...
...
@@ -896,10 +896,14 @@ def linear_chain_crf(input, label, param_attr=None):
attr
=
helper
.
param_attr
,
shape
=
[
size
+
2
,
size
],
dtype
=
helper
.
input_dtype
())
alpha
=
helper
.
create_tmp_variable
(
dtype
=
helper
.
input_dtype
())
emission_exps
=
helper
.
create_tmp_variable
(
dtype
=
helper
.
input_dtype
())
transition_exps
=
helper
.
create_tmp_variable
(
dtype
=
helper
.
input_dtype
())
log_likelihood
=
helper
.
create_tmp_variable
(
dtype
=
helper
.
input_dtype
())
alpha
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
emission_exps
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
transition_exps
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
log_likelihood
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
helper
.
append_op
(
type
=
'linear_chain_crf'
,
inputs
=
{
"Emission"
:
[
input
],
...
...
@@ -938,7 +942,8 @@ def crf_decoding(input, param_attr, label=None):
"""
helper
=
LayerHelper
(
'crf_decoding'
,
**
locals
())
transition
=
helper
.
get_parameter
(
param_attr
.
name
)
viterbi_path
=
helper
.
create_tmp_variable
(
dtype
=
helper
.
input_dtype
())
viterbi_path
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
helper
.
append_op
(
type
=
'crf_decoding'
,
inputs
=
{
"Emission"
:
[
input
],
...
...
@@ -962,9 +967,9 @@ def cos_sim(X, Y):
Variable: the output of cosine(X, Y).
"""
helper
=
LayerHelper
(
'cos_sim'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
X
.
dtype
)
xnorm
=
helper
.
create_
tmp_variabl
e
(
dtype
=
X
.
dtype
)
ynorm
=
helper
.
create_
tmp_variabl
e
(
dtype
=
X
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
X
.
dtype
)
xnorm
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
X
.
dtype
)
ynorm
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
X
.
dtype
)
helper
.
append_op
(
type
=
'cos_sim'
,
inputs
=
{
'X'
:
[
X
],
...
...
@@ -1008,8 +1013,9 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
"""
helper
=
LayerHelper
(
'dropout'
,
**
locals
())
out
=
helper
.
create_tmp_variable
(
dtype
=
x
.
dtype
)
mask
=
helper
.
create_tmp_variable
(
dtype
=
x
.
dtype
,
stop_gradient
=
True
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
mask
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
,
stop_gradient
=
True
)
if
(
seed
is
None
or
seed
==
0
)
and
helper
.
main_program
.
random_seed
!=
0
:
seed
=
helper
.
main_program
.
random_seed
...
...
@@ -1094,7 +1100,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=-100):
cost = fluid.layers.cross_entropy(input=predict, label=label)
"""
helper
=
LayerHelper
(
'cross_entropy'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
'cross_entropy'
,
inputs
=
{
'X'
:
[
input
],
...
...
@@ -1141,14 +1147,14 @@ def square_error_cost(input, label):
"""
helper
=
LayerHelper
(
'square_error_cost'
,
**
locals
())
minus_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
minus_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
'elementwise_sub'
,
inputs
=
{
'X'
:
[
input
],
'Y'
:
[
label
]},
outputs
=
{
'Out'
:
[
minus_out
]})
square_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
square_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
'square'
,
inputs
=
{
'X'
:
[
minus_out
]},
outputs
=
{
'Out'
:
[
square_out
]})
...
...
@@ -1254,12 +1260,13 @@ def chunk_eval(input,
helper
=
LayerHelper
(
"chunk_eval"
,
**
locals
())
# prepare output
precision
=
helper
.
create_tmp_variable
(
dtype
=
"float32"
)
recall
=
helper
.
create_tmp_variable
(
dtype
=
"float32"
)
f1_score
=
helper
.
create_tmp_variable
(
dtype
=
"float32"
)
num_infer_chunks
=
helper
.
create_tmp_variable
(
dtype
=
"int64"
)
num_label_chunks
=
helper
.
create_tmp_variable
(
dtype
=
"int64"
)
num_correct_chunks
=
helper
.
create_tmp_variable
(
dtype
=
"int64"
)
precision
=
helper
.
create_variable_for_type_inference
(
dtype
=
"float32"
)
recall
=
helper
.
create_variable_for_type_inference
(
dtype
=
"float32"
)
f1_score
=
helper
.
create_variable_for_type_inference
(
dtype
=
"float32"
)
num_infer_chunks
=
helper
.
create_variable_for_type_inference
(
dtype
=
"int64"
)
num_label_chunks
=
helper
.
create_variable_for_type_inference
(
dtype
=
"int64"
)
num_correct_chunks
=
helper
.
create_variable_for_type_inference
(
dtype
=
"int64"
)
helper
.
append_op
(
type
=
"chunk_eval"
,
...
...
@@ -1326,7 +1333,7 @@ def sequence_conv(input,
filter_shape
=
[
filter_size
*
input
.
shape
[
1
],
num_filters
]
filter_param
=
helper
.
create_parameter
(
attr
=
helper
.
param_attr
,
shape
=
filter_shape
,
dtype
=
dtype
)
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'sequence_conv'
,
...
...
@@ -1382,7 +1389,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
"""
helper
=
LayerHelper
(
'sequence_softmax'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
softmax_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
softmax_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"sequence_softmax"
,
inputs
=
{
"X"
:
input
},
...
...
@@ -1436,7 +1443,7 @@ def softmax(input, use_cudnn=True, name=None):
"""
helper
=
LayerHelper
(
'softmax'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
softmax_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
softmax_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"softmax"
,
inputs
=
{
"X"
:
input
},
...
...
@@ -1599,7 +1606,7 @@ def conv2d(input,
dtype
=
dtype
,
default_initializer
=
_get_default_param_initializer
())
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
l_type
,
...
...
@@ -1770,7 +1777,7 @@ def conv3d(input,
dtype
=
dtype
,
default_initializer
=
_get_default_param_initializer
())
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
l_type
,
...
...
@@ -1849,8 +1856,8 @@ def sequence_pool(input, pool_type):
"""
helper
=
LayerHelper
(
'sequence_pool'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
pool_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
max_index
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pool_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
max_index
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"sequence_pool"
,
...
...
@@ -1886,7 +1893,7 @@ def sequence_concat(input, name=None):
out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
"""
helper
=
LayerHelper
(
'sequence_concat'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
helper
.
append_op
(
type
=
'sequence_concat'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
[
out
]})
return
out
...
...
@@ -2013,7 +2020,7 @@ def sequence_slice(input, offset, length, name=None):
"""
helper
=
LayerHelper
(
"sequence_slice"
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
offset
.
stop_gradient
=
True
length
.
stop_gradient
=
True
...
...
@@ -2099,7 +2106,7 @@ def pool2d(input,
helper
=
LayerHelper
(
l_type
,
**
locals
())
dtype
=
helper
.
input_dtype
()
pool_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pool_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
l_type
,
...
...
@@ -2167,7 +2174,7 @@ def pool3d(input,
l_type
=
"pool3d"
helper
=
LayerHelper
(
l_type
,
**
locals
())
dtype
=
helper
.
input_dtype
()
pool_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pool_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
l_type
,
...
...
@@ -2310,10 +2317,13 @@ def batch_norm(input,
mean_out
=
mean
# variance and variance out share the same memory
variance_out
=
variance
saved_mean
=
helper
.
create_tmp_variable
(
dtype
=
dtype
,
stop_gradient
=
True
)
saved_variance
=
helper
.
create_tmp_variable
(
dtype
=
dtype
,
stop_gradient
=
True
)
saved_mean
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
,
stop_gradient
=
True
)
saved_variance
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
,
stop_gradient
=
True
)
batch_norm_out
=
input
if
in_place
else
helper
.
create_tmp_variable
(
dtype
)
batch_norm_out
=
input
if
in_place
else
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
"batch_norm"
,
...
...
@@ -2430,9 +2440,11 @@ def layer_norm(input,
inputs
[
'Bias'
]
=
bias
# create output
mean_out
=
helper
.
create_tmp_variable
(
dtype
=
dtype
,
stop_gradient
=
True
)
variance_out
=
helper
.
create_tmp_variable
(
dtype
=
dtype
,
stop_gradient
=
True
)
layer_norm_out
=
helper
.
create_tmp_variable
(
dtype
)
mean_out
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
,
stop_gradient
=
True
)
variance_out
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
,
stop_gradient
=
True
)
layer_norm_out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
"layer_norm"
,
...
...
@@ -2619,7 +2631,7 @@ def conv2d_transpose(input,
img_filter
=
helper
.
create_parameter
(
dtype
=
input
.
dtype
,
shape
=
filter_shape
,
attr
=
helper
.
param_attr
)
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
op_type
,
inputs
=
{
'Input'
:
[
input
],
...
...
@@ -2797,7 +2809,7 @@ def conv3d_transpose(input,
img_filter
=
helper
.
create_parameter
(
dtype
=
input
.
dtype
,
shape
=
filter_shape
,
attr
=
helper
.
param_attr
)
pre_bias
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
pre_bias
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
l_type
,
inputs
=
{
'Input'
:
[
input
],
...
...
@@ -2876,7 +2888,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
"""
helper
=
LayerHelper
(
'sequence_expand'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
tmp
=
helper
.
create_
tmp_variabl
e
(
dtype
)
tmp
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'sequence_expand'
,
inputs
=
{
'X'
:
x
,
...
...
@@ -2942,7 +2954,7 @@ def sequence_expand_as(x, y, name=None):
"""
helper
=
LayerHelper
(
'sequence_expand_as'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
tmp
=
helper
.
create_
tmp_variabl
e
(
dtype
)
tmp
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'sequence_expand_as'
,
inputs
=
{
'X'
:
x
,
...
...
@@ -2987,8 +2999,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
helper
=
LayerHelper
(
'sequence_pad'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
length
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
length
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
pad_value
.
stop_gradient
=
True
length
.
stop_gradient
=
True
...
...
@@ -3053,7 +3065,7 @@ def sequence_unpad(x, length, name=None):
helper
=
LayerHelper
(
'sequence_unpad'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
length
.
stop_gradient
=
True
...
...
@@ -3152,8 +3164,9 @@ def beam_search(pre_ids,
score_type
=
scores
.
dtype
id_type
=
ids
.
dtype
selected_scores
=
helper
.
create_tmp_variable
(
dtype
=
score_type
)
selected_ids
=
helper
.
create_tmp_variable
(
dtype
=
id_type
)
selected_scores
=
helper
.
create_variable_for_type_inference
(
dtype
=
score_type
)
selected_ids
=
helper
.
create_variable_for_type_inference
(
dtype
=
id_type
)
helper
.
append_op
(
type
=
'beam_search'
,
...
...
@@ -3210,8 +3223,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
ids, scores, beam_size=5, end_id=0)
"""
helper
=
LayerHelper
(
'beam_search_decode'
,
**
locals
())
sentence_ids
=
helper
.
create_
tmp_variabl
e
(
dtype
=
ids
.
dtype
)
sentence_scores
=
helper
.
create_
tmp_variabl
e
(
dtype
=
ids
.
dtype
)
sentence_ids
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
ids
.
dtype
)
sentence_scores
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
ids
.
dtype
)
helper
.
append_op
(
type
=
"beam_search_decode"
,
...
...
@@ -3341,8 +3354,8 @@ def lstm_unit(x_t,
param_attr
=
param_attr
,
bias_attr
=
bias_attr
)
dtype
=
x_t
.
dtype
c
=
helper
.
create_
tmp_variabl
e
(
dtype
)
h
=
helper
.
create_
tmp_variabl
e
(
dtype
)
c
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
h
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'lstm_unit'
,
...
...
@@ -3396,7 +3409,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
"""
helper
=
LayerHelper
(
'reduce_sum'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
if
dim
is
not
None
and
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
helper
.
append_op
(
...
...
@@ -3453,7 +3466,7 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_mean(x, dim=[0, 1]) # [4.0, 5.0]
"""
helper
=
LayerHelper
(
'reduce_mean'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
if
dim
is
not
None
and
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
helper
.
append_op
(
...
...
@@ -3508,7 +3521,7 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_max(x, dim=[0, 1]) # [7.0, 8.0]
"""
helper
=
LayerHelper
(
'reduce_max'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
if
dim
is
not
None
and
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
helper
.
append_op
(
...
...
@@ -3563,7 +3576,7 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_min(x, dim=[0, 1]) # [1.0, 2.0]
"""
helper
=
LayerHelper
(
'reduce_min'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
if
dim
is
not
None
and
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
helper
.
append_op
(
...
...
@@ -3619,7 +3632,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
fluid.layers.reduce_prod(x, dim=[0, 1]) # [105.0, 384.0]
"""
helper
=
LayerHelper
(
'reduce_prod'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
if
dim
is
not
None
and
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
helper
.
append_op
(
...
...
@@ -3679,7 +3692,7 @@ def split(input, num_or_sections, dim=-1, name=None):
dim
],
'len(num_or_sections) must not be more than input.shape[dim].'
num
=
len
(
num_or_sections
)
outs
=
[
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
for
i
in
range
(
num
)
]
helper
.
append_op
(
...
...
@@ -3736,8 +3749,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
axis
=
0
helper
=
LayerHelper
(
"l2_normalize"
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
norm
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
norm
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
"norm"
,
inputs
=
{
"X"
:
x
},
...
...
@@ -3846,7 +3859,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
__check_input
(
x
,
y
)
helper
=
LayerHelper
(
'matmul'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'matmul'
,
inputs
=
{
'X'
:
x
,
...
...
@@ -3917,8 +3930,8 @@ def topk(input, k, name=None):
top5_values, top5_indices = layers.topk(input, k=5)
"""
helper
=
LayerHelper
(
"top_k"
,
**
locals
())
values
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
indices
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
values
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
indices
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
helper
.
append_op
(
type
=
"top_k"
,
inputs
=
{
"X"
:
[
input
]},
...
...
@@ -3976,8 +3989,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):
# remove some tokens from input and labels
if
ignored_tokens
is
not
None
and
len
(
ignored_tokens
)
>
0
:
erased_input
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
erased_label
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
erased_input
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
erased_label
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
helper
.
append_op
(
type
=
"sequence_erase"
,
...
...
@@ -3994,8 +4007,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):
label
=
erased_label
# edit distance op
edit_distance_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
sequence_num
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
edit_distance_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
sequence_num
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
helper
.
append_op
(
type
=
"edit_distance"
,
inputs
=
{
"Hyps"
:
[
input
],
...
...
@@ -4070,7 +4083,7 @@ def ctc_greedy_decoder(input, blank, name=None):
_
,
topk_indices
=
topk
(
input
,
k
=
1
)
# ctc align op
ctc_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
"int64"
)
ctc_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
"int64"
)
helper
.
append_op
(
type
=
"ctc_align"
,
inputs
=
{
"Input"
:
[
topk_indices
]},
...
...
@@ -4120,8 +4133,8 @@ def warpctc(input, label, blank=0, norm_by_times=False):
"""
helper
=
LayerHelper
(
'warpctc'
,
**
locals
())
loss_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
grad_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
loss_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
grad_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
'warpctc'
,
inputs
=
{
'Logits'
:
[
input
],
...
...
@@ -4182,7 +4195,7 @@ def sequence_reshape(input, new_dim):
x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
"""
helper
=
LayerHelper
(
'sequence_reshape'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
helper
.
input_dtype
())
helper
.
append_op
(
type
=
'sequence_reshape'
,
inputs
=
{
'X'
:
[
input
]},
...
...
@@ -4279,9 +4292,9 @@ def nce(input,
is_bias
=
True
,
dtype
=
input
.
dtype
)
inputs
[
'Bias'
]
=
b
cost
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
sample_logits
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
sample_labels
=
helper
.
create_
tmp_variabl
e
(
dtype
=
label
.
dtype
)
cost
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
sample_logits
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
sample_labels
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
label
.
dtype
)
if
num_neg_samples
is
None
:
num_neg_samples
=
10
...
...
@@ -4357,8 +4370,8 @@ def hsigmoid(input,
helper
=
LayerHelper
(
'hierarchical_sigmoid'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
pre_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
pre_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
dim
=
input
.
shape
[
1
]
if
num_classes
<
2
:
raise
ValueError
(
"num_classes must not be less than 2."
)
...
...
@@ -4418,8 +4431,8 @@ def transpose(x, perm, name=None):
(
idx
,
perm
[
idx
],
len
(
x
.
shape
)))
helper
=
LayerHelper
(
'transpose'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
x
.
dtype
)
x_shape
=
helper
.
create_
tmp_variabl
e
(
x
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
x
.
dtype
)
x_shape
=
helper
.
create_
variable_for_type_inferenc
e
(
x
.
dtype
)
helper
.
append_op
(
type
=
'transpose2'
,
inputs
=
{
'X'
:
[
x
]},
...
...
@@ -4561,7 +4574,7 @@ def im2sequence(input,
inputs
[
"Y"
]
=
input_image_size
attrs
[
"out_stride"
]
=
out_stride
helper
=
LayerHelper
(
'im2sequence'
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
helper
.
input_dtype
())
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
helper
.
input_dtype
())
helper
.
append_op
(
type
=
'im2sequence'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
...
...
@@ -4594,7 +4607,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
filter_shape
=
[
future_context_size
+
1
,
input
.
shape
[
1
]]
filter_param
=
helper
.
create_parameter
(
attr
=
helper
.
param_attr
,
shape
=
filter_shape
,
dtype
=
dtype
)
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'row_conv'
,
inputs
=
{
'X'
:
[
input
],
...
...
@@ -4627,7 +4640,7 @@ def multiplex(inputs, index):
raise
ValueError
(
"inputs should be a list object and contains at least "
"2 elements."
)
out
=
helper
.
create_
tmp_variabl
e
(
inputs
[
0
].
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
inputs
[
0
].
dtype
)
helper
.
append_op
(
type
=
'multiplex'
,
inputs
=
{
'X'
:
inputs
,
...
...
@@ -4698,8 +4711,8 @@ def softmax_with_cross_entropy(logits,
logits=fc, label=label)
"""
helper
=
LayerHelper
(
'softmax_with_cross_entropy'
,
**
locals
())
softmax
=
helper
.
create_
tmp_variabl
e
(
dtype
=
logits
.
dtype
)
loss
=
helper
.
create_
tmp_variabl
e
(
dtype
=
logits
.
dtype
)
softmax
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
logits
.
dtype
)
loss
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
logits
.
dtype
)
helper
.
append_op
(
type
=
'softmax_with_cross_entropy'
,
inputs
=
{
'Logits'
:
logits
,
...
...
@@ -4749,8 +4762,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
"""
helper
=
LayerHelper
(
'smooth_l1_loss'
,
**
locals
())
diff
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
loss
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
diff
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
loss
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'smooth_l1_loss'
,
inputs
=
{
...
...
@@ -4783,7 +4796,7 @@ def one_hot(input, depth):
one_hot_label = layers.one_hot(input=label, depth=10)
"""
helper
=
LayerHelper
(
"one_hot"
,
**
locals
())
one_hot_out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
'float32'
)
one_hot_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
'float32'
)
helper
.
append_op
(
type
=
"one_hot"
,
inputs
=
{
'X'
:
input
},
...
...
@@ -4930,8 +4943,9 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
"except one unknown dimension."
)
helper
=
LayerHelper
(
"reshape2"
,
**
locals
())
x_shape
=
helper
.
create_tmp_variable
(
dtype
=
x
.
dtype
)
out
=
x
if
inplace
else
helper
.
create_tmp_variable
(
dtype
=
x
.
dtype
)
out
=
x
if
inplace
else
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
x_shape
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
"reshape2"
,
inputs
=
inputs
,
...
...
@@ -4980,8 +4994,8 @@ def squeeze(input, axes, name=None):
y = layers.sequeeze(input=x, axes=[1])
"""
helper
=
LayerHelper
(
"squeeze"
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
x_shape
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
x_shape
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
"squeeze2"
,
inputs
=
{
"X"
:
input
},
...
...
@@ -5017,8 +5031,8 @@ def unsqueeze(input, axes, name=None):
y = layers.unsequeeze(input=x, axes=[1])
"""
helper
=
LayerHelper
(
"unsqueeze"
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
x_shape
=
helper
.
create_
tmp_variabl
e
(
dtype
=
input
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
x_shape
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
input
.
dtype
)
helper
.
append_op
(
type
=
"unsqueeze2"
,
inputs
=
{
"X"
:
input
},
...
...
@@ -5108,7 +5122,7 @@ def lod_reset(x, y=None, target_lod=None):
out = layers.lod_reset(x=x, y=y)
"""
helper
=
LayerHelper
(
"lod_reset"
,
**
locals
())
out
=
helper
.
create_
tmp_variabl
e
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
x
.
dtype
)
if
y
is
not
None
:
helper
.
append_op
(
type
=
"lod_reset"
,
inputs
=
{
'X'
:
x
,
...
...
@@ -5177,8 +5191,9 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
"dims of input must be 4(not %d), and it's order must be NCHW"
%
(
dims
))
mid_out
=
helper
.
create_tmp_variable
(
dtype
=
dtype
,
stop_gradient
=
True
)
lrn_out
=
helper
.
create_tmp_variable
(
dtype
)
mid_out
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
,
stop_gradient
=
True
)
lrn_out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
"lrn"
,
inputs
=
{
"X"
:
input
},
...
...
@@ -5243,7 +5258,7 @@ def pad(x, paddings, pad_value=0., name=None):
"""
helper
=
LayerHelper
(
'pad'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'pad'
,
inputs
=
{
'X'
:
x
},
...
...
@@ -5323,7 +5338,7 @@ def pad_constant_like(x, y, pad_value=0., name=None):
"""
helper
=
LayerHelper
(
'pad_constant_like'
,
input
=
x
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
'pad_constant_like'
,
inputs
=
{
'X'
:
x
,
...
...
@@ -5388,7 +5403,7 @@ def label_smooth(label,
raise
ValueError
(
"The value of epsilon must be between 0 and 1."
)
helper
=
LayerHelper
(
"label_smooth"
,
**
locals
())
label
.
stop_gradient
=
True
smooth_label
=
helper
.
create_
tmp_variabl
e
(
dtype
)
smooth_label
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"label_smooth"
,
inputs
=
{
"X"
:
label
,
...
...
@@ -5420,8 +5435,8 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
"""
helper
=
LayerHelper
(
'roi_pool'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
pool_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
argmaxes
=
helper
.
create_
tmp_variabl
e
(
dtype
=
'int32'
)
pool_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
argmaxes
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
'int32'
)
helper
.
append_op
(
type
=
"roi_pool"
,
inputs
=
{
"X"
:
input
,
...
...
@@ -5469,7 +5484,7 @@ def roi_align(input,
"""
helper
=
LayerHelper
(
'roi_align'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
align_out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
align_out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"roi_align"
,
inputs
=
{
"X"
:
input
,
...
...
@@ -5594,7 +5609,7 @@ def image_resize(input,
out_h
=
int
(
input
.
shape
[
2
]
*
scale
)
out_w
=
int
(
input
.
shape
[
3
]
*
scale
)
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
resample_methods
[
resample
],
inputs
=
inputs
,
...
...
@@ -5703,7 +5718,7 @@ def gather(input, index):
"""
helper
=
LayerHelper
(
'gather'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"gather"
,
inputs
=
{
"X"
:
input
,
...
...
@@ -5743,7 +5758,7 @@ def scatter(input, index, updates, name=None):
"""
helper
=
LayerHelper
(
'scatter'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"scatter"
,
inputs
=
{
"X"
:
input
,
...
...
@@ -5803,7 +5818,7 @@ def sequence_scatter(input, index, updates, name=None):
"""
helper
=
LayerHelper
(
'sequence_scatter'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"sequence_scatter"
,
inputs
=
{
"X"
:
input
,
...
...
@@ -5833,7 +5848,7 @@ def random_crop(x, shape, seed=None):
"""
helper
=
LayerHelper
(
"random_crop"
,
**
locals
())
dtype
=
x
.
dtype
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
if
seed
is
None
:
seed
=
np
.
random
.
randint
(
-
65536
,
65536
)
op_attrs
=
{
"shape"
:
shape
}
...
...
@@ -5879,7 +5894,7 @@ def log(x, name=None):
"""
helper
=
LayerHelper
(
'log'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"log"
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
...
...
@@ -5910,7 +5925,7 @@ def relu(x, name=None):
"""
helper
=
LayerHelper
(
'relu'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_
tmp_variabl
e
(
dtype
)
out
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
)
helper
.
append_op
(
type
=
"relu"
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
...
...
@@ -5949,9 +5964,9 @@ def mean_iou(input, label, num_classes):
"""
helper
=
LayerHelper
(
'mean_iou'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
out_mean_iou
=
helper
.
create_
tmp_variabl
e
(
dtype
=
'float32'
)
out_wrong
=
helper
.
create_
tmp_variabl
e
(
dtype
=
'int32'
)
out_correct
=
helper
.
create_
tmp_variabl
e
(
dtype
=
'int32'
)
out_mean_iou
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
'float32'
)
out_wrong
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
'int32'
)
out_correct
=
helper
.
create_
variable_for_type_inferenc
e
(
dtype
=
'int32'
)
helper
.
append_op
(
type
=
"mean_iou"
,
inputs
=
{
"Predictions"
:
input
,
...
...
@@ -6043,7 +6058,7 @@ def crop(x, shape=None, offsets=None, name=None):
     if offsets is None:
         offsets = [0] * len(x.shape)
-    out = helper.create_tmp_variable(x.dtype)
+    out = helper.create_variable_for_type_inference(x.dtype)
     ipts = {'X': x}
     attrs = {}
     if isinstance(shape, Variable):
...
@@ -6123,7 +6138,7 @@ def rank_loss(label, left, right, name=None):
     if not (isinstance(right, Variable)):
         raise ValueError("The right should be a Variable")
-    out = helper.create_tmp_variable("float32")
+    out = helper.create_variable_for_type_inference("float32")
     helper.append_op(
         type='rank_loss',
...
@@ -6169,8 +6184,8 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
         raise ValueError("The left should be a Variable.")
     if not isinstance(right, Variable):
         raise ValueError("The right should be a Variable.")
-    out = helper.create_tmp_variable(left.dtype)
-    act = helper.create_tmp_variable(left.dtype)
+    out = helper.create_variable_for_type_inference(left.dtype)
+    act = helper.create_variable_for_type_inference(left.dtype)
     helper.append_op(
         type='margin_rank_loss',
         inputs={"Label": label,
...
@@ -6255,7 +6270,7 @@ def pad2d(input,
     helper = LayerHelper('pad2d', **locals())
     dtype = helper.input_dtype(input_param_name='input')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='pad2d',
         inputs={'X': input},
...
@@ -6284,7 +6299,7 @@ def elu(x, alpha=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('elu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='elu',
         inputs={'X': x},
...
@@ -6307,7 +6322,7 @@ def relu6(x, threshold=6.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('relu6', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='relu6',
         inputs={'X': x},
...
@@ -6330,7 +6345,7 @@ def pow(x, factor=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('pow', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='pow',
         inputs={'X': x},
...
@@ -6354,7 +6369,7 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('stanh', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='stanh',
         inputs={'X': x},
...
@@ -6379,7 +6394,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('hard_sigmoid', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='hard_sigmoid',
         inputs={'X': x},
...
@@ -6403,7 +6418,7 @@ def swish(x, beta=1.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('swish', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='swish',
         inputs={'X': x},
...
@@ -6455,7 +6470,7 @@ def prelu(x, mode, param_attr=None, name=None):
         dtype='float32',
         is_bias=False,
         default_initializer=Constant(1.0))
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="prelu",
         inputs={"X": x,
...
@@ -6479,7 +6494,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('brelu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='brelu',
         inputs={'X': x},
...
@@ -6502,7 +6517,7 @@ def leaky_relu(x, alpha=0.02, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('leaky_relu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='leaky_relu',
         inputs={'X': x},
...
@@ -6524,7 +6539,7 @@ def soft_relu(x, threshold=40.0, name=None):
         output(${out_type}): ${out_comment}
     """
     helper = LayerHelper('soft_relu', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='soft_relu',
         inputs={'X': x},
...
@@ -6591,8 +6606,8 @@ def flatten(x, axis=1, name=None):
     if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
         raise ValueError("The axis should be a int, and in range [0, rank(x)]")
-    out = helper.create_tmp_variable(x.dtype)
-    x_shape = helper.create_tmp_variable(x.dtype)
+    out = helper.create_variable_for_type_inference(x.dtype)
+    x_shape = helper.create_variable_for_type_inference(x.dtype)
     helper.append_op(
         type='flatten2',
         inputs={"X": x},
...
@@ -6638,7 +6653,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
         out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
     """
     helper = LayerHelper('sequence_enumerate', **locals())
-    out = helper.create_tmp_variable(helper.input_dtype(), stop_gradient=True)
+    out = helper.create_variable_for_type_inference(
+        helper.input_dtype(), stop_gradient=True)
     helper.append_op(
         type='sequence_enumerate',
         inputs={'X': input},
...
@@ -6678,9 +6694,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
     helper = LayerHelper('sequence_mask', **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=dtype)
+        out = helper.create_variable_for_type_inference(dtype=dtype)
     else:
-        out = helper.create_tmp_variable(dtype=dtype, name=name)
+        out = helper.create_variable_for_type_inference(dtype=dtype, name=name)
     helper.append_op(
         type='sequence_mask',
...
@@ -6723,7 +6739,7 @@ def stack(x, axis=0):
     if not isinstance(x, list) and not isinstance(x, tuple):
         x = [x]
-    out = helper.create_tmp_variable(x[0].dtype)
+    out = helper.create_variable_for_type_inference(x[0].dtype)
     helper.append_op(
         type='stack', inputs={'X': x}, outputs={'Y': out},
         attrs={'axis': axis})
...
@@ -6761,7 +6777,7 @@ def unstack(x, axis=0, num=None):
     outs = []
     for _ in num:
-        outs.append(helper.create_tmp_variable(x.dtype))
+        outs.append(helper.create_variable_for_type_inference(x.dtype))
     helper.append_op(
         type='unstack',
...
@@ -6813,7 +6829,7 @@ def expand(x, expand_times, name=None):
     """
     helper = LayerHelper('expand', input=x, **locals())
     dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='expand',
         inputs={'X': x},
...
@@ -6852,7 +6868,7 @@ def uniform_random_batch_size_like(input,
     """
     helper = LayerHelper('uniform_random_batch_size_like', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='uniform_random_batch_size_like',
...
@@ -6889,7 +6905,7 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
     """
     helper = LayerHelper('gaussian_random', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='gaussian_random',
...
@@ -6924,7 +6940,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
     """
     helper = LayerHelper('sampling_id', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sampling_id',
         inputs={'X': x},
...
@@ -6963,7 +6979,7 @@ def gaussian_random_batch_size_like(input,
     """
     helper = LayerHelper('gaussian_random_batch_size_like', **locals())
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     c_dtype = convert_np_dtype_to_dtype_(dtype)
     helper.append_op(
         type='gaussian_random_batch_size_like',
...
@@ -6995,7 +7011,8 @@ def sum(x):
     """
     helper = LayerHelper('sum', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('x'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('x'))
     helper.append_op(
         type='sum',
         inputs={'X': x},
...
@@ -7022,7 +7039,8 @@ def slice(input, axes, starts, ends):
     """
     helper = LayerHelper('slice', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('input'))
     helper.append_op(
         type='slice',
         inputs={'Input': input},
...
@@ -7048,7 +7066,8 @@ def shape(input):
     """
     helper = LayerHelper('shape', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype('input'))
+    out = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype('input'))
     helper.append_op(
         type='shape', inputs={'Input': input}, outputs={'Out': out})
...
@@ -7065,7 +7084,7 @@ def _elementwise_op(helper):
     use_mkldnn = helper.kwargs.get('use_mkldnn', False)
     name = helper.kwargs.get('name', None)
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
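This hunk and most of those that follow share a naming branch: only the anonymous output moves to the new helper, while an output with a user-supplied name still goes through create_variable(name=..., persistable=False). A usage sketch of the two paths (the tensor shape and output name are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[32], dtype='float32')
    # No name given: the result comes from create_variable_for_type_inference.
    y = fluid.layers.scale(x, scale=2.0)
    # Explicit name: the result is created directly with create_variable.
    z = fluid.layers.scale(x, scale=2.0, name='x_scaled')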
@@ -7099,7 +7118,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     helper = LayerHelper('scale', **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7165,7 +7184,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     if out is None:
         if name is None:
-            out = helper.create_tmp_variable(dtype=x.dtype)
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
         else:
             out = helper.create_variable(
                 name=name, dtype=x.dtype, persistable=False)
...
@@ -7273,7 +7292,7 @@ def clip(x, min, max, name=None):
     helper = LayerHelper("clip", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7305,7 +7324,7 @@ def clip_by_norm(x, max_norm, name=None):
     helper = LayerHelper("clip_by_norm", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7335,7 +7354,7 @@ def mean(x, name=None):
     helper = LayerHelper("mean", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7365,7 +7384,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     helper = LayerHelper("mul", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7399,7 +7418,7 @@ def sigmoid_cross_entropy_with_logits(x, label, name=None):
     helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7429,7 +7448,7 @@ def maxout(x, groups, name=None):
     helper = LayerHelper("maxout", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
@@ -7468,7 +7487,7 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None):
     helper = LayerHelper("affine_channel", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
...
python/paddle/fluid/layers/tensor.py
...
@@ -152,7 +152,7 @@ def cast(x, dtype):
         result = fluid.layers.cast(x=data, dtype='float64')
     """
     helper = LayerHelper('cast', **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='cast',
         inputs={'X': [x]},
...
@@ -184,7 +184,7 @@ def concat(input, axis=0, name=None):
         out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
     """
     helper = LayerHelper('concat', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='concat',
         inputs={'X': input},
...
@@ -221,7 +221,8 @@ def sums(input, out=None):
     """
     helper = LayerHelper('sum', **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype())
     helper.append_op(
         type='sum',
         inputs={'X': input},
...
@@ -252,7 +253,7 @@ def assign(input, output=None):
     """
     helper = LayerHelper('assign', **locals())
     if output is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     if isinstance(input, Variable):
         helper.append_op(
             type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
...
@@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
     helper = LayerHelper("fill_constant", **locals())
     if out is None:
-        out = helper.create_tmp_variable(dtype=dtype)
+        out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant',
         inputs={},
...
@@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input,
         ${out_comment}.
     """
     helper = LayerHelper("fill_constant_batch_size_like", **locals())
-    out = helper.create_tmp_variable(dtype=dtype)
+    out = helper.create_variable_for_type_inference(dtype=dtype)
     helper.append_op(
         type='fill_constant_batch_size_like',
         inputs={'Input': input},
...
@@ -396,7 +397,7 @@ def argmin(x, axis=0):
         out = fluid.layers.argmin(x=in, axis=-1)
     """
     helper = LayerHelper("arg_min", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_min',
         inputs={'X': x},
...
@@ -427,7 +428,7 @@ def argmax(x, axis=0):
         out = fluid.layers.argmax(x=in, axis=-1)
     """
     helper = LayerHelper("arg_max", **locals())
-    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
     helper.append_op(
         type='arg_max',
         inputs={'X': x},
...
@@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None):
         out, indices = fluid.layers.argsort(input, axis=0)
     """
     helper = LayerHelper("argsort", **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
-    ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
+    out = helper.create_variable_for_type_inference(
+        dtype=input.dtype, stop_gradient=True)
+    ids = helper.create_variable_for_type_inference(
+        VarDesc.VarType.INT64, stop_gradient=True)
     helper.append_op(
         type='argsort',
         inputs={'X': input},
...
@@ -562,7 +565,7 @@ def reverse(x, axis):
     if isinstance(axis, int):
         axis = [axis]
     helper = LayerHelper("reverse", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reverse',
         inputs={'Input': x},
...
@@ -654,7 +657,7 @@ def has_inf(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isinf", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
     return out
...
@@ -670,7 +673,7 @@ def has_nan(x):
         Variable: The tensor variable storing the output, only a bool value.
     """
     helper = LayerHelper("isnan", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
     return out
...
@@ -687,6 +690,6 @@ def isfinite(x):
         Variable: The tensor variable storing the output, contains a bool value.
     """
     helper = LayerHelper("isfinite", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
     return out
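Note that argsort above (like sequence_enumerate in nn.py) creates its outputs with stop_gradient=True, since no useful gradient flows through sorted indices. A short usage sketch (the input shape is illustrative):

    import paddle.fluid as fluid

    scores = fluid.layers.data(name='scores', shape=[5], dtype='float32')
    sorted_scores, ids = fluid.layers.argsort(scores, axis=-1)
    # ids is int64 and, per the hunk above, marked stop_gradient=True.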
python/paddle/fluid/regularizer.py
...
@@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
...
@@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer):
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
-                type=core.VarDesc.VarType.SELECTED_ROWS)
+                type=core.VarDesc.VarType.LOD_TENSOR)
             block.append_op(
                 type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
...
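Unlike the mechanical renames above, these two hunks are a semantic fix: the intermediate decay variable is filled by dense-output ops after extract_rows, so declaring it as SELECTED_ROWS did not match what is actually written into it; LOD_TENSOR does. For reference, the term each regularizer contributes to the gradient, as a plain numpy sketch (function and coefficient names are assumptions, not the class API):

    import numpy as np

    def l2_decay_grad(param, coeff=1e-4):
        # gradient of 0.5 * coeff * ||w||_2^2
        return coeff * param

    def l1_decay_grad(param, coeff=1e-4):
        # (sub)gradient of coeff * ||w||_1
        return coeff * np.sign(param)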
python/paddle/fluid/tests/unittests/test_slice_var.py
...
@@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase):
             var = program.global_block().create_var(
                 name=str(random.randint(10000, 99999)),
                 persistable=True,
-                # dtype=core.VarDesc.VarType.LOD_TENSOR,
                 shape=shape)
             var_list.append(var)
         blocks = slice_variable(var_list, 10, min_size)
...
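The deleted comment was dead weight: a variable created through Block.create_var defaults to LOD_TENSOR anyway, which is what slice_variable expects. A minimal check of that default (the variable name is illustrative):

    import paddle.fluid as fluid
    import paddle.fluid.core as core

    prog = fluid.Program()
    var = prog.global_block().create_var(
        name='w', persistable=True, shape=[10, 10])
    # No explicit type passed, so the default LOD_TENSOR applies.
    assert var.type == core.VarDesc.VarType.LOD_TENSOR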