Commit 3099a8f3 authored by G guosheng

Merge branch 'develop' of https://github.com/PaddlePaddle/paddle into add-reshape-reuse-input

test=develop
@@ -515,20 +515,14 @@ void OpDesc::InferShape(const BlockDesc &block) const {
 }
 
 void OpDesc::InferVarType(BlockDesc *block) const {
+  // There are a few places where a var's type can be set.
+  // When a VarDesc is created, it is set to LOD_TENSOR by default.
+  // When an output variable is created, it is also LOD_TENSOR by default.
+  // We limit this to be the only place where an operator defines its
+  // customized var type inference, so we don't do any "default" setting here.
   auto &info = OpInfoMap::Instance().Get(this->Type());
   if (info.infer_var_type_) {
     info.infer_var_type_(*this, block);
-  } else {
-    // all output type is LoDTensor by default
-    VLOG(10) << this->Type()
-             << " has not registered InferVarType. Set output variables to "
-                "LOD_TENSOR";
-    for (auto &out_pair : this->outputs_) {
-      for (auto &out_var_name : out_pair.second) {
-        block->FindRecursiveOrCreateVar(out_var_name)
-            .SetType(proto::VarType::LOD_TENSOR);
-      }
-    }
   }
 }
......
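The comments added above state the contract this commit establishes: a variable's type is set to LOD_TENSOR when it is created, and only an operator's registered VarTypeInference hook may override that default; the framework no longer force-resets the outputs of unregistered operators to LOD_TENSOR. A minimal, self-contained Python sketch of that flow, a toy model for illustration only (not Paddle's real C++ registry; all names in it are hypothetical):

    # Toy model of OpInfoMap / InferVarType: types are set at creation time,
    # and only a registered hook may change them afterwards.
    LOD_TENSOR = "LOD_TENSOR"
    SELECTED_ROWS = "SELECTED_ROWS"

    op_info_map = {}  # op type -> optional infer_var_type hook

    def reshape_infer_var_type(inputs, outputs, var_types):
        # Output reuses the input's var type (cf. the merged branch
        # add-reshape-reuse-input).
        var_types[outputs["Out"]] = var_types[inputs["X"]]

    op_info_map["reshape"] = reshape_infer_var_type

    def create_var(var_types, name):
        var_types[name] = LOD_TENSOR  # default set when the var is created

    def infer_var_type(op_type, inputs, outputs, var_types):
        hook = op_info_map.get(op_type)
        if hook is not None:
            hook(inputs, outputs, var_types)  # the only customization point

    var_types = {}
    create_var(var_types, "x")
    var_types["x"] = SELECTED_ROWS      # pretend an earlier op produced rows
    create_var(var_types, "y")          # starts as the LOD_TENSOR default
    infer_var_type("reshape", {"X": "x"}, {"Out": "y"}, var_types)
    assert var_types["y"] == SELECTED_ROWS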
@@ -324,10 +324,19 @@ class LayerHelper(object):
             raise ValueError("no Parameter name %s found" % name)
         return param
 
-    def create_tmp_variable(self, dtype, stop_gradient=False):
+    def create_variable_for_type_inference(self, dtype, stop_gradient=False):
+        """Create a temporary variable whose type will be inferred later.
+
+        Note:
+            The default type will be set to LOD_TENSOR. However, when
+            the var is used as an operator output, its type will be
+            updated based on the operator's `VarTypeInference`
+            implementation in infer_var_type.
+        """
         return self.main_program.current_block().create_var(
             name=unique_name.generate(".".join([self.name, 'tmp'])),
             dtype=dtype,
+            type=core.VarDesc.VarType.LOD_TENSOR,
             persistable=False,
             stop_gradient=stop_gradient)
@@ -388,7 +397,7 @@ class LayerHelper(object):
         b = self.create_parameter(
             attr=bias_attr, shape=size, dtype=input_var.dtype, is_bias=True)
-        tmp = self.create_tmp_variable(dtype=input_var.dtype)
+        tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type='elementwise_add',
             inputs={'X': [input_var],
@@ -414,7 +423,7 @@ class LayerHelper(object):
             tmp = input_var
         # NOTE(dzhwinter): some activations support inplace computation.
         if not core.IsInplace(act_type):
-            tmp = self.create_tmp_variable(dtype=input_var.dtype)
+            tmp = self.create_variable_for_type_inference(dtype=input_var.dtype)
         self.append_op(
             type=act_type,
             inputs={"X": [input_var]},
......
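The renamed helper is used the same way throughout the layers below: allocate the output with create_variable_for_type_inference, then let the appended operator's VarTypeInference decide the final var type. A minimal sketch of that calling convention, assuming the paddle.fluid API this commit targets (the `double` layer itself is hypothetical; the existing `scale` op is used as a stand-in):

    import paddle.fluid as fluid
    from paddle.fluid.layer_helper import LayerHelper

    def double(x):
        helper = LayerHelper('scale', **locals())
        # Created as LOD_TENSOR by default; the op's VarTypeInference
        # (if registered) may update the type after append_op.
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='scale',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'scale': 2.0})
        return out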
@@ -80,8 +80,8 @@ def split_lod_tensor(input, mask, level=0):
     """
     helper = LayerHelper('split_lod_tensor', **locals())
-    out_true = helper.create_tmp_variable(dtype=input.dtype)
-    out_false = helper.create_tmp_variable(dtype=input.dtype)
+    out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='split_lod_tensor',
         inputs={
@@ -131,7 +131,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0):
                 in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
     """
     helper = LayerHelper('merge_lod_tensor', **locals())
-    out = helper.create_tmp_variable(dtype=in_true.dtype)
+    out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
     helper.append_op(
         type='merge_lod_tensor',
         inputs={'X': x,
@@ -524,7 +524,7 @@ class StaticRNN(object):
         if not isinstance(o, Variable):
             raise TypeError("step output takes a Variable")
-        tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
+        tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
         self.helper.append_op(
             type='rnn_memory_helper',
             inputs={'X': [o]},
@@ -606,7 +606,8 @@ class StaticRNN(object):
             pre_memories.append(mem.pre_mem.name)
             mem_var = rnn_block.var(mem.mem.name)
             assert isinstance(mem_var, Variable)
-            new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
+            new_mem = self.helper.create_variable_for_type_inference(
+                dtype=mem_var.dtype)
             rnn_block.append_op(
                 type='rnn_memory_helper',
@@ -813,7 +814,7 @@ def max_sequence_len(rank_table):
         ${out_comment}.
     """
     helper = LayerHelper("max_seqence_len", **locals())
-    res = helper.create_tmp_variable(dtype="int64")
+    res = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="max_sequence_len",
         inputs={"RankTable": rank_table},
@@ -884,7 +885,7 @@ def array_to_lod_tensor(x, table):
           lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
     """
     helper = LayerHelper("array_to_lod_tensor", **locals())
-    tmp = helper.create_tmp_variable(dtype=x.dtype)
+    tmp = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="array_to_lod_tensor",
         inputs={'X': x,
@@ -915,7 +916,7 @@ def increment(x, value=1.0, in_place=True):
     """
     helper = LayerHelper("increment", **locals())
     if not in_place:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = x
     helper.append_op(
@@ -1012,7 +1013,7 @@ def less_than(x, y, force_cpu=None, cond=None, **ignored):
     """
     helper = LayerHelper("less_than", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     attrs = dict()
@@ -1051,7 +1052,7 @@ def equal(x, y, cond=None, **ignored):
     """
     helper = LayerHelper("equal", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     helper.append_op(
@@ -1098,7 +1099,7 @@ def array_read(array, i):
             array,
             Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
         raise TypeError("array should be tensor array vairable")
-    out = helper.create_tmp_variable(dtype=array.dtype)
+    out = helper.create_variable_for_type_inference(dtype=array.dtype)
     helper.append_op(
         type='read_from_array',
         inputs={'X': [array],
@@ -1133,7 +1134,7 @@ def shrink_memory(x, i, table):
         usage.
     """
     helper = LayerHelper('shrink_memory', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='shrink_rnn_memory',
         inputs={'X': [x],
@@ -1170,7 +1171,7 @@ def array_length(array):
     """
     helper = LayerHelper('array_length', **locals())
-    tmp = helper.create_tmp_variable(dtype='int64')
+    tmp = helper.create_variable_for_type_inference(dtype='int64')
     tmp.stop_gradient = True
     helper.append_op(
         type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
@@ -1590,7 +1591,7 @@ class DynamicRNN(object):
         self.mem_dict = dict()
         self.output_array = []
         self.outputs = []
-        self.cond = self.helper.create_tmp_variable(dtype='bool')
+        self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
         self.cond.stop_gradient = False
         self.while_op = While(self.cond)
         self.input_array = []
@@ -1924,7 +1925,7 @@ def reorder_lod_tensor_by_rank(x, rank_table):
     helper.is_instance('x', Variable)
     helper.is_instance('rank_table', Variable)
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='reorder_lod_tensor_by_rank',
         inputs={'X': [x],
@@ -1958,7 +1959,7 @@ def is_empty(x, cond=None, **ignored):
     """
     helper = LayerHelper("is_empty", **locals())
     if cond is None:
-        cond = helper.create_tmp_variable(dtype='bool')
+        cond = helper.create_variable_for_type_inference(dtype='bool')
         cond.stop_gradient = True
     elif not isinstance(cond, Variable):
         raise TypeError("cond takes a variable")
......
@@ -147,10 +147,11 @@ def rpn_target_assign(bbox_pred,
     helper = LayerHelper('rpn_target_assign', **locals())
     # Assign target label to anchors
-    loc_index = helper.create_tmp_variable(dtype='int32')
-    score_index = helper.create_tmp_variable(dtype='int32')
-    target_label = helper.create_tmp_variable(dtype='int32')
-    target_bbox = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    loc_index = helper.create_variable_for_type_inference(dtype='int32')
+    score_index = helper.create_variable_for_type_inference(dtype='int32')
+    target_label = helper.create_variable_for_type_inference(dtype='int32')
+    target_bbox = helper.create_variable_for_type_inference(
+        dtype=anchor_box.dtype)
     helper.append_op(
         type="rpn_target_assign",
         inputs={
@@ -282,7 +283,8 @@ def detection_output(loc,
     scores = nn.reshape(x=scores, shape=compile_shape, actual_shape=run_shape)
     scores = nn.transpose(scores, perm=[0, 2, 1])
     scores.stop_gradient = True
-    nmsed_outs = helper.create_tmp_variable(dtype=decoded_box.dtype)
+    nmsed_outs = helper.create_variable_for_type_inference(
+        dtype=decoded_box.dtype)
     helper.append_op(
         type="multiclass_nms",
         inputs={'Scores': scores,
@@ -314,7 +316,7 @@ def iou_similarity(x, y, name=None):
     """
     helper = LayerHelper("iou_similarity", **locals())
     if name is None:
-        out = helper.create_tmp_variable(dtype=x.dtype)
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
     else:
         out = helper.create_variable(
             name=name, dtype=x.dtype, persistable=False)
@@ -351,7 +353,8 @@ def box_coder(prior_box,
     helper = LayerHelper("box_coder", **locals())
     if name is None:
-        output_box = helper.create_tmp_variable(dtype=prior_box.dtype)
+        output_box = helper.create_variable_for_type_inference(
+            dtype=prior_box.dtype)
     else:
         output_box = helper.create_variable(
             name=name, dtype=prior_box.dtype, persistable=False)
@@ -382,7 +385,7 @@ def polygon_box_transform(input, name=None):
     """
     helper = LayerHelper("polygon_box_transform", **locals())
     if name is None:
-        output = helper.create_tmp_variable(dtype=input.dtype)
+        output = helper.create_variable_for_type_inference(dtype=input.dtype)
     else:
         output = helper.create_variable(
             name=name, dtype=prior_box.input, persistable=False)
@@ -450,7 +453,7 @@ def detection_map(detect_res,
     helper = LayerHelper("detection_map", **locals())
     def __create_var(type):
-        return helper.create_tmp_variable(dtype=type)
+        return helper.create_variable_for_type_inference(dtype=type)
     map_out = __create_var('float32')
     accum_pos_count_out = out_states[0] if out_states else __create_var('int32')
@@ -557,8 +560,9 @@ def bipartite_match(dist_matrix,
             >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
     """
     helper = LayerHelper('bipartite_match', **locals())
-    match_indices = helper.create_tmp_variable(dtype='int32')
-    match_distance = helper.create_tmp_variable(dtype=dist_matrix.dtype)
+    match_indices = helper.create_variable_for_type_inference(dtype='int32')
+    match_distance = helper.create_variable_for_type_inference(
+        dtype=dist_matrix.dtype)
     helper.append_op(
         type='bipartite_match',
         inputs={'DistMat': dist_matrix},
@@ -644,8 +648,8 @@ def target_assign(input,
                 gt, matched_indices, mismatch_value=0)
     """
     helper = LayerHelper('target_assign', **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
-    out_weight = helper.create_tmp_variable(dtype='float32')
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    out_weight = helper.create_variable_for_type_inference(dtype='float32')
     helper.append_op(
         type='target_assign',
         inputs={
@@ -816,9 +820,10 @@ def ssd_loss(location,
     conf_loss = nn.reshape(
         x=conf_loss, shape=(num, num_prior), actual_shape=actual_shape)
     conf_loss.stop_gradient = True
-    neg_indices = helper.create_tmp_variable(dtype='int32')
+    neg_indices = helper.create_variable_for_type_inference(dtype='int32')
     dtype = matched_indices.dtype
-    updated_matched_indices = helper.create_tmp_variable(dtype=dtype)
+    updated_matched_indices = helper.create_variable_for_type_inference(
+        dtype=dtype)
     helper.append_op(
         type='mine_hard_examples',
         inputs={
@@ -998,8 +1003,8 @@ def prior_box(input,
         max_sizes = [max_sizes]
         attrs['max_sizes'] = max_sizes
-    box = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    box = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="prior_box",
         inputs={"Input": input,
@@ -1337,8 +1342,8 @@ def anchor_generator(input,
         'offset': offset
     }
-    anchor = helper.create_tmp_variable(dtype)
-    var = helper.create_tmp_variable(dtype)
+    anchor = helper.create_variable_for_type_inference(dtype)
+    var = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="anchor_generator",
         inputs={"Input": input},
@@ -1384,7 +1389,7 @@ def roi_perspective_transform(input,
     """
     helper = LayerHelper('roi_perspective_transform', **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="roi_perspective_transform",
         inputs={"X": input,
@@ -1418,11 +1423,15 @@ def generate_proposal_labels(rpn_rois,
     helper = LayerHelper('generate_proposal_labels', **locals())
-    rois = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    labels_int32 = helper.create_tmp_variable(dtype=gt_classes.dtype)
-    bbox_targets = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_inside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
-    bbox_outside_weights = helper.create_tmp_variable(dtype=rpn_rois.dtype)
+    rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
+    labels_int32 = helper.create_variable_for_type_inference(
+        dtype=gt_classes.dtype)
+    bbox_targets = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_inside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
+    bbox_outside_weights = helper.create_variable_for_type_inference(
+        dtype=rpn_rois.dtype)
     helper.append_op(
         type="generate_proposal_labels",
@@ -1504,8 +1513,10 @@ def generate_proposals(scores,
     """
     helper = LayerHelper('generate_proposals', **locals())
-    rpn_rois = helper.create_tmp_variable(dtype=bbox_deltas.dtype)
-    rpn_roi_probs = helper.create_tmp_variable(dtype=scores.dtype)
+    rpn_rois = helper.create_variable_for_type_inference(
+        dtype=bbox_deltas.dtype)
+    rpn_roi_probs = helper.create_variable_for_type_inference(
+        dtype=scores.dtype)
     helper.append_op(
         type="generate_proposals",
         inputs={
......
@@ -954,7 +954,7 @@ def read_file(reader):
     """
     helper = LayerHelper('read_file')
     out = [
-        helper.create_tmp_variable(
+        helper.create_variable_for_type_inference(
             stop_gradient=True, dtype='float32')
         for _ in range(len(reader.desc.shapes()))
    ]
......
@@ -202,10 +202,12 @@ def generate_layer_fn(op_type):
             out_var = out[0] if (isinstance(out, list) or
                                  isinstance(out, tuple)) else out
         else:
-            out_var = helper.create_tmp_variable(dtype=dtype)
+            out_var = helper.create_variable_for_type_inference(dtype=dtype)
         outputs[o_name] = [out_var]
         for name in intermediate_output_names:
-            outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
+            outputs[name] = [
+                helper.create_variable_for_type_inference(dtype=dtype)
+            ]
         helper.append_op(
             type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
         return helper.append_activation(out_var)
@@ -229,7 +231,7 @@ def generate_layer_fn_noattr(op_type):
     def func(x, name=None):
         helper = LayerHelper(op_type, **locals())
-        output = helper.create_tmp_variable(dtype=x.dtype)
+        output = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
         return output
......
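generate_layer_fn wires the same pattern into auto-generated layers: the primary output and every intermediate output get a fresh variable from create_variable_for_type_inference before the op is appended. A sketch of the no-attribute variant's shape, mirroring the func() body above (make_unary_layer is a hypothetical name introduced here for illustration):

    from paddle.fluid.layer_helper import LayerHelper

    def make_unary_layer(op_type):
        # Factory returning a layer function with one input 'X' and one
        # output 'Out', as generate_layer_fn_noattr does.
        def func(x, name=None):
            helper = LayerHelper(op_type, **locals())
            output = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type=op_type, inputs={"X": x}, outputs={"Out": output})
            return output
        return func

    exp = make_unary_layer('exp')  # e.g. an elementwise op registered in Fluid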
@@ -58,11 +58,11 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     helper = LayerHelper("accuracy", **locals())
     topk_out, topk_indices = nn.topk(input, k=k)
-    acc_out = helper.create_tmp_variable(dtype="float32")
+    acc_out = helper.create_variable_for_type_inference(dtype="float32")
     if correct is None:
-        correct = helper.create_tmp_variable(dtype="int64")
+        correct = helper.create_variable_for_type_inference(dtype="int64")
     if total is None:
-        total = helper.create_tmp_variable(dtype="int64")
+        total = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="accuracy",
         inputs={
@@ -124,8 +124,8 @@ def auc(input,
         auc_out=fluid.layers.auc(input=prediction, label=label)
     """
     helper = LayerHelper("auc", **locals())
-    auc_out = helper.create_tmp_variable(dtype="float64")
-    batch_auc_out = helper.create_tmp_variable(dtype="float64")
+    auc_out = helper.create_variable_for_type_inference(dtype="float64")
+    batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable so that they can accumulate across batches
     # for batch auc
......
@@ -242,7 +242,7 @@ def fc(input,
         w = helper.create_parameter(
             attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
-        tmp = helper.create_tmp_variable(dtype)
+        tmp = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
             type="mul",
             inputs={"X": input_var,
@@ -255,7 +255,7 @@ def fc(input,
     if len(mul_results) == 1:
         pre_bias = mul_results[0]
     else:
-        pre_bias = helper.create_tmp_variable(dtype)
+        pre_bias = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
             type="sum",
             inputs={"X": mul_results},
@@ -314,7 +314,7 @@ def embedding(input,
     helper = LayerHelper('embedding', **locals())
     w = helper.create_parameter(
         attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
-    tmp = helper.create_tmp_variable(dtype)
+    tmp = helper.create_variable_for_type_inference(dtype)
     padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
         size[0] + padding_idx)
     helper.append_op(
@@ -418,10 +418,10 @@ def dynamic_lstm(input,
     bias = helper.create_parameter(
         attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
-    hidden = helper.create_tmp_variable(dtype)
-    cell = helper.create_tmp_variable(dtype)
-    batch_gate = helper.create_tmp_variable(dtype)
-    batch_cell_pre_act = helper.create_tmp_variable(dtype)
+    hidden = helper.create_variable_for_type_inference(dtype)
+    cell = helper.create_variable_for_type_inference(dtype)
+    batch_gate = helper.create_variable_for_type_inference(dtype)
+    batch_cell_pre_act = helper.create_variable_for_type_inference(dtype)
     inputs = {'Input': input, 'Weight': weight, 'Bias': bias}
     batch_size = input.shape[0]
     if h_0:
@@ -621,12 +621,12 @@ def dynamic_lstmp(input,
     bias = helper.create_parameter(
         attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
-    projection = helper.create_tmp_variable(dtype)
-    cell = helper.create_tmp_variable(dtype)
-    ordered_proj0 = helper.create_tmp_variable(dtype)
-    batch_hidden = helper.create_tmp_variable(dtype)
-    batch_gate = helper.create_tmp_variable(dtype)
-    batch_cell_pre_act = helper.create_tmp_variable(dtype)
+    projection = helper.create_variable_for_type_inference(dtype)
+    cell = helper.create_variable_for_type_inference(dtype)
+    ordered_proj0 = helper.create_variable_for_type_inference(dtype)
+    batch_hidden = helper.create_variable_for_type_inference(dtype)
+    batch_gate = helper.create_variable_for_type_inference(dtype)
+    batch_cell_pre_act = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='lstmp',
@@ -751,10 +751,10 @@ def dynamic_gru(input,
         ), 'The shape of h0 should be(batch_size, %d)' % size
         inputs['H0'] = h_0
-    hidden = helper.create_tmp_variable(dtype)
-    batch_gate = helper.create_tmp_variable(dtype)
-    batch_reset_hidden_prev = helper.create_tmp_variable(dtype)
-    batch_hidden = helper.create_tmp_variable(dtype)
+    hidden = helper.create_variable_for_type_inference(dtype)
+    batch_gate = helper.create_variable_for_type_inference(dtype)
+    batch_reset_hidden_prev = helper.create_variable_for_type_inference(dtype)
+    batch_hidden = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='gru',
@@ -844,9 +844,9 @@ def gru_unit(input,
     weight = helper.create_parameter(
         attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)
-    gate = helper.create_tmp_variable(dtype)
-    reset_hidden_pre = helper.create_tmp_variable(dtype)
-    updated_hidden = helper.create_tmp_variable(dtype)
+    gate = helper.create_variable_for_type_inference(dtype)
+    reset_hidden_pre = helper.create_variable_for_type_inference(dtype)
+    updated_hidden = helper.create_variable_for_type_inference(dtype)
     inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight}
     # create bias
     if helper.bias_attr:
@@ -896,10 +896,14 @@ def linear_chain_crf(input, label, param_attr=None):
         attr=helper.param_attr,
         shape=[size + 2, size],
         dtype=helper.input_dtype())
-    alpha = helper.create_tmp_variable(dtype=helper.input_dtype())
-    emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
-    transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
-    log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype())
+    alpha = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
+    emission_exps = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
+    transition_exps = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
+    log_likelihood = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     helper.append_op(
         type='linear_chain_crf',
         inputs={"Emission": [input],
@@ -938,7 +942,8 @@ def crf_decoding(input, param_attr, label=None):
     """
     helper = LayerHelper('crf_decoding', **locals())
     transition = helper.get_parameter(param_attr.name)
-    viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype())
+    viterbi_path = helper.create_variable_for_type_inference(
+        dtype=helper.input_dtype())
     helper.append_op(
         type='crf_decoding',
         inputs={"Emission": [input],
@@ -962,9 +967,9 @@ def cos_sim(X, Y):
         Variable: the output of cosine(X, Y).
     """
     helper = LayerHelper('cos_sim', **locals())
-    out = helper.create_tmp_variable(dtype=X.dtype)
-    xnorm = helper.create_tmp_variable(dtype=X.dtype)
-    ynorm = helper.create_tmp_variable(dtype=X.dtype)
+    out = helper.create_variable_for_type_inference(dtype=X.dtype)
+    xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
+    ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
     helper.append_op(
         type='cos_sim',
         inputs={'X': [X],
@@ -1008,8 +1013,9 @@ def dropout(x, dropout_prob, is_test=False, seed=None, name=None):
     """
     helper = LayerHelper('dropout', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
-    mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    mask = helper.create_variable_for_type_inference(
+        dtype=x.dtype, stop_gradient=True)
     if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
         seed = helper.main_program.random_seed
@@ -1094,7 +1100,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=-100):
         cost = fluid.layers.cross_entropy(input=predict, label=label)
     """
     helper = LayerHelper('cross_entropy', **locals())
-    out = helper.create_tmp_variable(dtype=input.dtype)
+    out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='cross_entropy',
         inputs={'X': [input],
@@ -1141,14 +1147,14 @@ def square_error_cost(input, label):
     """
     helper = LayerHelper('square_error_cost', **locals())
-    minus_out = helper.create_tmp_variable(dtype=input.dtype)
+    minus_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='elementwise_sub',
         inputs={'X': [input],
                 'Y': [label]},
         outputs={'Out': [minus_out]})
-    square_out = helper.create_tmp_variable(dtype=input.dtype)
+    square_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='square', inputs={'X': [minus_out]},
         outputs={'Out': [square_out]})
@@ -1254,12 +1260,13 @@ def chunk_eval(input,
     helper = LayerHelper("chunk_eval", **locals())
     # prepare output
-    precision = helper.create_tmp_variable(dtype="float32")
-    recall = helper.create_tmp_variable(dtype="float32")
-    f1_score = helper.create_tmp_variable(dtype="float32")
-    num_infer_chunks = helper.create_tmp_variable(dtype="int64")
-    num_label_chunks = helper.create_tmp_variable(dtype="int64")
-    num_correct_chunks = helper.create_tmp_variable(dtype="int64")
+    precision = helper.create_variable_for_type_inference(dtype="float32")
+    recall = helper.create_variable_for_type_inference(dtype="float32")
+    f1_score = helper.create_variable_for_type_inference(dtype="float32")
+    num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
+    num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
+    num_correct_chunks = helper.create_variable_for_type_inference(
+        dtype="int64")
     helper.append_op(
         type="chunk_eval",
@@ -1326,7 +1333,7 @@ def sequence_conv(input,
     filter_shape = [filter_size * input.shape[1], num_filters]
     filter_param = helper.create_parameter(
         attr=helper.param_attr, shape=filter_shape, dtype=dtype)
-    pre_bias = helper.create_tmp_variable(dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_conv',
@@ -1382,7 +1389,7 @@ def sequence_softmax(input, use_cudnn=False, name=None):
     """
     helper = LayerHelper('sequence_softmax', **locals())
     dtype = helper.input_dtype()
-    softmax_out = helper.create_tmp_variable(dtype)
+    softmax_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="sequence_softmax",
         inputs={"X": input},
@@ -1436,7 +1443,7 @@ def softmax(input, use_cudnn=True, name=None):
     """
     helper = LayerHelper('softmax', **locals())
     dtype = helper.input_dtype()
-    softmax_out = helper.create_tmp_variable(dtype)
+    softmax_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="softmax",
         inputs={"X": input},
@@ -1599,7 +1606,7 @@ def conv2d(input,
         dtype=dtype,
         default_initializer=_get_default_param_initializer())
-    pre_bias = helper.create_tmp_variable(dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type=l_type,
@@ -1770,7 +1777,7 @@ def conv3d(input,
         dtype=dtype,
         default_initializer=_get_default_param_initializer())
-    pre_bias = helper.create_tmp_variable(dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type=l_type,
@@ -1849,8 +1856,8 @@ def sequence_pool(input, pool_type):
     """
     helper = LayerHelper('sequence_pool', **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
-    max_index = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)
+    max_index = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="sequence_pool",
@@ -1886,7 +1893,7 @@ def sequence_concat(input, name=None):
         out = fluid.layers.sequence_concat(input=[seq1, seq2, seq3])
     """
     helper = LayerHelper('sequence_concat', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     helper.append_op(
         type='sequence_concat', inputs={'X': input}, outputs={'Out': [out]})
     return out
@@ -2013,7 +2020,7 @@ def sequence_slice(input, offset, length, name=None):
     """
     helper = LayerHelper("sequence_slice", **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     offset.stop_gradient = True
     length.stop_gradient = True
@@ -2099,7 +2106,7 @@ def pool2d(input,
     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type=l_type,
@@ -2167,7 +2174,7 @@ def pool3d(input,
     l_type = "pool3d"
     helper = LayerHelper(l_type, **locals())
     dtype = helper.input_dtype()
-    pool_out = helper.create_tmp_variable(dtype)
+    pool_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type=l_type,
@@ -2310,10 +2317,13 @@ def batch_norm(input,
     mean_out = mean
     # variance and variance out share the same memory
     variance_out = variance
-    saved_mean = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    saved_variance = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
+    saved_mean = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    saved_variance = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
-    batch_norm_out = input if in_place else helper.create_tmp_variable(dtype)
+    batch_norm_out = input if in_place else helper.create_variable_for_type_inference(
+        dtype)
     helper.append_op(
         type="batch_norm",
@@ -2430,9 +2440,11 @@ def layer_norm(input,
         inputs['Bias'] = bias
     # create output
-    mean_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    variance_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True)
-    layer_norm_out = helper.create_tmp_variable(dtype)
+    mean_out = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    variance_out = helper.create_variable_for_type_inference(
+        dtype=dtype, stop_gradient=True)
+    layer_norm_out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="layer_norm",
@@ -2619,7 +2631,7 @@ def conv2d_transpose(input,
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
-    pre_bias = helper.create_tmp_variable(dtype=input.dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type=op_type,
         inputs={'Input': [input],
@@ -2797,7 +2809,7 @@ def conv3d_transpose(input,
     img_filter = helper.create_parameter(
         dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
-    pre_bias = helper.create_tmp_variable(dtype=input.dtype)
+    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type=l_type,
         inputs={'Input': [input],
@@ -2876,7 +2888,7 @@ def sequence_expand(x, y, ref_level=-1, name=None):
     """
     helper = LayerHelper('sequence_expand', input=x, **locals())
     dtype = helper.input_dtype()
-    tmp = helper.create_tmp_variable(dtype)
+    tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand',
         inputs={'X': x,
@@ -2942,7 +2954,7 @@ def sequence_expand_as(x, y, name=None):
     """
     helper = LayerHelper('sequence_expand_as', input=x, **locals())
     dtype = helper.input_dtype()
-    tmp = helper.create_tmp_variable(dtype)
+    tmp = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='sequence_expand_as',
         inputs={'X': x,
@@ -2987,8 +2999,8 @@ def sequence_pad(x, pad_value, maxlen=None, name=None):
     helper = LayerHelper('sequence_pad', input=x, **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
-    length = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
+    length = helper.create_variable_for_type_inference(dtype)
     pad_value.stop_gradient = True
     length.stop_gradient = True
@@ -3053,7 +3065,7 @@ def sequence_unpad(x, length, name=None):
     helper = LayerHelper('sequence_unpad', input=x, **locals())
     dtype = helper.input_dtype()
-    out = helper.create_tmp_variable(dtype)
+    out = helper.create_variable_for_type_inference(dtype)
     length.stop_gradient = True
@@ -3152,8 +3164,9 @@ def beam_search(pre_ids,
     score_type = scores.dtype
     id_type = ids.dtype
-    selected_scores = helper.create_tmp_variable(dtype=score_type)
-    selected_ids = helper.create_tmp_variable(dtype=id_type)
+    selected_scores = helper.create_variable_for_type_inference(
+        dtype=score_type)
+    selected_ids = helper.create_variable_for_type_inference(dtype=id_type)
     helper.append_op(
         type='beam_search',
@@ -3210,8 +3223,8 @@ def beam_search_decode(ids, scores, beam_size, end_id, name=None):
             ids, scores, beam_size=5, end_id=0)
     """
     helper = LayerHelper('beam_search_decode', **locals())
-    sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
-    sentence_scores = helper.create_tmp_variable(dtype=ids.dtype)
+    sentence_ids = helper.create_variable_for_type_inference(dtype=ids.dtype)
+    sentence_scores = helper.create_variable_for_type_inference(dtype=ids.dtype)
     helper.append_op(
         type="beam_search_decode",
@@ -3341,8 +3354,8 @@ def lstm_unit(x_t,
         param_attr=param_attr,
         bias_attr=bias_attr)
     dtype = x_t.dtype
-    c = helper.create_tmp_variable(dtype)
-    h = helper.create_tmp_variable(dtype)
+    c = helper.create_variable_for_type_inference(dtype)
+    h = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type='lstm_unit',
@@ -3396,7 +3409,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
     """
     helper = LayerHelper('reduce_sum', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3453,7 +3466,7 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_mean(x, dim=[0, 1])  # [4.0, 5.0]
     """
     helper = LayerHelper('reduce_mean', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3508,7 +3521,7 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_max(x, dim=[0, 1])  # [7.0, 8.0]
     """
     helper = LayerHelper('reduce_max', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3563,7 +3576,7 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_min(x, dim=[0, 1])  # [1.0, 2.0]
     """
     helper = LayerHelper('reduce_min', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3619,7 +3632,7 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
         fluid.layers.reduce_prod(x, dim=[0, 1])  # [105.0, 384.0]
     """
     helper = LayerHelper('reduce_prod', **locals())
-    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
     if dim is not None and not isinstance(dim, list):
         dim = [dim]
     helper.append_op(
@@ -3679,7 +3692,7 @@ def split(input, num_or_sections, dim=-1, name=None):
             dim], 'len(num_or_sections) must not be more than input.shape[dim].'
         num = len(num_or_sections)
     outs = [
-        helper.create_tmp_variable(dtype=helper.input_dtype())
+        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
         for i in range(num)
     ]
     helper.append_op(
@@ -3736,8 +3749,8 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
         axis = 0
     helper = LayerHelper("l2_normalize", **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
-    norm = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="norm",
         inputs={"X": x},
@@ -3846,7 +3859,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     __check_input(x, y)
     helper = LayerHelper('matmul', **locals())
-    out = helper.create_tmp_variable(dtype=x.dtype)
+    out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type='matmul',
         inputs={'X': x,
@@ -3917,8 +3930,8 @@ def topk(input, k, name=None):
         top5_values, top5_indices = layers.topk(input, k=5)
     """
     helper = LayerHelper("top_k", **locals())
-    values = helper.create_tmp_variable(dtype=input.dtype)
-    indices = helper.create_tmp_variable(dtype="int64")
+    values = helper.create_variable_for_type_inference(dtype=input.dtype)
+    indices = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="top_k",
         inputs={"X": [input]},
@@ -3976,8 +3989,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):
     # remove some tokens from input and labels
     if ignored_tokens is not None and len(ignored_tokens) > 0:
-        erased_input = helper.create_tmp_variable(dtype="int64")
-        erased_label = helper.create_tmp_variable(dtype="int64")
+        erased_input = helper.create_variable_for_type_inference(dtype="int64")
+        erased_label = helper.create_variable_for_type_inference(dtype="int64")
         helper.append_op(
             type="sequence_erase",
@@ -3994,8 +4007,8 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None):
         label = erased_label
     # edit distance op
-    edit_distance_out = helper.create_tmp_variable(dtype="int64")
-    sequence_num = helper.create_tmp_variable(dtype="int64")
+    edit_distance_out = helper.create_variable_for_type_inference(dtype="int64")
+    sequence_num = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="edit_distance",
         inputs={"Hyps": [input],
@@ -4070,7 +4083,7 @@ def ctc_greedy_decoder(input, blank, name=None):
     _, topk_indices = topk(input, k=1)
     # ctc align op
-    ctc_out = helper.create_tmp_variable(dtype="int64")
+    ctc_out = helper.create_variable_for_type_inference(dtype="int64")
     helper.append_op(
         type="ctc_align",
         inputs={"Input": [topk_indices]},
@@ -4120,8 +4133,8 @@ def warpctc(input, label, blank=0, norm_by_times=False):
     """
     helper = LayerHelper('warpctc', **locals())
-    loss_out = helper.create_tmp_variable(dtype=input.dtype)
-    grad_out = helper.create_tmp_variable(dtype=input.dtype)
+    loss_out = helper.create_variable_for_type_inference(dtype=input.dtype)
+    grad_out = helper.create_variable_for_type_inference(dtype=input.dtype)
     helper.append_op(
         type='warpctc',
         inputs={'Logits': [input],
@@ -4182,7 +4195,7 @@ def sequence_reshape(input, new_dim):
         x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
     """
     helper = LayerHelper('sequence_reshape', **locals())
-    out = helper.create_tmp_variable(helper.input_dtype())
+    out = helper.create_variable_for_type_inference(helper.input_dtype())
     helper.append_op(
         type='sequence_reshape',
         inputs={'X': [input]},
...@@ -4279,9 +4292,9 @@ def nce(input, ...@@ -4279,9 +4292,9 @@ def nce(input,
is_bias=True, is_bias=True,
dtype=input.dtype) dtype=input.dtype)
inputs['Bias'] = b inputs['Bias'] = b
cost = helper.create_tmp_variable(dtype=input.dtype) cost = helper.create_variable_for_type_inference(dtype=input.dtype)
sample_logits = helper.create_tmp_variable(dtype=input.dtype) sample_logits = helper.create_variable_for_type_inference(dtype=input.dtype)
sample_labels = helper.create_tmp_variable(dtype=label.dtype) sample_labels = helper.create_variable_for_type_inference(dtype=label.dtype)
if num_neg_samples is None: if num_neg_samples is None:
num_neg_samples = 10 num_neg_samples = 10
...@@ -4357,8 +4370,8 @@ def hsigmoid(input, ...@@ -4357,8 +4370,8 @@ def hsigmoid(input,
helper = LayerHelper('hierarchical_sigmoid', **locals()) helper = LayerHelper('hierarchical_sigmoid', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
pre_out = helper.create_tmp_variable(dtype) pre_out = helper.create_variable_for_type_inference(dtype)
dim = input.shape[1] dim = input.shape[1]
if num_classes < 2: if num_classes < 2:
raise ValueError("num_classes must not be less than 2.") raise ValueError("num_classes must not be less than 2.")
...@@ -4418,8 +4431,8 @@ def transpose(x, perm, name=None): ...@@ -4418,8 +4431,8 @@ def transpose(x, perm, name=None):
(idx, perm[idx], len(x.shape))) (idx, perm[idx], len(x.shape)))
helper = LayerHelper('transpose', **locals()) helper = LayerHelper('transpose', **locals())
out = helper.create_tmp_variable(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_tmp_variable(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op( helper.append_op(
type='transpose2', type='transpose2',
inputs={'X': [x]}, inputs={'X': [x]},
...@@ -4561,7 +4574,7 @@ def im2sequence(input, ...@@ -4561,7 +4574,7 @@ def im2sequence(input,
inputs["Y"] = input_image_size inputs["Y"] = input_image_size
attrs["out_stride"] = out_stride attrs["out_stride"] = out_stride
helper = LayerHelper('im2sequence', **locals()) helper = LayerHelper('im2sequence', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op( helper.append_op(
type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs) type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out return out
...@@ -4594,7 +4607,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None): ...@@ -4594,7 +4607,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
filter_shape = [future_context_size + 1, input.shape[1]] filter_shape = [future_context_size + 1, input.shape[1]]
filter_param = helper.create_parameter( filter_param = helper.create_parameter(
attr=helper.param_attr, shape=filter_shape, dtype=dtype) attr=helper.param_attr, shape=filter_shape, dtype=dtype)
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='row_conv', type='row_conv',
inputs={'X': [input], inputs={'X': [input],
...@@ -4627,7 +4640,7 @@ def multiplex(inputs, index): ...@@ -4627,7 +4640,7 @@ def multiplex(inputs, index):
raise ValueError("inputs should be a list object and contains at least " raise ValueError("inputs should be a list object and contains at least "
"2 elements.") "2 elements.")
out = helper.create_tmp_variable(inputs[0].dtype) out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op( helper.append_op(
type='multiplex', type='multiplex',
inputs={'X': inputs, inputs={'X': inputs,
...@@ -4698,8 +4711,8 @@ def softmax_with_cross_entropy(logits, ...@@ -4698,8 +4711,8 @@ def softmax_with_cross_entropy(logits,
logits=fc, label=label) logits=fc, label=label)
""" """
helper = LayerHelper('softmax_with_cross_entropy', **locals()) helper = LayerHelper('softmax_with_cross_entropy', **locals())
softmax = helper.create_tmp_variable(dtype=logits.dtype) softmax = helper.create_variable_for_type_inference(dtype=logits.dtype)
loss = helper.create_tmp_variable(dtype=logits.dtype) loss = helper.create_variable_for_type_inference(dtype=logits.dtype)
helper.append_op( helper.append_op(
type='softmax_with_cross_entropy', type='softmax_with_cross_entropy',
inputs={'Logits': logits, inputs={'Logits': logits,
...@@ -4749,8 +4762,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None): ...@@ -4749,8 +4762,8 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
""" """
helper = LayerHelper('smooth_l1_loss', **locals()) helper = LayerHelper('smooth_l1_loss', **locals())
diff = helper.create_tmp_variable(dtype=x.dtype) diff = helper.create_variable_for_type_inference(dtype=x.dtype)
loss = helper.create_tmp_variable(dtype=x.dtype) loss = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='smooth_l1_loss', type='smooth_l1_loss',
inputs={ inputs={
...@@ -4783,7 +4796,7 @@ def one_hot(input, depth): ...@@ -4783,7 +4796,7 @@ def one_hot(input, depth):
one_hot_label = layers.one_hot(input=label, depth=10) one_hot_label = layers.one_hot(input=label, depth=10)
""" """
helper = LayerHelper("one_hot", **locals()) helper = LayerHelper("one_hot", **locals())
one_hot_out = helper.create_tmp_variable(dtype='float32') one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op( helper.append_op(
type="one_hot", type="one_hot",
inputs={'X': input}, inputs={'X': input},
...@@ -4930,8 +4943,9 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None): ...@@ -4930,8 +4943,9 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
"except one unknown dimension.") "except one unknown dimension.")
helper = LayerHelper("reshape2", **locals()) helper = LayerHelper("reshape2", **locals())
x_shape = helper.create_tmp_variable(dtype=x.dtype) out = x if inplace else helper.create_variable_for_type_inference(
out = x if inplace else helper.create_tmp_variable(dtype=x.dtype) dtype=x.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type="reshape2", type="reshape2",
inputs=inputs, inputs=inputs,
...@@ -4980,8 +4994,8 @@ def squeeze(input, axes, name=None): ...@@ -4980,8 +4994,8 @@ def squeeze(input, axes, name=None):
y = layers.squeeze(input=x, axes=[1]) y = layers.squeeze(input=x, axes=[1])
""" """
helper = LayerHelper("squeeze", **locals()) helper = LayerHelper("squeeze", **locals())
out = helper.create_tmp_variable(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_tmp_variable(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op( helper.append_op(
type="squeeze2", type="squeeze2",
inputs={"X": input}, inputs={"X": input},
...@@ -5017,8 +5031,8 @@ def unsqueeze(input, axes, name=None): ...@@ -5017,8 +5031,8 @@ def unsqueeze(input, axes, name=None):
y = layers.unsqueeze(input=x, axes=[1]) y = layers.unsqueeze(input=x, axes=[1])
""" """
helper = LayerHelper("unsqueeze", **locals()) helper = LayerHelper("unsqueeze", **locals())
out = helper.create_tmp_variable(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_tmp_variable(dtype=input.dtype) x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op( helper.append_op(
type="unsqueeze2", type="unsqueeze2",
inputs={"X": input}, inputs={"X": input},
...@@ -5108,7 +5122,7 @@ def lod_reset(x, y=None, target_lod=None): ...@@ -5108,7 +5122,7 @@ def lod_reset(x, y=None, target_lod=None):
out = layers.lod_reset(x=x, y=y) out = layers.lod_reset(x=x, y=y)
""" """
helper = LayerHelper("lod_reset", **locals()) helper = LayerHelper("lod_reset", **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
if y is not None: if y is not None:
helper.append_op( helper.append_op(
type="lod_reset", inputs={'X': x, type="lod_reset", inputs={'X': x,
...@@ -5177,8 +5191,9 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): ...@@ -5177,8 +5191,9 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None):
"dims of input must be 4(not %d), and it's order must be NCHW" % "dims of input must be 4(not %d), and it's order must be NCHW" %
(dims)) (dims))
mid_out = helper.create_tmp_variable(dtype=dtype, stop_gradient=True) mid_out = helper.create_variable_for_type_inference(
lrn_out = helper.create_tmp_variable(dtype) dtype=dtype, stop_gradient=True)
lrn_out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="lrn", type="lrn",
inputs={"X": input}, inputs={"X": input},
...@@ -5243,7 +5258,7 @@ def pad(x, paddings, pad_value=0., name=None): ...@@ -5243,7 +5258,7 @@ def pad(x, paddings, pad_value=0., name=None):
""" """
helper = LayerHelper('pad', input=x, **locals()) helper = LayerHelper('pad', input=x, **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='pad', type='pad',
inputs={'X': x}, inputs={'X': x},
...@@ -5323,7 +5338,7 @@ def pad_constant_like(x, y, pad_value=0., name=None): ...@@ -5323,7 +5338,7 @@ def pad_constant_like(x, y, pad_value=0., name=None):
""" """
helper = LayerHelper('pad_constant_like', input=x, **locals()) helper = LayerHelper('pad_constant_like', input=x, **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='pad_constant_like', type='pad_constant_like',
inputs={'X': x, inputs={'X': x,
...@@ -5388,7 +5403,7 @@ def label_smooth(label, ...@@ -5388,7 +5403,7 @@ def label_smooth(label,
raise ValueError("The value of epsilon must be between 0 and 1.") raise ValueError("The value of epsilon must be between 0 and 1.")
helper = LayerHelper("label_smooth", **locals()) helper = LayerHelper("label_smooth", **locals())
label.stop_gradient = True label.stop_gradient = True
smooth_label = helper.create_tmp_variable(dtype) smooth_label = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="label_smooth", type="label_smooth",
inputs={"X": label, inputs={"X": label,
...@@ -5420,8 +5435,8 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): ...@@ -5420,8 +5435,8 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
""" """
helper = LayerHelper('roi_pool', **locals()) helper = LayerHelper('roi_pool', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
pool_out = helper.create_tmp_variable(dtype) pool_out = helper.create_variable_for_type_inference(dtype)
argmaxes = helper.create_tmp_variable(dtype='int32') argmaxes = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op( helper.append_op(
type="roi_pool", type="roi_pool",
inputs={"X": input, inputs={"X": input,
...@@ -5469,7 +5484,7 @@ def roi_align(input, ...@@ -5469,7 +5484,7 @@ def roi_align(input,
""" """
helper = LayerHelper('roi_align', **locals()) helper = LayerHelper('roi_align', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
align_out = helper.create_tmp_variable(dtype) align_out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="roi_align", type="roi_align",
inputs={"X": input, inputs={"X": input,
...@@ -5594,7 +5609,7 @@ def image_resize(input, ...@@ -5594,7 +5609,7 @@ def image_resize(input,
out_h = int(input.shape[2] * scale) out_h = int(input.shape[2] * scale)
out_w = int(input.shape[3] * scale) out_w = int(input.shape[3] * scale)
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type=resample_methods[resample], type=resample_methods[resample],
inputs=inputs, inputs=inputs,
...@@ -5703,7 +5718,7 @@ def gather(input, index): ...@@ -5703,7 +5718,7 @@ def gather(input, index):
""" """
helper = LayerHelper('gather', **locals()) helper = LayerHelper('gather', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="gather", type="gather",
inputs={"X": input, inputs={"X": input,
...@@ -5743,7 +5758,7 @@ def scatter(input, index, updates, name=None): ...@@ -5743,7 +5758,7 @@ def scatter(input, index, updates, name=None):
""" """
helper = LayerHelper('scatter', **locals()) helper = LayerHelper('scatter', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="scatter", type="scatter",
inputs={"X": input, inputs={"X": input,
...@@ -5803,7 +5818,7 @@ def sequence_scatter(input, index, updates, name=None): ...@@ -5803,7 +5818,7 @@ def sequence_scatter(input, index, updates, name=None):
""" """
helper = LayerHelper('sequence_scatter', **locals()) helper = LayerHelper('sequence_scatter', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="sequence_scatter", type="sequence_scatter",
inputs={"X": input, inputs={"X": input,
...@@ -5833,7 +5848,7 @@ def random_crop(x, shape, seed=None): ...@@ -5833,7 +5848,7 @@ def random_crop(x, shape, seed=None):
""" """
helper = LayerHelper("random_crop", **locals()) helper = LayerHelper("random_crop", **locals())
dtype = x.dtype dtype = x.dtype
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
if seed is None: if seed is None:
seed = np.random.randint(-65536, 65536) seed = np.random.randint(-65536, 65536)
op_attrs = {"shape": shape} op_attrs = {"shape": shape}
...@@ -5879,7 +5894,7 @@ def log(x, name=None): ...@@ -5879,7 +5894,7 @@ def log(x, name=None):
""" """
helper = LayerHelper('log', **locals()) helper = LayerHelper('log', **locals())
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
return out return out
...@@ -5910,7 +5925,7 @@ def relu(x, name=None): ...@@ -5910,7 +5925,7 @@ def relu(x, name=None):
""" """
helper = LayerHelper('relu', **locals()) helper = LayerHelper('relu', **locals())
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
return out return out
...@@ -5949,9 +5964,9 @@ def mean_iou(input, label, num_classes): ...@@ -5949,9 +5964,9 @@ def mean_iou(input, label, num_classes):
""" """
helper = LayerHelper('mean_iou', **locals()) helper = LayerHelper('mean_iou', **locals())
dtype = helper.input_dtype() dtype = helper.input_dtype()
out_mean_iou = helper.create_tmp_variable(dtype='float32') out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
out_wrong = helper.create_tmp_variable(dtype='int32') out_wrong = helper.create_variable_for_type_inference(dtype='int32')
out_correct = helper.create_tmp_variable(dtype='int32') out_correct = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op( helper.append_op(
type="mean_iou", type="mean_iou",
inputs={"Predictions": input, inputs={"Predictions": input,
...@@ -6043,7 +6058,7 @@ def crop(x, shape=None, offsets=None, name=None): ...@@ -6043,7 +6058,7 @@ def crop(x, shape=None, offsets=None, name=None):
if offsets is None: if offsets is None:
offsets = [0] * len(x.shape) offsets = [0] * len(x.shape)
out = helper.create_tmp_variable(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
ipts = {'X': x} ipts = {'X': x}
attrs = {} attrs = {}
if isinstance(shape, Variable): if isinstance(shape, Variable):
...@@ -6123,7 +6138,7 @@ def rank_loss(label, left, right, name=None): ...@@ -6123,7 +6138,7 @@ def rank_loss(label, left, right, name=None):
if not (isinstance(right, Variable)): if not (isinstance(right, Variable)):
raise ValueError("The right should be a Variable") raise ValueError("The right should be a Variable")
out = helper.create_tmp_variable("float32") out = helper.create_variable_for_type_inference("float32")
helper.append_op( helper.append_op(
type='rank_loss', type='rank_loss',
...@@ -6169,8 +6184,8 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None): ...@@ -6169,8 +6184,8 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
raise ValueError("The left should be a Variable.") raise ValueError("The left should be a Variable.")
if not isinstance(right, Variable): if not isinstance(right, Variable):
raise ValueError("The right should be a Variable.") raise ValueError("The right should be a Variable.")
out = helper.create_tmp_variable(left.dtype) out = helper.create_variable_for_type_inference(left.dtype)
act = helper.create_tmp_variable(left.dtype) act = helper.create_variable_for_type_inference(left.dtype)
helper.append_op( helper.append_op(
type='margin_rank_loss', type='margin_rank_loss',
inputs={"Label": label, inputs={"Label": label,
...@@ -6255,7 +6270,7 @@ def pad2d(input, ...@@ -6255,7 +6270,7 @@ def pad2d(input,
helper = LayerHelper('pad2d', **locals()) helper = LayerHelper('pad2d', **locals())
dtype = helper.input_dtype(input_param_name='input') dtype = helper.input_dtype(input_param_name='input')
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='pad2d', type='pad2d',
inputs={'X': input}, inputs={'X': input},
...@@ -6284,7 +6299,7 @@ def elu(x, alpha=1.0, name=None): ...@@ -6284,7 +6299,7 @@ def elu(x, alpha=1.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('elu', **locals()) helper = LayerHelper('elu', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='elu', type='elu',
inputs={'X': x}, inputs={'X': x},
...@@ -6307,7 +6322,7 @@ def relu6(x, threshold=6.0, name=None): ...@@ -6307,7 +6322,7 @@ def relu6(x, threshold=6.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('relu6', **locals()) helper = LayerHelper('relu6', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='relu6', type='relu6',
inputs={'X': x}, inputs={'X': x},
...@@ -6330,7 +6345,7 @@ def pow(x, factor=1.0, name=None): ...@@ -6330,7 +6345,7 @@ def pow(x, factor=1.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('pow', **locals()) helper = LayerHelper('pow', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='pow', type='pow',
inputs={'X': x}, inputs={'X': x},
...@@ -6354,7 +6369,7 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None): ...@@ -6354,7 +6369,7 @@ def stanh(x, scale_a=2.0 / 3.0, scale_b=1.7159, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('stanh', **locals()) helper = LayerHelper('stanh', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='stanh', type='stanh',
inputs={'X': x}, inputs={'X': x},
...@@ -6379,7 +6394,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None): ...@@ -6379,7 +6394,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('hard_sigmoid', **locals()) helper = LayerHelper('hard_sigmoid', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='hard_sigmoid', type='hard_sigmoid',
inputs={'X': x}, inputs={'X': x},
...@@ -6403,7 +6418,7 @@ def swish(x, beta=1.0, name=None): ...@@ -6403,7 +6418,7 @@ def swish(x, beta=1.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('swish', **locals()) helper = LayerHelper('swish', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='swish', type='swish',
inputs={'X': x}, inputs={'X': x},
...@@ -6455,7 +6470,7 @@ def prelu(x, mode, param_attr=None, name=None): ...@@ -6455,7 +6470,7 @@ def prelu(x, mode, param_attr=None, name=None):
dtype='float32', dtype='float32',
is_bias=False, is_bias=False,
default_initializer=Constant(1.0)) default_initializer=Constant(1.0))
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type="prelu", type="prelu",
inputs={"X": x, inputs={"X": x,
...@@ -6479,7 +6494,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None): ...@@ -6479,7 +6494,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('brelu', **locals()) helper = LayerHelper('brelu', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='brelu', type='brelu',
inputs={'X': x}, inputs={'X': x},
...@@ -6502,7 +6517,7 @@ def leaky_relu(x, alpha=0.02, name=None): ...@@ -6502,7 +6517,7 @@ def leaky_relu(x, alpha=0.02, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('leaky_relu', **locals()) helper = LayerHelper('leaky_relu', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='leaky_relu', type='leaky_relu',
inputs={'X': x}, inputs={'X': x},
...@@ -6524,7 +6539,7 @@ def soft_relu(x, threshold=40.0, name=None): ...@@ -6524,7 +6539,7 @@ def soft_relu(x, threshold=40.0, name=None):
output(${out_type}): ${out_comment} output(${out_type}): ${out_comment}
""" """
helper = LayerHelper('soft_relu', **locals()) helper = LayerHelper('soft_relu', **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='soft_relu', type='soft_relu',
inputs={'X': x}, inputs={'X': x},
...@@ -6591,8 +6606,8 @@ def flatten(x, axis=1, name=None): ...@@ -6591,8 +6606,8 @@ def flatten(x, axis=1, name=None):
if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0: if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
raise ValueError("The axis should be a int, and in range [0, rank(x)]") raise ValueError("The axis should be a int, and in range [0, rank(x)]")
out = helper.create_tmp_variable(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
x_shape = helper.create_tmp_variable(x.dtype) x_shape = helper.create_variable_for_type_inference(x.dtype)
helper.append_op( helper.append_op(
type='flatten2', type='flatten2',
inputs={"X": x}, inputs={"X": x},
...@@ -6638,7 +6653,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): ...@@ -6638,7 +6653,8 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None):
out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0) out = fluid.layers.sequence_enumerate(input=x, win_size=3, pad_value=0)
""" """
helper = LayerHelper('sequence_enumerate', **locals()) helper = LayerHelper('sequence_enumerate', **locals())
out = helper.create_tmp_variable(helper.input_dtype(), stop_gradient=True) out = helper.create_variable_for_type_inference(
helper.input_dtype(), stop_gradient=True)
helper.append_op( helper.append_op(
type='sequence_enumerate', type='sequence_enumerate',
inputs={'X': input}, inputs={'X': input},
...@@ -6678,9 +6694,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None): ...@@ -6678,9 +6694,9 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None):
helper = LayerHelper('sequence_mask', **locals()) helper = LayerHelper('sequence_mask', **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=dtype) out = helper.create_variable_for_type_inference(dtype=dtype)
else: else:
out = helper.create_tmp_variable(dtype=dtype, name=name) out = helper.create_variable(name=name, dtype=dtype, persistable=False)
helper.append_op( helper.append_op(
type='sequence_mask', type='sequence_mask',
...@@ -6723,7 +6739,7 @@ def stack(x, axis=0): ...@@ -6723,7 +6739,7 @@ def stack(x, axis=0):
if not isinstance(x, list) and not isinstance(x, tuple): if not isinstance(x, list) and not isinstance(x, tuple):
x = [x] x = [x]
out = helper.create_tmp_variable(x[0].dtype) out = helper.create_variable_for_type_inference(x[0].dtype)
helper.append_op( helper.append_op(
type='stack', inputs={'X': x}, outputs={'Y': out}, type='stack', inputs={'X': x}, outputs={'Y': out},
attrs={'axis': axis}) attrs={'axis': axis})
...@@ -6761,7 +6777,7 @@ def unstack(x, axis=0, num=None): ...@@ -6761,7 +6777,7 @@ def unstack(x, axis=0, num=None):
outs = [] outs = []
for _ in range(num): for _ in range(num):
outs.append(helper.create_tmp_variable(x.dtype)) outs.append(helper.create_variable_for_type_inference(x.dtype))
helper.append_op( helper.append_op(
type='unstack', type='unstack',
...@@ -6813,7 +6829,7 @@ def expand(x, expand_times, name=None): ...@@ -6813,7 +6829,7 @@ def expand(x, expand_times, name=None):
""" """
helper = LayerHelper('expand', input=x, **locals()) helper = LayerHelper('expand', input=x, **locals())
dtype = helper.input_dtype(input_param_name='x') dtype = helper.input_dtype(input_param_name='x')
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='expand', type='expand',
inputs={'X': x}, inputs={'X': x},
...@@ -6852,7 +6868,7 @@ def uniform_random_batch_size_like(input, ...@@ -6852,7 +6868,7 @@ def uniform_random_batch_size_like(input,
""" """
helper = LayerHelper('uniform_random_batch_size_like', **locals()) helper = LayerHelper('uniform_random_batch_size_like', **locals())
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op( helper.append_op(
type='uniform_random_batch_size_like', type='uniform_random_batch_size_like',
...@@ -6889,7 +6905,7 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'): ...@@ -6889,7 +6905,7 @@ def gaussian_random(shape, mean=0.0, std=1.0, seed=0, dtype='float32'):
""" """
helper = LayerHelper('gaussian_random', **locals()) helper = LayerHelper('gaussian_random', **locals())
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op( helper.append_op(
type='gaussian_random', type='gaussian_random',
...@@ -6924,7 +6940,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'): ...@@ -6924,7 +6940,7 @@ def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
""" """
helper = LayerHelper('sampling_id', **locals()) helper = LayerHelper('sampling_id', **locals())
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(
type='sampling_id', type='sampling_id',
inputs={'X': x}, inputs={'X': x},
...@@ -6963,7 +6979,7 @@ def gaussian_random_batch_size_like(input, ...@@ -6963,7 +6979,7 @@ def gaussian_random_batch_size_like(input,
""" """
helper = LayerHelper('gaussian_random_batch_size_like', **locals()) helper = LayerHelper('gaussian_random_batch_size_like', **locals())
out = helper.create_tmp_variable(dtype) out = helper.create_variable_for_type_inference(dtype)
c_dtype = convert_np_dtype_to_dtype_(dtype) c_dtype = convert_np_dtype_to_dtype_(dtype)
helper.append_op( helper.append_op(
type='gaussian_random_batch_size_like', type='gaussian_random_batch_size_like',
...@@ -6995,7 +7011,8 @@ def sum(x): ...@@ -6995,7 +7011,8 @@ def sum(x):
""" """
helper = LayerHelper('sum', **locals()) helper = LayerHelper('sum', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype('x')) out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x'))
helper.append_op( helper.append_op(
type='sum', type='sum',
inputs={'X': x}, inputs={'X': x},
...@@ -7022,7 +7039,8 @@ def slice(input, axes, starts, ends): ...@@ -7022,7 +7039,8 @@ def slice(input, axes, starts, ends):
""" """
helper = LayerHelper('slice', **locals()) helper = LayerHelper('slice', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype('input')) out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op( helper.append_op(
type='slice', type='slice',
inputs={'Input': input}, inputs={'Input': input},
...@@ -7048,7 +7066,8 @@ def shape(input): ...@@ -7048,7 +7066,8 @@ def shape(input):
""" """
helper = LayerHelper('shape', **locals()) helper = LayerHelper('shape', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype('input')) out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('input'))
helper.append_op( helper.append_op(
type='shape', inputs={'Input': input}, outputs={'Out': out}) type='shape', inputs={'Input': input}, outputs={'Out': out})
...@@ -7065,7 +7084,7 @@ def _elementwise_op(helper): ...@@ -7065,7 +7084,7 @@ def _elementwise_op(helper):
use_mkldnn = helper.kwargs.get('use_mkldnn', False) use_mkldnn = helper.kwargs.get('use_mkldnn', False)
name = helper.kwargs.get('name', None) name = helper.kwargs.get('name', None)
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7099,7 +7118,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): ...@@ -7099,7 +7118,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
helper = LayerHelper('scale', **locals()) helper = LayerHelper('scale', **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7165,7 +7184,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): ...@@ -7165,7 +7184,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
if out is None: if out is None:
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7273,7 +7292,7 @@ def clip(x, min, max, name=None): ...@@ -7273,7 +7292,7 @@ def clip(x, min, max, name=None):
helper = LayerHelper("clip", **locals()) helper = LayerHelper("clip", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7305,7 +7324,7 @@ def clip_by_norm(x, max_norm, name=None): ...@@ -7305,7 +7324,7 @@ def clip_by_norm(x, max_norm, name=None):
helper = LayerHelper("clip_by_norm", **locals()) helper = LayerHelper("clip_by_norm", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7335,7 +7354,7 @@ def mean(x, name=None): ...@@ -7335,7 +7354,7 @@ def mean(x, name=None):
helper = LayerHelper("mean", **locals()) helper = LayerHelper("mean", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7365,7 +7384,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None): ...@@ -7365,7 +7384,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
helper = LayerHelper("mul", **locals()) helper = LayerHelper("mul", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7399,7 +7418,7 @@ def sigmoid_cross_entropy_with_logits(x, label, name=None): ...@@ -7399,7 +7418,7 @@ def sigmoid_cross_entropy_with_logits(x, label, name=None):
helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals()) helper = LayerHelper("sigmoid_cross_entropy_with_logits", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7429,7 +7448,7 @@ def maxout(x, groups, name=None): ...@@ -7429,7 +7448,7 @@ def maxout(x, groups, name=None):
helper = LayerHelper("maxout", **locals()) helper = LayerHelper("maxout", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
...@@ -7468,7 +7487,7 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None): ...@@ -7468,7 +7487,7 @@ def affine_channel(x, scale=None, bias=None, data_layout='NCHW', name=None):
helper = LayerHelper("affine_channel", **locals()) helper = LayerHelper("affine_channel", **locals())
if name is None: if name is None:
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable( out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False) name=name, dtype=x.dtype, persistable=False)
......
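Editorial note: the nn.py hunks above are one mechanical rename — every output temporary is now allocated through `create_variable_for_type_inference`, so the type of the temporary can be inferred from the operator appended right after it. Below is a minimal sketch of that pattern in a user-defined layer; the layer name `double` is hypothetical, and the sketch assumes only the post-patch `LayerHelper` API plus the existing `scale` operator.

```python
import paddle.fluid as fluid
from paddle.fluid.layer_helper import LayerHelper


def double(x):
    # Hypothetical layer following the same pattern as the patched layers.
    helper = LayerHelper('double', **locals())
    # Allocate the output via the renamed helper; the op appended below is
    # what ultimately determines this variable's type.
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='scale',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'scale': 2.0})
    return out
```

Calling `double(some_var)` while building a program creates the same op/variable pair the layers above create, just through the new helper name.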
...@@ -152,7 +152,7 @@ def cast(x, dtype): ...@@ -152,7 +152,7 @@ def cast(x, dtype):
result = fluid.layers.cast(x=data, dtype='float64') result = fluid.layers.cast(x=data, dtype='float64')
""" """
helper = LayerHelper('cast', **locals()) helper = LayerHelper('cast', **locals())
out = helper.create_tmp_variable(dtype=dtype) out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op( helper.append_op(
type='cast', type='cast',
inputs={'X': [x]}, inputs={'X': [x]},
...@@ -184,7 +184,7 @@ def concat(input, axis=0, name=None): ...@@ -184,7 +184,7 @@ def concat(input, axis=0, name=None):
out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
""" """
helper = LayerHelper('concat', **locals()) helper = LayerHelper('concat', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op( helper.append_op(
type='concat', type='concat',
inputs={'X': input}, inputs={'X': input},
...@@ -221,7 +221,8 @@ def sums(input, out=None): ...@@ -221,7 +221,8 @@ def sums(input, out=None):
""" """
helper = LayerHelper('sum', **locals()) helper = LayerHelper('sum', **locals())
if out is None: if out is None:
out = helper.create_tmp_variable(dtype=helper.input_dtype()) out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op( helper.append_op(
type='sum', type='sum',
inputs={'X': input}, inputs={'X': input},
...@@ -252,7 +253,7 @@ def assign(input, output=None): ...@@ -252,7 +253,7 @@ def assign(input, output=None):
""" """
helper = LayerHelper('assign', **locals()) helper = LayerHelper('assign', **locals())
if output is None: if output is None:
output = helper.create_tmp_variable(dtype=input.dtype) output = helper.create_variable_for_type_inference(dtype=input.dtype)
if isinstance(input, Variable): if isinstance(input, Variable):
helper.append_op( helper.append_op(
type='assign', inputs={'X': [input]}, outputs={'Out': [output]}) type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
...@@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): ...@@ -311,7 +312,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
helper = LayerHelper("fill_constant", **locals()) helper = LayerHelper("fill_constant", **locals())
if out is None: if out is None:
out = helper.create_tmp_variable(dtype=dtype) out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op( helper.append_op(
type='fill_constant', type='fill_constant',
inputs={}, inputs={},
...@@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input, ...@@ -358,7 +359,7 @@ def fill_constant_batch_size_like(input,
${out_comment}. ${out_comment}.
""" """
helper = LayerHelper("fill_constant_batch_size_like", **locals()) helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_tmp_variable(dtype=dtype) out = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op( helper.append_op(
type='fill_constant_batch_size_like', type='fill_constant_batch_size_like',
inputs={'Input': input}, inputs={'Input': input},
...@@ -396,7 +397,7 @@ def argmin(x, axis=0): ...@@ -396,7 +397,7 @@ def argmin(x, axis=0):
out = fluid.layers.argmin(x=input, axis=-1) out = fluid.layers.argmin(x=input, axis=-1)
""" """
helper = LayerHelper("arg_min", **locals()) helper = LayerHelper("arg_min", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op( helper.append_op(
type='arg_min', type='arg_min',
inputs={'X': x}, inputs={'X': x},
...@@ -427,7 +428,7 @@ def argmax(x, axis=0): ...@@ -427,7 +428,7 @@ def argmax(x, axis=0):
out = fluid.layers.argmax(x=input, axis=-1) out = fluid.layers.argmax(x=input, axis=-1)
""" """
helper = LayerHelper("arg_max", **locals()) helper = LayerHelper("arg_max", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64) out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op( helper.append_op(
type='arg_max', type='arg_max',
inputs={'X': x}, inputs={'X': x},
...@@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None): ...@@ -477,8 +478,10 @@ def argsort(input, axis=-1, name=None):
out, indices = fluid.layers.argsort(input, axis=0) out, indices = fluid.layers.argsort(input, axis=0)
""" """
helper = LayerHelper("argsort", **locals()) helper = LayerHelper("argsort", **locals())
out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True) out = helper.create_variable_for_type_inference(
ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True) dtype=input.dtype, stop_gradient=True)
ids = helper.create_variable_for_type_inference(
VarDesc.VarType.INT64, stop_gradient=True)
helper.append_op( helper.append_op(
type='argsort', type='argsort',
inputs={'X': input}, inputs={'X': input},
...@@ -562,7 +565,7 @@ def reverse(x, axis): ...@@ -562,7 +565,7 @@ def reverse(x, axis):
if isinstance(axis, int): if isinstance(axis, int):
axis = [axis] axis = [axis]
helper = LayerHelper("reverse", **locals()) helper = LayerHelper("reverse", **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(
type='reverse', type='reverse',
inputs={'Input': x}, inputs={'Input': x},
...@@ -654,7 +657,7 @@ def has_inf(x): ...@@ -654,7 +657,7 @@ def has_inf(x):
Variable: The tensor variable storing the output, containing a single bool value. Variable: The tensor variable storing the output, containing a single bool value.
""" """
helper = LayerHelper("isinf", **locals()) helper = LayerHelper("isinf", **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
return out return out
...@@ -670,7 +673,7 @@ def has_nan(x): ...@@ -670,7 +673,7 @@ def has_nan(x):
Variable: The tensor variable storing the output, containing a single bool value. Variable: The tensor variable storing the output, containing a single bool value.
""" """
helper = LayerHelper("isnan", **locals()) helper = LayerHelper("isnan", **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
return out return out
...@@ -687,6 +690,6 @@ def isfinite(x): ...@@ -687,6 +690,6 @@ def isfinite(x):
Variable: The tensor variable storing the output, containing a bool value. Variable: The tensor variable storing the output, containing a bool value.
""" """
helper = LayerHelper("isfinite", **locals()) helper = LayerHelper("isfinite", **locals())
out = helper.create_tmp_variable(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
return out return out
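Editorial note: the tensor.py checks just above (`has_inf`, `has_nan`, `isfinite`) each collapse a whole tensor into a single bool output. A short usage sketch under the usual fluid program/executor workflow; the variable names and the CPU place are illustrative, not part of this commit:

```python
import numpy as np
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[3], dtype='float32')
finite = fluid.layers.isfinite(x)

exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
result, = exe.run(feed={'x': np.array([[1.0, np.inf, 3.0]], dtype='float32')},
                  fetch_list=[finite])
# `result` should come back False (a one-element bool tensor), since the
# fed batch contains an inf.
```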
...@@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer): ...@@ -151,7 +151,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
decay = block.create_var( decay = block.create_var(
dtype="float32", dtype="float32",
shape=param.shape, shape=param.shape,
type=core.VarDesc.VarType.SELECTED_ROWS) type=core.VarDesc.VarType.LOD_TENSOR)
block.append_op( block.append_op(
type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
block.append_op( block.append_op(
...@@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer): ...@@ -228,7 +228,7 @@ class L1DecayRegularizer(WeightDecayRegularizer):
decay = block.create_var( decay = block.create_var(
dtype="float32", dtype="float32",
shape=param.shape, shape=param.shape,
type=core.VarDesc.VarType.SELECTED_ROWS) type=core.VarDesc.VarType.LOD_TENSOR)
block.append_op( block.append_op(
type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
block.append_op( block.append_op(
......
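Editorial note: in the regularizer hunks above only the `type` of the intermediate `decay` variable changes — it is now created as a dense `LOD_TENSOR` instead of `SELECTED_ROWS`, while the sparse-gradient path still runs `extract_rows` on the gradient, as the unchanged context lines show. A condensed, self-contained sketch of the new creation site; the program, block, and stand-in `param` variable are scaffolding for illustration only:

```python
import paddle.fluid as fluid
import paddle.fluid.core as core

main = fluid.Program()
block = main.global_block()
# Stand-in for the parameter the framework hands to __call__.
param = block.create_var(name='w', shape=[4, 4], dtype='float32')

# The patched line: the decay intermediate is now a dense LOD_TENSOR.
decay = block.create_var(
    dtype="float32",
    shape=param.shape,
    type=core.VarDesc.VarType.LOD_TENSOR)
```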
...@@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase): ...@@ -30,7 +30,6 @@ class TestSliceVar(unittest.TestCase):
var = program.global_block().create_var( var = program.global_block().create_var(
name=str(random.randint(10000, 99999)), name=str(random.randint(10000, 99999)),
persistable=True, persistable=True,
# dtype=core.VarDesc.VarType.LOD_TENSOR,
shape=shape) shape=shape)
var_list.append(var) var_list.append(var)
blocks = slice_variable(var_list, 10, min_size) blocks = slice_variable(var_list, 10, min_size)
......