Unverified commit a1772bb8, authored by Weilong Wu, committed by GitHub

[Eager] polish some apis logic (#49733)

* [Eager] polish some apis logic

* polish api logic
Parent 1c8531ce
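The pattern applied throughout this commit: check_variable_and_dtype / check_type calls that previously ran unconditionally now run only on the static-graph path, so eager (dygraph) execution dispatches straight to the C++ op, which validates its inputs itself. A minimal before/after sketch of the shape of each hunk below; my_api and _C_ops.my_op are illustrative placeholders, not real Paddle APIs:

    # Before: the Python-level dtype check ran on every call,
    # including in eager (dygraph) mode.
    def my_api(x):
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
        if in_dygraph_mode():
            return _C_ops.my_op(x)  # eager path still paid for the check
        else:
            helper = LayerHelper('my_op', **locals())
            ...

    # After: the check moves into the static-graph branch and the
    # eager fast path skips it entirely.
    def my_api(x):
        if in_dygraph_mode():
            return _C_ops.my_op(x)  # straight to the C++ kernel
        else:
            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
            helper = LayerHelper('my_op', **locals())
            ...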
@@ -158,15 +158,14 @@ class Dirichlet(exponential_family.ExponentialFamily):
def _dirichlet(concentration, name=None):
op_type = 'dirichlet'
check_variable_and_dtype(
concentration, 'concentration', ['float32', 'float64'], op_type
)
if in_dygraph_mode():
return paddle._C_ops.dirichlet(concentration)
else:
op_type = 'dirichlet'
check_variable_and_dtype(
concentration, 'concentration', ['float32', 'float64'], op_type
)
helper = LayerHelper(op_type, **locals())
out = helper.create_variable_for_type_inference(
dtype=concentration.dtype
......
@@ -266,6 +266,7 @@ class UniformInitializer(Initializer):
block = self._check_block(block)
assert isinstance(block, framework.Block)
if not in_dygraph_mode():
check_variable_and_dtype(
var,
"Out",
@@ -381,13 +382,6 @@ class NormalInitializer(Initializer):
assert isinstance(block, framework.Block)
check_variable_and_dtype(
var,
"Out",
["uint16", "float16", "float32", "float64"],
"guassian_random",
)
if self._seed == 0:
self._seed = block.program.random_seed
@@ -405,6 +399,12 @@ class NormalInitializer(Initializer):
return None
else:
check_variable_and_dtype(
var,
"Out",
["uint16", "float16", "float32", "float64"],
"guassian_random",
)
op = block.append_op(
type="gaussian_random",
outputs={"Out": var},
@@ -596,6 +596,7 @@ class XavierInitializer(Initializer):
block = self._check_block(block)
assert isinstance(block, framework.Block)
if not in_dygraph_mode():
check_variable_and_dtype(
var,
"Out",
......
@@ -1143,9 +1143,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
raise ValueError("loop_vars in while_loop should not be empty")
pre_cond = cond(*loop_vars)
check_variable_and_dtype(
pre_cond, 'var of cond returned', ['bool'], 'fluid.layers.while_loop'
)
if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
raise TypeError(
"the shape of the variable returned by cond should be [1],"
@@ -1167,6 +1165,12 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
return loop_vars
else:
check_variable_and_dtype(
pre_cond,
'var of cond returned',
['bool'],
'fluid.layers.while_loop',
)
while_loop_block = While(pre_cond, is_test, name)
has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
with while_loop_block.block():
......
@@ -102,10 +102,6 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
y = F.unfold(x, [3, 3], 1, 1, 1)
"""
helper = LayerHelper("unfold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
assert len(x.shape) == 4, "input should be the format of [N, C, H, W]"
if isinstance(kernel_sizes, int):
@@ -149,6 +145,9 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
if in_dygraph_mode():
return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations)
helper = LayerHelper("unfold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="unfold",
@@ -2237,11 +2236,6 @@ def fold(
# y.shape = [2,3,4,5]
"""
helper = LayerHelper("fold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
assert len(x.shape) == 3, "input should be the format of [N, C, L]"
def _is_list_or_turple_(data):
@@ -2311,6 +2305,9 @@ def fold(
dilations,
)
else:
helper = LayerHelper("fold", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="fold",
......
@@ -521,8 +521,7 @@ def edit_distance(
# [4]
"""
check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
helper = LayerHelper("edit_distance", **locals())
# remove some tokens from input and labels
@@ -551,6 +550,8 @@ def edit_distance(
input, label, input_length, label_length, normalized
)
check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
this_inputs = {"Hyps": [input], "Refs": [label]}
if input_length is not None and label_length is not None:
this_inputs['HypsLength'] = [input_length]
@@ -1075,16 +1076,16 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
print(output)
# [0.068004]
"""
if in_dygraph_mode():
out, residual = _C_ops.huber_loss(input, label, delta)
else:
check_variable_and_dtype(
input, 'input', ['float32', 'float64'], 'smooth_l1_loss'
)
check_variable_and_dtype(
label, 'label', ['float32', 'float64'], 'smooth_l1_loss'
)
if in_dygraph_mode():
out, residual = _C_ops.huber_loss(input, label, delta)
else:
helper = LayerHelper('huber_loss', **locals())
residual = helper.create_variable_for_type_inference(
dtype=helper.input_dtype()
......
@@ -85,11 +85,6 @@ class Orthogonal(Initializer):
block = self._check_block(block)
assert isinstance(var, framework.Parameter)
assert isinstance(block, framework.Block)
# 'qr' op only support float32/float64 now
check_variable_and_dtype(
var, "Out", ["float32", "float64"], "Orthogonal"
)
self._seed = block.program.random_seed
shape = var.shape
@@ -129,6 +124,11 @@ class Orthogonal(Initializer):
return None
# 'qr' op only support float32/float64 now
check_variable_and_dtype(
var, "Out", ["float32", "float64"], "Orthogonal"
)
normal_var = block.create_var(
name=unique_name.generate('.'.join(['gaussian_random', 'tmp'])),
dtype=var.dtype,
......
@@ -37,14 +37,6 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None):
scale: The scale of check_finite_and_unscale operator.
float_status(Tensor): (Only used on NPU) The float status to check overflow.
"""
check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
for e in x:
check_variable_and_dtype(
e,
"x",
['float16', 'float32', 'float64'],
'check_finite_and_unscale',
)
helper = LayerHelper("check_finite_and_unscale", **locals())
@@ -54,6 +46,15 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None):
_C_ops.check_finite_and_unscale_(x, scale, found_inf)
return x, found_inf
check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
for e in x:
check_variable_and_dtype(
e,
"x",
['float16', 'float32', 'float64'],
'check_finite_and_unscale',
)
inputs = {'X': x, 'Scale': scale}
if core.is_compiled_with_npu():
check_variable_and_dtype(
@@ -110,6 +111,20 @@ def update_loss_scaling(
decr_ratio(float): The less-than-one-multiplier to use when decreasing
loss scaling.
"""
if in_dygraph_mode():
_C_ops.update_loss_scaling_(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
stop_update,
)
return x
check_variable_and_dtype(
prev_loss_scaling,
@@ -131,21 +146,6 @@ def update_loss_scaling(
prev_loss_scaling.dtype == e.dtype
), "The dtype of prev_loss_scaling should be equal to the dtype of x."
if in_dygraph_mode():
_C_ops.update_loss_scaling_(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
stop_update,
)
return x
helper = LayerHelper("update_loss_scaling", **locals())
inputs = {
......
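check_finite_and_unscale and update_loss_scaling (above) use a variant of the same refactor: instead of an if/else, the dygraph branch becomes an early return placed before the Python-level checks, so eager execution never reaches them. A sketch of that variant, again with placeholder names rather than real Paddle ops:

    def my_amp_api(x, scale):
        # Eager mode: call the in-place C++ op and return before any
        # Python-level validation runs.
        if in_dygraph_mode():
            _C_ops.my_op_(x, scale)  # placeholder in-place op
            return x

        # Static-graph mode only: validate inputs, then build the op.
        check_type(x, 'x', (tuple, list), 'my_op')
        helper = LayerHelper('my_op', **locals())
        ...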
@@ -2378,10 +2378,6 @@ def eigvals(x, name=None):
# [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128
"""
check_variable_and_dtype(
x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals'
)
x_shape = list(x.shape)
if len(x_shape) < 2:
raise ValueError(
@@ -2400,6 +2396,12 @@ def eigvals(x, name=None):
if in_dygraph_mode():
return _C_ops.eigvals(x)
else:
check_variable_and_dtype(
x,
'dtype',
['float32', 'float64', 'complex64', 'complex128'],
'eigvals',
)
helper = LayerHelper('eigvals', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
......
@@ -2014,7 +2014,6 @@ def renorm(x, p, axis, max_norm):
"""
input_shape = x.shape
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
if not axis < len(input_shape):
raise ValueError(
"the axis:{} should be less then the shape's size {}:{}".format(
@@ -2033,6 +2032,7 @@ def renorm(x, p, axis, max_norm):
out = _C_ops.renorm(x, p, axis, max_norm)
return out
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
inputs = {'X': x}
attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
......
@@ -644,7 +644,6 @@ def where(condition, x=None, y=None, name=None):
check_variable_and_dtype(
y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where'
)
helper = LayerHelper("where", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
@@ -492,11 +492,6 @@ def prior_box(
flip=True)
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
)
def _is_list_or_tuple_(data):
return isinstance(data, list) or isinstance(data, tuple)
@@ -541,6 +536,11 @@ def prior_box(
return box, var
else:
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
)
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
@@ -679,13 +679,6 @@ def box_coder(
box_normalized=False)
"""
check_variable_and_dtype(
prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
)
check_variable_and_dtype(
target_box, 'target_box', ['float32', 'float64'], 'box_coder'
)
if in_dygraph_mode():
if isinstance(prior_box_var, Variable):
output_box = _C_ops.box_coder(
@@ -712,6 +705,12 @@ def box_coder(
return output_box
else:
check_variable_and_dtype(
prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
)
check_variable_and_dtype(
target_box, 'target_box', ['float32', 'float64'], 'box_coder'
)
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
@@ -2268,21 +2267,6 @@ def matrix_nms(
nms_top_k=400, keep_top_k=200, normalized=False)
"""
check_variable_and_dtype(
bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
)
check_variable_and_dtype(
scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
)
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
if in_dygraph_mode():
out, index, rois_num = _C_ops.matrix_nms(
bboxes,
@@ -2302,6 +2286,20 @@ def matrix_nms(
rois_num = None
return out, rois_num, index
else:
check_variable_and_dtype(
bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
)
check_variable_and_dtype(
scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
)
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int32')
......
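A quick way to exercise the new eager fast path (assuming a Paddle build containing this commit; the call mirrors the unfold docstring in this diff):

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([2, 3, 32, 32])  # eager mode is the default in 2.x
    # unfold now dispatches directly to _C_ops.unfold without running
    # check_variable_and_dtype first.
    y = F.unfold(x, [3, 3], 1, 1, 1)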