BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit a1772bb8 (unverified)
Authored January 13, 2023 by Weilong Wu; committed via GitHub on January 13, 2023.
[Eager] polish some apis logic (#49733)

* [Eager] polish some apis logic
* polish api logic
Parent: 1c8531ce
Showing 11 changed files with 106 additions and 105 deletions (+106 −105).
Changed files:

python/paddle/distribution/dirichlet.py  +4 −5
python/paddle/fluid/initializer.py  +20 −19
python/paddle/fluid/layers/control_flow.py  +7 −3
python/paddle/nn/functional/common.py  +6 −9
python/paddle/nn/functional/loss.py  +9 −8
python/paddle/nn/initializer/orthogonal.py  +5 −5
python/paddle/static/amp/amp_nn.py  +23 −23
python/paddle/tensor/linalg.py  +6 −4
python/paddle/tensor/math.py  +1 −1
python/paddle/tensor/search.py  +0 −1
python/paddle/vision/ops.py  +25 −27
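All eleven files apply the same refactor: Python-side input validation (check_variable_and_dtype / check_type) and LayerHelper setup move out of the shared prologue and into the static-graph branch, so the in_dygraph_mode() fast path dispatches straight to the C++ operator. A minimal sketch of the before/after shape, assuming a hypothetical operator my_op (my_api / my_op are illustrative names, not APIs from this diff; the imports are the ones these files already use):

# Hypothetical example of the pattern applied throughout this commit.
from paddle import _C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper

# Before: validation and helper setup ran on every call, eager included.
def my_api_before(x, name=None):
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
    helper = LayerHelper('my_op', **locals())
    if in_dygraph_mode():
        return _C_ops.my_op(x)
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='my_op', inputs={'X': x}, outputs={'Out': out})
    return out

# After: the eager branch returns immediately; the checks and the
# LayerHelper construction are paid only when building a static graph.
def my_api_after(x, name=None):
    if in_dygraph_mode():
        return _C_ops.my_op(x)
    else:
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
        helper = LayerHelper('my_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='my_op', inputs={'X': x}, outputs={'Out': out})
        return out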
python/paddle/distribution/dirichlet.py

@@ -158,15 +158,14 @@ class Dirichlet(exponential_family.ExponentialFamily):

 def _dirichlet(concentration, name=None):
-    op_type = 'dirichlet'
-    check_variable_and_dtype(
-        concentration, 'concentration', ['float32', 'float64'], op_type
-    )
     if in_dygraph_mode():
         return paddle._C_ops.dirichlet(concentration)
     else:
+        op_type = 'dirichlet'
+        check_variable_and_dtype(
+            concentration, 'concentration', ['float32', 'float64'], op_type
+        )
         helper = LayerHelper(op_type, **locals())
         out = helper.create_variable_for_type_inference(
             dtype=concentration.dtype
python/paddle/fluid/initializer.py

@@ -266,12 +266,13 @@ class UniformInitializer(Initializer):
         block = self._check_block(block)

         assert isinstance(block, framework.Block)
-        check_variable_and_dtype(
-            var,
-            "Out",
-            ["uint16", "float16", "float32", "float64"],
-            "uniform_random",
-        )
+        if not in_dygraph_mode():
+            check_variable_and_dtype(
+                var,
+                "Out",
+                ["uint16", "float16", "float32", "float64"],
+                "uniform_random",
+            )

         if self._seed == 0:
             self._seed = block.program.random_seed

@@ -381,13 +382,6 @@ class NormalInitializer(Initializer):

         assert isinstance(block, framework.Block)

-        check_variable_and_dtype(
-            var,
-            "Out",
-            ["uint16", "float16", "float32", "float64"],
-            "guassian_random",
-        )
-
         if self._seed == 0:
             self._seed = block.program.random_seed

@@ -405,6 +399,12 @@ class NormalInitializer(Initializer):
             return None
         else:
+            check_variable_and_dtype(
+                var,
+                "Out",
+                ["uint16", "float16", "float32", "float64"],
+                "guassian_random",
+            )
             op = block.append_op(
                 type="gaussian_random",
                 outputs={"Out": var},

@@ -596,12 +596,13 @@ class XavierInitializer(Initializer):
         block = self._check_block(block)

         assert isinstance(block, framework.Block)
-        check_variable_and_dtype(
-            var,
-            "Out",
-            ["uint16", "float16", "float32", "float64"],
-            "xavier_init",
-        )
+        if not in_dygraph_mode():
+            check_variable_and_dtype(
+                var,
+                "Out",
+                ["uint16", "float16", "float32", "float64"],
+                "xavier_init",
+            )

         f_in, f_out = self._compute_fans(var)
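The initializer hunks use a slightly different shape: __call__ has no eager early return (the init op must still run in both modes), so the dtype check is wrapped in a guard rather than moved into an else branch, while NormalInitializer instead performs the check just before append_op on the static-graph path. A sketch of the guard variant in isolation (_maybe_check is our own illustrative helper name, not from the diff):

from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import in_dygraph_mode

def _maybe_check(var):
    # Guard variant from UniformInitializer / XavierInitializer above:
    # the Python-side dtype check is skipped entirely in dygraph mode,
    # while the rest of the initializer runs unchanged in both modes.
    if not in_dygraph_mode():
        check_variable_and_dtype(
            var,
            "Out",
            ["uint16", "float16", "float32", "float64"],
            "uniform_random",
        )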
python/paddle/fluid/layers/control_flow.py

@@ -1143,9 +1143,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
         raise ValueError("loop_vars in while_loop should not be empty")

     pre_cond = cond(*loop_vars)
-    check_variable_and_dtype(
-        pre_cond, 'var of cond returned', ['bool'], 'fluid.layers.while_loop'
-    )
+
     if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
         raise TypeError(
             "the shape of the variable returned by cond should be [1],"

@@ -1167,6 +1165,12 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
             map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
         return loop_vars
     else:
+        check_variable_and_dtype(
+            pre_cond,
+            'var of cond returned',
+            ['bool'],
+            'fluid.layers.while_loop',
+        )
         while_loop_block = While(pre_cond, is_test, name)
         has_mutable_vars_in_loop = hold_mutable_vars(loop_vars)
         with while_loop_block.block():
python/paddle/nn/functional/common.py

@@ -102,10 +102,6 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
            y = F.unfold(x, [3, 3], 1, 1, 1)
    """

-    helper = LayerHelper("unfold", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
-
     assert len(x.shape) == 4, "input should be the format of [N, C, H, W]"

     if isinstance(kernel_sizes, int):

@@ -149,6 +145,9 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
     if in_dygraph_mode():
         return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations)

+    helper = LayerHelper("unfold", **locals())
+    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')
+
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="unfold",

@@ -2237,11 +2236,6 @@ def fold(
            # y.shape = [2,3,4,5]
    """

-    helper = LayerHelper("fold", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
-
     assert len(x.shape) == 3, "input should be the format of [N, C, L]"

     def _is_list_or_turple_(data):

@@ -2311,6 +2305,9 @@ def fold(
             dilations,
         )
     else:
+        helper = LayerHelper("fold", **locals())
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fold')
+
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type="fold",
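The user-visible effect of this split, sketched under the assumption of a post-commit build: in dygraph mode F.unfold now reaches _C_ops.unfold without the Python-side dtype check, so an unsupported dtype would be rejected by the kernel itself rather than by check_variable_and_dtype, while static-graph programs still run the check first.

import paddle
import paddle.nn.functional as F

# Eager mode (the default): the call below takes the fast path and never
# touches check_variable_and_dtype or LayerHelper.
x = paddle.randn([2, 3, 8, 8], dtype='float32')
y = F.unfold(x, kernel_sizes=[3, 3])
print(y.shape)  # [2, 27, 36]: channels C*kh*kw = 27, positions L = 6*6 = 36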
python/paddle/nn/functional/loss.py

@@ -521,8 +521,7 @@ def edit_distance(
        # [4]
    """
-    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
-    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
     helper = LayerHelper("edit_distance", **locals())

     # remove some tokens from input and labels

@@ -551,6 +550,8 @@ def edit_distance(
             input, label, input_length, label_length, normalized
         )

+    check_variable_and_dtype(input, 'input', ['int64'], 'edit_distance')
+    check_variable_and_dtype(label, 'label', ['int64'], 'edit_distance')
     this_inputs = {"Hyps": [input], "Refs": [label]}
     if input_length is not None and label_length is not None:
         this_inputs['HypsLength'] = [input_length]

@@ -1075,16 +1076,16 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
            print(output)
            # [0.068004]
    """
-    check_variable_and_dtype(
-        input, 'input', ['float32', 'float64'], 'smooth_l1_loss'
-    )
-    check_variable_and_dtype(
-        label, 'label', ['float32', 'float64'], 'smooth_l1_loss'
-    )
     if in_dygraph_mode():
         out, residual = _C_ops.huber_loss(input, label, delta)
     else:
+        check_variable_and_dtype(
+            input, 'input', ['float32', 'float64'], 'smooth_l1_loss'
+        )
+        check_variable_and_dtype(
+            label, 'label', ['float32', 'float64'], 'smooth_l1_loss'
+        )
         helper = LayerHelper('huber_loss', **locals())
         residual = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype()
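For reference, an eager call to the reworked smooth_l1_loss, mirroring the docstring example in the hunk above; it now goes straight to _C_ops.huber_loss with no Python dtype checks (the input values here are illustrative):

import paddle
import paddle.nn.functional as F

input = paddle.rand([3, 3], dtype='float32')
label = paddle.rand([3, 3], dtype='float32')
# Eager fast path: out, residual = _C_ops.huber_loss(input, label, delta)
output = F.smooth_l1_loss(input, label)
print(output)  # a scalar tensor (reduction='mean' by default)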
python/paddle/nn/initializer/orthogonal.py

@@ -85,11 +85,6 @@ class Orthogonal(Initializer):
         block = self._check_block(block)
         assert isinstance(var, framework.Parameter)
         assert isinstance(block, framework.Block)

-        # 'qr' op only support float32/float64 now
-        check_variable_and_dtype(
-            var, "Out", ["float32", "float64"], "Orthogonal"
-        )
-
         self._seed = block.program.random_seed

         shape = var.shape

@@ -129,6 +124,11 @@ class Orthogonal(Initializer):
             return None

+        # 'qr' op only support float32/float64 now
+        check_variable_and_dtype(
+            var, "Out", ["float32", "float64"], "Orthogonal"
+        )
+
         normal_var = block.create_var(
             name=unique_name.generate('.'.join(['gaussian_random', 'tmp'])),
             dtype=var.dtype,
python/paddle/static/amp/amp_nn.py

@@ -37,14 +37,6 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None):
        scale: The scale of check_finite_and_unscale operator.
        float_status(Tensor): (Only used on NPU) The float status to check overflow.
    """
-    check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
-    for e in x:
-        check_variable_and_dtype(
-            e,
-            "x",
-            ['float16', 'float32', 'float64'],
-            'check_finite_and_unscale',
-        )
     helper = LayerHelper("check_finite_and_unscale", **locals())

@@ -54,6 +46,15 @@ def check_finite_and_unscale(x, scale, name=None, float_status=None):
         _C_ops.check_finite_and_unscale_(x, scale, found_inf)
         return x, found_inf

+    check_type(x, 'x', (tuple, list), 'check_finite_and_unscale')
+    for e in x:
+        check_variable_and_dtype(
+            e,
+            "x",
+            ['float16', 'float32', 'float64'],
+            'check_finite_and_unscale',
+        )
+
     inputs = {'X': x, 'Scale': scale}
     if core.is_compiled_with_npu():
         check_variable_and_dtype(

@@ -110,6 +111,20 @@ def update_loss_scaling(
        decr_ratio(float): The less-than-one-multiplier to use when decreasing
                           loss scaling.
    """
+    if in_dygraph_mode():
+        _C_ops.update_loss_scaling_(
+            x,
+            found_inf,
+            prev_loss_scaling,
+            num_good_steps,
+            num_bad_steps,
+            incr_every_n_steps,
+            decr_every_n_nan_or_inf,
+            incr_ratio,
+            decr_ratio,
+            stop_update,
+        )
+        return x

     check_variable_and_dtype(
         prev_loss_scaling,

@@ -131,21 +146,6 @@ def update_loss_scaling(
                prev_loss_scaling.dtype == e.dtype
            ), "The dtype of prev_loss_scaling should be equal to the dtype of x."

-    if in_dygraph_mode():
-        _C_ops.update_loss_scaling_(
-            x,
-            found_inf,
-            prev_loss_scaling,
-            num_good_steps,
-            num_bad_steps,
-            incr_every_n_steps,
-            decr_every_n_nan_or_inf,
-            incr_ratio,
-            decr_ratio,
-            stop_update,
-        )
-        return x
-
     helper = LayerHelper("update_loss_scaling", **locals())

     inputs = {
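The update_loss_scaling hunks are the same relocation with one subtlety worth spelling out: before this commit the dygraph early exit sat below the validation block, so eager calls still paid for check_variable_and_dtype and the dtype-consistency assertions; now the early exit comes first and those checks are static-graph-only. A condensed sketch of the new control flow (update_loss_scaling_flow is our own name; *rest abbreviates the remaining arguments from the diff):

from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode

def update_loss_scaling_flow(x, found_inf, prev_loss_scaling, *rest):
    if in_dygraph_mode():
        # Eager: update in place and return immediately, skipping all of
        # the Python-side validation below.
        _C_ops.update_loss_scaling_(x, found_inf, prev_loss_scaling, *rest)
        return x
    # Static graph only: the check_variable_and_dtype / dtype-consistency
    # assertions and the LayerHelper construction from the hunks above.
    ...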
python/paddle/tensor/linalg.py

@@ -2378,10 +2378,6 @@ def eigvals(x, name=None):
            # [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128
    """
-    check_variable_and_dtype(
-        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigvals'
-    )
-
     x_shape = list(x.shape)
     if len(x_shape) < 2:
         raise ValueError(

@@ -2400,6 +2396,12 @@ def eigvals(x, name=None):
     if in_dygraph_mode():
         return _C_ops.eigvals(x)
     else:
+        check_variable_and_dtype(
+            x,
+            'dtype',
+            ['float32', 'float64', 'complex64', 'complex128'],
+            'eigvals',
+        )
         helper = LayerHelper('eigvals', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
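As with the other APIs, eigvals in eager mode now returns _C_ops.eigvals(x) before any validation runs. A quick dygraph call for illustration:

import paddle

x = paddle.rand([3, 3], dtype='float32')
w = paddle.linalg.eigvals(x)  # eager: no check_variable_and_dtype
print(w.dtype)  # complex64 for float32 input, per the docstring above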
python/paddle/tensor/math.py

@@ -2014,7 +2014,6 @@ def renorm(x, p, axis, max_norm):
    """
     input_shape = x.shape
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
     if not axis < len(input_shape):
         raise ValueError(
             "the axis:{} should be less then the shape's size {}:{}".format(

@@ -2033,6 +2032,7 @@ def renorm(x, p, axis, max_norm):
         out = _C_ops.renorm(x, p, axis, max_norm)
         return out
     else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
         inputs = {'X': x}
         attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
python/paddle/tensor/search.py

@@ -644,7 +644,6 @@ def where(condition, x=None, y=None, name=None):
         check_variable_and_dtype(
             y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where'
         )
-
         helper = LayerHelper("where", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
python/paddle/vision/ops.py

@@ -492,11 +492,6 @@ def prior_box(
                flip=True)
    """
-    helper = LayerHelper("prior_box", **locals())
-    dtype = helper.input_dtype()
-    check_variable_and_dtype(
-        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
-    )
-
     def _is_list_or_tuple_(data):
         return isinstance(data, list) or isinstance(data, tuple)

@@ -541,6 +536,11 @@ def prior_box(
         return box, var
     else:
+        helper = LayerHelper("prior_box", **locals())
+        dtype = helper.input_dtype()
+        check_variable_and_dtype(
+            input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box'
+        )
         attrs = {
             'min_sizes': min_sizes,
             'aspect_ratios': aspect_ratios,

@@ -679,13 +679,6 @@ def box_coder(
                box_normalized=False)
    """
-    check_variable_and_dtype(
-        prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
-    )
-    check_variable_and_dtype(
-        target_box, 'target_box', ['float32', 'float64'], 'box_coder'
-    )
-
     if in_dygraph_mode():
         if isinstance(prior_box_var, Variable):
             output_box = _C_ops.box_coder(

@@ -712,6 +705,12 @@ def box_coder(
         return output_box
     else:
+        check_variable_and_dtype(
+            prior_box, 'prior_box', ['float32', 'float64'], 'box_coder'
+        )
+        check_variable_and_dtype(
+            target_box, 'target_box', ['float32', 'float64'], 'box_coder'
+        )
         helper = LayerHelper("box_coder", **locals())
         output_box = helper.create_variable_for_type_inference(

@@ -2268,21 +2267,6 @@ def matrix_nms(
                nms_top_k=400, keep_top_k=200, normalized=False)
    """
-    check_variable_and_dtype(
-        bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
-    )
-    check_variable_and_dtype(
-        scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
-    )
-    check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
-    check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
-    check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms')
-    check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
-    check_type(normalized, 'normalized', bool, 'matrix_nms')
-    check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
-    check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
-    check_type(background_label, 'background_label', int, 'matrix_nms')
-
     if in_dygraph_mode():
         out, index, rois_num = _C_ops.matrix_nms(
             bboxes,

@@ -2302,6 +2286,20 @@ def matrix_nms(
             rois_num = None
         return out, rois_num, index
     else:
+        check_variable_and_dtype(
+            bboxes, 'BBoxes', ['float32', 'float64'], 'matrix_nms'
+        )
+        check_variable_and_dtype(
+            scores, 'Scores', ['float32', 'float64'], 'matrix_nms'
+        )
+        check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
+        check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
+        check_type(nms_top_k, 'nums_top_k', int, 'matrix_nms')
+        check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
+        check_type(normalized, 'normalized', bool, 'matrix_nms')
+        check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
+        check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
+        check_type(background_label, 'background_label', int, 'matrix_nms')
         helper = LayerHelper('matrix_nms', **locals())
         output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
         index = helper.create_variable_for_type_inference(dtype='int32')