Commit ac14920a (unverified)
add scatter_add_nd, label_smooth, huber_loss yaml (#41462)

Authored on Apr 07, 2022 by hong; committed by GitHub on Apr 07, 2022
Parent: 55e26637

Showing 9 changed files with 61 additions and 23 deletions (+61, -23)
python/paddle/fluid/layers/loss.py                            +4   -0
python/paddle/fluid/layers/nn.py                              +5   -2
python/paddle/fluid/tests/unittests/test_huber_loss_op.py     +6   -2
python/paddle/fluid/tests/unittests/test_label_smooth_op.py   +2   -2
python/paddle/fluid/tests/unittests/test_scatter_nd_op.py     +8   -8
python/paddle/nn/functional/common.py                         +4   -0
python/paddle/utils/code_gen/api.yaml                         +14  -1
python/paddle/utils/code_gen/backward.yaml                    +17  -7
tools/infrt/skipped_phi_api.json                              +1   -1
python/paddle/fluid/layers/loss.py
@@ -1610,6 +1610,10 @@ def huber_loss(input, label, delta):
         HuberLoss, = exe.run(feed={'input':input_data ,'label':label_data}, fetch_list=[loss.name])
         print(HuberLoss) #[[1.5], [0.5], [0.5], [0. ]], dtype=float32
     """
+    if in_dygraph_mode():
+        out, residual = _C_ops.final_state_huber_loss(input, label, delta)
+        return out
+
     helper = LayerHelper('huber_loss', **locals())
     check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                              'huber_loss')
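With the branch above in place, calling the legacy fluid layer under dynamic graph mode dispatches to the generated eager kernel instead of building a static program. A minimal usage sketch (assumes a Paddle build from around this commit, where paddle.fluid.layers.huber_loss still exists; whichever dygraph branch fires, the printed values are the same):

import numpy as np
import paddle
import paddle.fluid as fluid

paddle.disable_static()  # dynamic graph mode
x = paddle.to_tensor(np.array([[1.], [2.], [3.], [4.]], dtype='float32'))
y = paddle.to_tensor(np.array([[3.], [3.], [4.], [4.]], dtype='float32'))
loss = fluid.layers.huber_loss(input=x, label=y, delta=1.0)
print(loss.numpy())  # [[1.5], [0.5], [0.5], [0.]] -- same values as the docstring example above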
python/paddle/fluid/layers/nn.py
@@ -7095,6 +7095,10 @@ def label_smooth(label,
         smooth_label = layers.label_smooth(
             label=one_hot_label, epsilon=0.1, dtype="float32")
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_label_smooth(label, prior_dist,
+                                               float(epsilon))
+
     if epsilon > 1. or epsilon < 0.:
         raise ValueError("The value of epsilon must be between 0 and 1.")

@@ -8839,8 +8843,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     """
     if in_dygraph_mode():
-        op = getattr(_C_ops, 'scatter_nd_add')
-        return op(ref, index, updates)
+        return _C_ops.final_state_scatter_nd_add(ref, index, updates)
     else:
         if _in_legacy_dygraph():
             op = getattr(_C_ops, 'scatter_nd_add')
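The scatter_nd_add hunk swaps the string-based getattr dispatch for the generated final_state_ entry point. For reference, the semantics exercised through the public paddle.scatter_nd_add API (which runs the same scatter_nd_add kernel): duplicate coordinates accumulate rather than overwrite. A small sketch:

import paddle

paddle.disable_static()
x = paddle.zeros([3, 4], dtype='float32')
index = paddle.to_tensor([[0, 0], [1, 1], [0, 0]], dtype='int64')
updates = paddle.to_tensor([1., 2., 3.], dtype='float32')
out = paddle.scatter_nd_add(x, index, updates)
# out[0, 0] == 1.0 + 3.0 == 4.0, out[1, 1] == 2.0, every other entry stays 0.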
python/paddle/fluid/tests/unittests/test_huber_loss_op.py
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle
 from paddle.fluid import compiler, Program, program_guard

@@ -32,6 +33,8 @@ def huber_loss_forward(val, delta):
 class TestHuberLossOp(OpTest):
     def setUp(self):
         self.op_type = 'huber_loss'
+        self.python_api = paddle.fluid.layers.huber_loss
+        self.python_out_sig = ["Out"]
         self.delta = 1.0
         self.init_input()
         shape = self.set_shape()

@@ -52,10 +55,10 @@ class TestHuberLossOp(OpTest):
         return (100, 1)

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)

     def test_check_grad_ingore_x(self):
         self.check_grad(

@@ -103,4 +106,5 @@ class TestHuberLossOpError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
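These tests now register python_api/python_out_sig and run with check_eager=True, so OpTest also exercises the eager path against the same NumPy expectation. That expectation comes from the file's huber_loss_forward helper; a plausible sketch of the math it encodes (the standard per-element Huber definition, not a copy of the helper itself):

def huber_loss_forward(val, delta):
    # Quadratic inside |val| <= delta, linear outside.
    abs_val = abs(val)
    if abs_val <= delta:
        return 0.5 * val * val
    return delta * (abs_val - 0.5 * delta)

# e.g. huber_loss_forward(2.0, 1.0) == 1.5 and huber_loss_forward(1.0, 1.0) == 0.5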
python/paddle/fluid/tests/unittests/test_label_smooth_op.py
@@ -39,10 +39,10 @@ class TestLabelSmoothOp(OpTest):
         self.outputs = {'Out': smoothed_label}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=False)
+        self.check_grad(["X"], "Out", check_eager=True)


 class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp):
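The switch to check_eager=True re-checks the same smoothed_label expectation through the eager kernel. For orientation, a minimal NumPy reference of the label-smoothing formula the op implements (uniform prior when prior_dist is not given):

import numpy as np

def label_smooth_ref(label, epsilon, prior_dist=None):
    # label: one-hot (or soft) labels with classes on the last axis
    num_classes = label.shape[-1]
    if prior_dist is None:
        prior_dist = np.full((1, num_classes), 1.0 / num_classes)
    return (1.0 - epsilon) * label + epsilon * prior_dist

print(label_smooth_ref(np.array([[0., 1., 0.]], dtype='float32'), 0.1))
# [[0.0333, 0.9333, 0.0333]] -- the same values quoted in the common.py docstring below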
python/paddle/fluid/tests/unittests/test_scatter_nd_op.py
@@ -77,10 +77,10 @@ class TestScatterNdAddSimpleOp(OpTest):
         self.outputs = {'Out': expect_np}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


 class TestScatterNdAddWithEmptyIndex(OpTest):

@@ -101,10 +101,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
         self.outputs = {'Out': expect_np}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


 class TestScatterNdAddWithHighRankSame(OpTest):

@@ -128,10 +128,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
         self.outputs = {'Out': expect_np}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


 class TestScatterNdAddWithHighRankDiff(OpTest):

@@ -154,10 +154,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
         self.outputs = {'Out': expect_np}

     def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)

 #Test Python API
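All four scatter_nd_add test classes compare the op against a NumPy expectation (expect_np) and now do so with check_eager=True as well. A simplified NumPy sketch of the semantics under test (not the suite's own helper):

import numpy as np

def scatter_nd_add_ref(x, index, updates):
    # index: [..., k] integer coordinates into the first k dims of x;
    # updates: shape index.shape[:-1] + x.shape[k:]; duplicate coordinates accumulate.
    out = x.copy()
    k = index.shape[-1]
    flat_index = index.reshape(-1, k)
    flat_updates = updates.reshape((flat_index.shape[0],) + x.shape[k:])
    for coord, upd in zip(flat_index, flat_updates):
        out[tuple(coord)] += upd
    return out

x = np.zeros((3, 4), dtype='float32')
print(scatter_nd_add_ref(x, np.array([[0, 0], [1, 1], [0, 0]]),
                         np.array([1., 2., 3.], dtype='float32'))[0, 0])  # 4.0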
python/paddle/nn/functional/common.py
@@ -1623,6 +1623,10 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
             #[[[0.03333334 0.93333334 0.03333334]
             #  [0.93333334 0.03333334 0.93333334]]]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_label_smooth(label, prior_dist,
+                                               float(epsilon))
+
     if epsilon > 1. or epsilon < 0.:
         raise ValueError("The value of epsilon must be between 0 and 1.")
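This is the public paddle.nn.functional.label_smooth entry; with the new branch it also dispatches to the eager kernel in dynamic graph mode. A short usage sketch reproducing the docstring values quoted in the hunk:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
one_hot = paddle.to_tensor([[[0., 1., 0.], [1., 0., 1.]]], dtype='float32')
print(F.label_smooth(one_hot, epsilon=0.1).numpy())
# approximately [[[0.0333 0.9333 0.0333], [0.9333 0.0333 0.9333]]]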
python/paddle/utils/code_gen/api.yaml
@@ -830,7 +830,7 @@
     func : HuberLossInferMeta
   kernel :
     func : huber_loss
-  # backward : huber_loss_grad
+  backward : huber_loss_grad

 - api : imag
   args : (Tensor x)

@@ -934,6 +934,19 @@
     func : kthvalue
   backward : kthvalue_grad

+# label_smooth
+- api : label_smooth
+  args : (Tensor label, Tensor prior_dist, float epsilon)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [label]
+  kernel :
+    func : label_smooth
+    data_type : label
+  optional : prior_dist
+  backward : label_smooth_grad
+
 # leaky_relu
 - api : leaky_relu
   args : (Tensor x, float alpha)
python/paddle/utils/code_gen/backward.yaml
@@ -100,7 +100,7 @@
     func : asinh_grad

 - backward_api : atan2_grad
-  forward : cross (Tensor x, Tensor y) -> Tensor(out)
+  forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :

@@ -193,7 +193,7 @@
     func : cholesky_grad

 - backward_api : cholesky_solve_grad
-  forward : cholesky (Tensor x, Tensor y, bool upper) -> Tensor(out)
+  forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :

@@ -414,7 +414,7 @@
     data_type : out_grad

 - backward_api : erfinv_grad
-  forward : erf (Tensor x) -> Tensor(out)
+  forward : erfinv (Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :

@@ -568,6 +568,16 @@
   kernel :
     func : hard_sigmoid_grad

+- backward_api : huber_loss_grad
+  forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
+  args : (Tensor residual, Tensor out_grad, float delta)
+  output : Tensor(input_grad), Tensor(label_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [residual, residual]
+  kernel :
+    func : huber_loss_grad
+
 - backward_api : imag_grad
   forward : imag (Tensor x) -> Tensor(out)
   args : (Tensor out_grad)

@@ -639,7 +649,7 @@
     func : leaky_relu_grad

 - backward_api : lerp_grad
-  forward : transpose (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
+  forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :

@@ -898,7 +908,7 @@
     func : mode_grad

 - backward_api : modulo_grad
-  forward : add (Tensor x, Tensor y) -> Tensor(out)
+  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
   output : Tensor(x_grad), Tensor(y_grad)
   infer_meta :

@@ -1141,14 +1151,14 @@
   no_need_buffer : updates

 - backward_api : scatter_nd_add_grad
-  forward : scatter (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
+  forward : scatter_nd_add (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
   args : (Tensor index, Tensor updates, Tensor out_grad)
   output : Tensor(x_grad), Tensor(updates_grad)
   infer_meta :
     func : ScatterNdAddGradInferMeta
     param : [index, updates, out_grad]
   kernel :
-    func : scatter_nd_grad
+    func : scatter_nd_add_grad
   no_need_buffer : updates

 - backward_api : segment_pool_grad
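Note that the new huber_loss_grad entry takes the saved residual (plus out_grad and delta) rather than the original input/label tensors; mathematically, the derivative of the Huber function depends only on the residual, clipped at +/- delta. A NumPy sketch of that piecewise derivative (the math only, not the PHI kernel itself):

import numpy as np

def d_huber_d_residual(residual, delta):
    # d/dr of: 0.5*r**2 when |r| <= delta, delta*(|r| - 0.5*delta) otherwise
    return np.where(np.abs(residual) <= delta, residual, delta * np.sign(residual))

print(d_huber_d_residual(np.array([0.5, 2.0, -3.0]), 1.0))  # [ 0.5  1.  -1. ]
# input_grad / label_grad then follow by the chain rule through the residual.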
tools/infrt/skipped_phi_api.json
 {
-"phi_apis":["conj", "dropout", "expand_as", "flatten", "nll_loss", "psroi_pool", "roi_align", "roi_pool"],
+"phi_apis":["conj", "dropout", "expand_as", "flatten", "nll_loss", "psroi_pool", "roi_align", "roi_pool", "label_smooth"],
 "phi_kernels":["equal_all"]
 }