BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 4558d395, authored Sep 08, 2020 by myq406450149, committed via GitHub on Sep 08, 2020
fix Norm op error (#26771)
* fix frobenius_norm error, rm p=0 2-axis support. test=develop
Parent commit: 4d7d6612
Showing 3 changed files with 187 additions and 89 deletions (+187 −89)
paddle/fluid/operators/p_norm_op.cc (+6, −0)
python/paddle/fluid/tests/unittests/test_norm_all.py (+152, −34)
python/paddle/tensor/linalg.py (+29, −55)
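In user-facing terms, the commit changes paddle.norm in two ways: with p='fro' and axis=None the norm is now reduced over all elements of the tensor, and p=0 combined with a two-element axis is no longer supported and raises ValueError. A minimal sketch of the new behavior, assuming the dygraph API of that era (fluid.dygraph.guard / to_variable):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(2, 3, 4).astype("float32"))

    # After the fix, p='fro' with axis=None reduces over all elements
    # (reduce_all=True) rather than a default pair of dimensions.
    fro_all = paddle.norm(x, p='fro')

    # p=0 over two axes was removed by this commit and now raises.
    try:
        paddle.norm(x, p=0, axis=[0, 1])
    except ValueError as e:
        print(e)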
paddle/fluid/operators/p_norm_op.cc
@@ -105,6 +105,12 @@ class PnormOp : public framework::OperatorWithKernel {
     bool asvector = ctx->Attrs().Get<bool>("asvector");
     if (asvector) {
       reduce_dims.emplace_back(1);
+      if (keepdim) {
+        for (int i = 1; i < x_dim.size(); ++i) {
+          reduce_dims.emplace_back(1);
+        }
+        x_dim = framework::make_ddim(reduce_dims);
+      }
     } else {
       if (axis < 0) axis = x_dim.size() + axis;
       for (int i = 0; i < x_dim.size(); ++i) {
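The added branch fixes shape inference when the norm is computed "as a vector" (axis=None) with keepdim set: the output keeps the input's rank, with every dimension reduced to 1. A sketch of the expected shape, again assuming the period dygraph API:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(2, 3, 4).astype("float32"))
    # axis=None computes the p-norm over all elements ("asvector"); with
    # keepdim=True the fixed inference yields an all-ones shape of equal rank.
    out = paddle.norm(x, p=2, axis=None, keepdim=True)
    print(out.shape)  # expected: [1, 1, 1]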
python/paddle/fluid/tests/unittests/test_norm_all.py
@@ -26,11 +26,11 @@ def p_norm(x, axis, porder, keepdims=False):
     if axis is None:
         x = x.flatten()
         if porder == np.inf:
-            r = np.amax(np.abs(x))
+            r = np.amax(np.abs(x), keepdims=keepdims)
         elif porder == -np.inf:
-            r = np.amin(np.abs(x))
+            r = np.amin(np.abs(x), keepdims=keepdims)
         else:
-            r = np.linalg.norm(x, ord=porder)
+            r = np.linalg.norm(x, ord=porder, keepdims=keepdims)
     elif isinstance(axis, list or tuple) and len(axis) == 2:
         if porder == np.inf:
             axis = tuple(axis)
@@ -41,10 +41,10 @@ def p_norm(x, axis, porder, keepdims=False):
         elif porder == 0:
             axis = tuple(axis)
             r = x.astype(bool)
-            r = np.sum(r, axis)
+            r = np.sum(r, axis, keepdims=keepdims)
         elif porder == 1:
             axis = tuple(axis)
-            r = np.sum(np.abs(x), axis)
+            r = np.sum(np.abs(x), axis, keepdims=keepdims)
         else:
             axis = tuple(axis)
             xp = np.power(np.abs(x), porder)
@@ -61,7 +61,7 @@ def p_norm(x, axis, porder, keepdims=False):
 def frobenius_norm(x, axis=None, keepdims=False):
     if isinstance(axis, list): axis = tuple(axis)
-    if axis is None: axis = (-2, -1)
+    if axis is None: x = x.reshape(1, x.size)
     r = np.linalg.norm(
         x, ord='fro', axis=axis, keepdims=keepdims).astype(x.dtype)
     return r
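The reference helpers gain a keepdims pass-through because np.linalg.norm, np.amax, np.amin, and np.sum all accept keepdims, retaining reduced axes as size 1; this is what lets them mirror paddle.norm(..., keepdim=True) shape-for-shape. A standalone check of that NumPy behavior (shapes are illustrative):

import numpy as np

x = np.random.rand(2, 3, 4)
print(np.amax(np.abs(x), keepdims=True).shape)                        # (1, 1, 1)
print(np.sum(np.abs(x), axis=(0, 1), keepdims=True).shape)            # (1, 1, 4)
print(np.linalg.norm(x, ord='fro', axis=(0, 1), keepdims=True).shape) # (1, 1, 4)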
@@ -217,28 +217,37 @@ class TestPnormOp5(TestPnormOp):
         self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)


-def run_fro(self, p, axis, shape_x, dtype):
+def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
     with fluid.program_guard(fluid.Program()):
         data = fluid.data(name="X", shape=shape_x, dtype=dtype)
-        out = paddle.norm(x=data, p=p, axis=axis)
+        out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
-        expected_result = frobenius_norm(np_input, axis=axis)
+        expected_result = frobenius_norm(np_input, axis=axis, keepdims=keep_dim)
         result, = exe.run(feed={"X": np_input}, fetch_list=[out])
     self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
+    if keep_dim and check_dim:
+        self.assertEqual(
+            (np.abs(np.array(result.shape) - np.array(expected_result.shape)) <
+             1e-6).all(), True)


-def run_pnorm(self, p, axis, shape_x, dtype):
+def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False):
     with fluid.program_guard(fluid.Program()):
         data = fluid.data(name="X", shape=shape_x, dtype=dtype)
-        out = paddle.norm(x=data, p=p, axis=axis)
+        out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
         np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype)
-        expected_result = p_norm(np_input, porder=p, axis=axis).astype(dtype)
+        expected_result = p_norm(
+            np_input, porder=p, axis=axis, keepdims=keep_dim).astype(dtype)
         result, = exe.run(feed={"X": np_input}, fetch_list=[out])
     self.assertEqual((np.abs(result - expected_result) < 1e-6).all(), True)
+    if keep_dim and check_dim:
+        self.assertEqual(
+            (np.abs(np.array(result.shape) - np.array(expected_result.shape)) <
+             1e-6).all(), True)


 def run_graph(self, p, axis, shape_x, dtype):
@@ -253,6 +262,7 @@ def run_graph(self, p, axis, shape_x, dtype):
         # compute frobenius norm along last two dimensions.
         out_fro = paddle.norm(x, p='fro')
+        out_fro = paddle.norm(x, p='fro', axis=0)
         out_fro = paddle.norm(x, p='fro', axis=[0, 1])
         # compute 2-order norm along [0,1] dimension.
         out_pnorm = paddle.norm(x, p=2, axis=[0, 1])
@@ -274,27 +284,133 @@ def run_graph(self, p, axis, shape_x, dtype):
 class API_NormTest(unittest.TestCase):
     def test_basic(self):
-        run_fro(self, p='fro', axis=None, shape_x=[2, 3, 4], dtype="float32")
-        run_fro(self, p='fro', axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=2, axis=None, shape_x=[3, 4], dtype="float32")
-        run_pnorm(self, p=2, axis=1, shape_x=[3, 4], dtype="float64")
-        run_pnorm(self, p=np.inf, axis=0, shape_x=[2, 3, 4], dtype="float32")
-        run_pnorm(self, p=np.inf, axis=None, shape_x=[2, 3, 4], dtype="float32")
-        run_pnorm(self, p=-np.inf, axis=0, shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=-np.inf, axis=None, shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=0, axis=1, shape_x=[3, 4], dtype="float64")
-        run_pnorm(self, p=1, axis=1, shape_x=[3, 4], dtype="float64")
-        run_pnorm(self, p=0, axis=None, shape_x=[3, 4], dtype="float64")
-        run_pnorm(self, p=2, axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=2, axis=-1, shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=1, axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=0, axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=np.inf, axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
-        run_pnorm(self, p=-np.inf, axis=[0, 1], shape_x=[2, 3, 4], dtype="float64")
+        keep_dims = {False, True}
+        for keep in keep_dims:
+            run_fro(self, p='fro', axis=None, shape_x=[2, 3, 4],
+                    dtype="float32", keep_dim=keep)
+            run_fro(self, p='fro', axis=[0, 1], shape_x=[2, 3, 4],
+                    dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=2, axis=None, shape_x=[3, 4],
+                      dtype="float32", keep_dim=keep)
+            run_pnorm(self, p=2, axis=1, shape_x=[3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=np.inf, axis=0, shape_x=[2, 3, 4],
+                      dtype="float32", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=np.inf, axis=None, shape_x=[2, 3, 4],
+                      dtype="float32", keep_dim=keep)
+            run_pnorm(self, p=-np.inf, axis=0, shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=-np.inf, axis=None, shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep)
+            run_pnorm(self, p=0, axis=1, shape_x=[3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=1, axis=1, shape_x=[3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=0, axis=None, shape_x=[3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=2, axis=[0, 1], shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=2, axis=-1, shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=1, axis=[0, 1], shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=np.inf, axis=[0, 1], shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)
+            run_pnorm(self, p=-np.inf, axis=[0, 1], shape_x=[2, 3, 4],
+                      dtype="float64", keep_dim=keep, check_dim=True)

     def test_dygraph(self):
         run_graph(self, p='fro', axis=None, shape_x=[2, 3, 4], dtype="float32")
@@ -315,6 +431,7 @@ class API_NormTest(unittest.TestCase):
                 paddle.norm(data, p=p, out=out)

             self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "int64")
+            self.assertRaises(ValueError, paddle.norm, "inf", [2], "int64")
             out = fluid.data(name="out", shape=[1], dtype="int64")
             self.assertRaises(TypeError, err_dtype, "fro", [2, 2], "float64",
                               out)
@@ -325,6 +442,7 @@ class API_NormTest(unittest.TestCase):
             self.assertRaises(ValueError, paddle.norm, data, p="unsupport norm")
             self.assertRaises(ValueError, paddle.norm, data, p=[1])
             self.assertRaises(ValueError, paddle.norm, data, p=[1], axis=-1)
+            self.assertRaises(ValueError, paddle.norm, 0, [1, 0], "float64")
             data = fluid.data(name="data_3d", shape=[2, 2, 2], dtype="float64")
             self.assertRaises(
                 ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1])
python/paddle/tensor/linalg.py
@@ -183,12 +183,13 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         x (Tensor): The input tensor could be N-D tensor, and the input data
             type could be float32 or float64.
         p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
-            `inf`,`-inf` and any positive real number yielding the corresponding p-norm.
-            Not supported: ord < 0, nuclear norm.
+            `inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
+            Default value is `fro`.
         axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
             or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
             If `axis < 0`, the dimension to norm operation is rank(input) + axis.
+            If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
             Defalut value is `None`.
         keepdim (bool, optional): Whether to reserve the reduced dimension in the
             output Tensor. The result tensor will have fewer dimension
             than the :attr:`input` unless :attr:`keepdim` is true, default
@@ -197,13 +198,9 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             user to set this property. For more information, please refer to :ref:`api_guide_Name`.

     Returns:
-        Variable: Tensor, results of norm operation on the specified axis of input tensor,
+        Tensor: results of norm operation on the specified axis of input tensor,
         it's data type is the same as input's Tensor.
-
-    Raises:
-        TypeError, if out data type is different with the input data type.
-        ValueError, If `p` or `axis` is invalid.

     Examples:
         .. code-block:: python
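The documented p/axis combinations map onto the dispatch below. A compact sketch of valid calls, mirroring cases exercised in test_norm_all.py and assuming the period dygraph API:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(2, 3, 4).astype("float32"))
    paddle.norm(x, p='fro')                    # Frobenius norm over all elements
    paddle.norm(x, p=2, axis=1)                # vector 2-norm along axis 1
    paddle.norm(x, p=np.inf, axis=[0, 1])      # matrix inf-norm over two axes
    paddle.norm(x, p=2, axis=-1, keepdim=True) # negative axis counts from the end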
@@ -256,15 +253,13 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 "The dim of frobenius norm op should be None or two elements list!"
             )
         if in_dygraph_mode():
-            if dim is None: dim = [-1]
-            return core.ops.frobenius_norm(input, 'dim', dim, 'keepdim', keepdim)
-        attrs = {
-            'dim': dim if dim != None else [-2, -1],
-            'keep_dim': keepdim,
-            'reduce_all': False
-        }
-        if len(attrs['dim']) == len(input.shape):
+            if dim is None:
+                return core.ops.frobenius_norm(input, 'keep_dim', keepdim,
+                                               'reduce_all', True)
+            return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
+                                           keepdim, 'reduce_all', False)
+        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
+        if dim is None:
             attrs['reduce_all'] = True
         check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                  'frobenius_norm')
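With dim None, the rewritten branch sets reduce_all and computes the Frobenius norm over every element, which is exactly the 2-norm of the flattened tensor; that is why the test helper above now reshapes to (1, x.size). The identity, in plain NumPy:

import numpy as np

x = np.random.rand(2, 3, 4)
full = np.sqrt(np.sum(x ** 2))  # Frobenius norm over all elements
assert np.isclose(full, np.linalg.norm(x.reshape(1, -1), ord='fro'))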
@@ -351,42 +346,6 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         return reduce_out

-    def p0_matrix_norm(input, porder=0., axis=axis, keepdim=False, name=None):
-        block = LayerHelper('norm', **locals())
-        out = block.create_variable_for_type_inference(
-            dtype=block.input_dtype())
-        cast_out = block.create_variable_for_type_inference(dtype=bool)
-        block.append_op(
-            type='cast',
-            inputs={'X': input},
-            outputs={'Out': cast_out},
-            attrs={
-                'in_dtype': input.dtype,
-                'out_dtype': int(core.VarDesc.VarType.BOOL)
-            })
-        cast_out2 = block.create_variable_for_type_inference(dtype=bool)
-        block.append_op(
-            type='cast',
-            inputs={'X': cast_out},
-            outputs={'Out': cast_out2},
-            attrs={
-                'in_dtype': cast_out.dtype,
-                'out_dtype': int(core.VarDesc.VarType.FP32)
-            })
-        sum_out = block.create_variable_for_type_inference(
-            dtype=block.input_dtype())
-        block.append_op(
-            type='reduce_sum',
-            inputs={'X': cast_out2},
-            outputs={'Out': sum_out},
-            attrs={
-                'dim': axis,
-                'keep_dim': keepdim,
-                'reduce_all': True if axis is None else False
-            })
-        return sum_out
-
     def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
         block = LayerHelper('norm', **locals())
         out = block.create_variable_for_type_inference(
@@ -448,7 +407,20 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
     #calculate vector norm, where axis is int or list with only one integer
     if isinstance(axis, int):
-        if isinstance(p, (int, float)):
+        if isinstance(p, str):
+            if p == "fro":
+                return vector_norm(
+                    x,
+                    porder=2,
+                    axis=axis,
+                    keepdim=keepdim,
+                    asvector=False,
+                    name=name)
+            else:
+                raise ValueError(
+                    "only valid string values are 'fro', found {}".format(p))
+        elif isinstance(p, (int, float)):
             return vector_norm(
                 x,
                 axis=axis,
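This new branch makes a string p with an integer axis well-defined: 'fro' is treated as the 2-norm over that axis, and any other string raises. A hedged sketch, again under the dygraph-era API assumption ('nuc' is just a hypothetical invalid string):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(3, 4).astype("float32"))
    row_norms = paddle.norm(x, p='fro', axis=1)  # same as p=2 along axis 1
    try:
        paddle.norm(x, p='nuc', axis=1)          # hypothetical invalid string
    except ValueError as e:
        print(e)  # only valid string values are 'fro', found nuc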
@@ -464,10 +436,12 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
     elif isinstance(axis, list) and len(axis) == 2:
         if p == "fro":
             return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
-        elif p == 0:
-            return p0_matrix_norm(x, axis=axis, keepdim=keepdim, name=name)
         elif p == np.inf or p == -np.inf:
             return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
+        elif p == 0:
+            raise ValueError(
+                "just suport axis type int or list (length of list <=1) if p = 0, found {}".
+                format(axis))
         else:
             return p_matrix_norm(
                 x, porder=p, axis=axis, keepdim=keepdim, name=name)
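After the change, a two-element axis dispatches to frobenius_norm for p='fro', to inf_norm for p=±np.inf, and to p_matrix_norm for other numeric orders, with p=0 rejected up front. A quick tour of those routes, same dygraph assumptions as above:

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(2, 3, 4).astype("float64"))
    paddle.norm(x, p='fro', axis=[0, 1])   # frobenius_norm path
    paddle.norm(x, p=np.inf, axis=[0, 1])  # inf_norm path
    paddle.norm(x, p=1, axis=[0, 1])       # p_matrix_norm path
    try:
        paddle.norm(x, p=0, axis=[0, 1])   # p=0 matrix support was removed
    except ValueError as e:
        print(e)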