Commit c80fcf90 (unverified)
Authored on Aug 25, 2020 by zhupengyang; committed via GitHub on Aug 25, 2020.
reduce_mean error if keepdim=True and reduce_all=True (#26614)
Parent: a31dbc8f
4 changed files with 119 additions and 104 deletions (+119 −104):

paddle/fluid/operators/reduce_ops/reduce_mean_op.part.cu    +1   −3
paddle/fluid/operators/reduce_ops/reduce_op.h               +2   −2
python/paddle/fluid/tests/unittests/test_mean_op.py       +116   −3
python/paddle/fluid/tests/unittests/test_reduce_op.py       +0  −96
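For context, a minimal repro sketch of the reported case (not part of this commit; it assumes the 1.8-era fluid API and an arbitrary input shape). Reducing over every axis with keep_dim=True yields an output that keeps the input's rank with every dimension equal to 1, and the gradient of exactly that case is what this commit fixes.

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.random.rand(2, 3, 4).astype('float32'))
    # dim=None reduces over all axes (reduce_all); keep_dim=True keeps them as
    # size-1 dimensions, so the result has shape [1, 1, 1] instead of being 0-D.
    out = fluid.layers.reduce_mean(x, dim=None, keep_dim=True)
    print(out.shape)  # [1, 1, 1]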
paddle/fluid/operators/reduce_ops/reduce_mean_op.part.cu

@@ -21,6 +21,4 @@ using CUDAReduceMeanGradKernel =
                           ops::MeanGradFunctor, true>;
 
 REGISTER_OP_CUDA_KERNEL(reduce_mean_grad, CUDAReduceMeanGradKernel<float>,
-                        CUDAReduceMeanGradKernel<double>,
-                        CUDAReduceMeanGradKernel<int>,
-                        CUDAReduceMeanGradKernel<int64_t>);
+                        CUDAReduceMeanGradKernel<double>);
paddle/fluid/operators/reduce_ops/reduce_op.h

@@ -236,8 +236,8 @@ class ReduceGradKernel : public framework::OpKernel<T> {
     if (reduce_all) {
       auto x = EigenVector<T>::Flatten(*input0);
-      auto x_reduce = EigenVector<T>::From(*input1);
-      auto x_reduce_grad = EigenVector<T>::From(*input2);
+      auto x_reduce = EigenVector<T>::Flatten(*input1);
+      auto x_reduce_grad = EigenVector<T>::Flatten(*input2);
       auto x_grad = EigenVector<T>::Flatten(*output);
       auto& place =
           *context.template device_context<DeviceContext>().eigen_device();
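Why the From → Flatten change matters (a reading of the diff): with reduce_all=True and keep_dim=True, the reduced tensor keeps the input's rank with every dimension equal to 1, so it is not rank-1; EigenVector<T>::Flatten maps a tensor of any rank onto a 1-D Eigen vector, whereas From expects a tensor that is already 1-D. A numpy stand-in for the shapes involved (illustration only, not Paddle code):

import numpy as np

x = np.random.rand(2, 3, 4)
print(np.mean(x, axis=None, keepdims=True).shape)   # (1, 1, 1) -- rank kept, all dims 1
print(np.mean(x, axis=None, keepdims=False).shape)  # ()        -- scalar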
python/paddle/fluid/tests/unittests/test_mean_op.py

@@ -22,6 +22,8 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 
+np.random.seed(10)
+
 
 class TestMeanOp(OpTest):
     def setUp(self):

@@ -74,10 +76,105 @@ class TestFP16MeanOp(TestMeanOp):
             place, ['X'], 'Out', max_relative_error=0.8)
 
 
+def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
+    if isinstance(axis, list):
+        axis = tuple(axis)
+    if reduce_all:
+        axis = None
+    return np.mean(x, axis=axis, keepdims=keepdim)
+
+
+class TestReduceMeanOp(OpTest):
+    def setUp(self):
+        self.op_type = 'reduce_mean'
+        self.dtype = 'float64'
+        self.shape = [2, 3, 4, 5]
+        self.axis = [0]
+        self.keepdim = False
+        self.reduce_all = False
+        self.set_attrs()
+
+        np.random.seed(10)
+        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out_np = ref_reduce_mean(x_np, self.axis, self.keepdim, self.reduce_all)
+        self.inputs = {'X': x_np}
+        self.outputs = {'Out': out_np}
+        self.attrs = {
+            'dim': self.axis,
+            'keep_dim': self.keepdim,
+            'reduce_all': self.reduce_all
+        }
+
+    def set_attrs(self):
+        pass
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], ['Out'])
+
+
+class TestReduceMeanOpDefaultAttrs(TestReduceMeanOp):
+    def setUp(self):
+        self.op_type = 'reduce_mean'
+        self.dtype = 'float64'
+        self.shape = [2, 3, 4, 5]
+
+        x_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
+        out_np = np.mean(x_np, axis=0)
+        self.inputs = {'X': x_np}
+        self.outputs = {'Out': out_np}
+
+
+class TestReduceMeanOpFloat32(TestReduceMeanOp):
+    def set_attrs(self):
+        self.dtype = 'float32'
+
+
+class TestReduceMeanOpShape1D(TestReduceMeanOp):
+    def set_attrs(self):
+        self.shape = [100]
+
+
+class TestReduceMeanOpShape6D(TestReduceMeanOp):
+    def set_attrs(self):
+        self.shape = [2, 3, 4, 5, 6, 7]
+
+
+class TestReduceMeanOpAxisAll(TestReduceMeanOp):
+    def set_attrs(self):
+        self.axis = [0, 1, 2, 3]
+
+
+class TestReduceMeanOpAxisTuple(TestReduceMeanOp):
+    def set_attrs(self):
+        self.axis = (0, 1, 2)
+
+
+class TestReduceMeanOpAxisNegative(TestReduceMeanOp):
+    def set_attrs(self):
+        self.axis = [-2, -1]
+
+
+class TestReduceMeanOpKeepdimTrue1(TestReduceMeanOp):
+    def set_attrs(self):
+        self.keepdim = True
+
+
+class TestReduceMeanOpKeepdimTrue2(TestReduceMeanOp):
+    def set_attrs(self):
+        self.axis = [0, 1, 2, 3]
+        self.keepdim = True
+
+
+class TestReduceMeanOpReduceAllTrue(TestReduceMeanOp):
+    def set_attrs(self):
+        self.reduce_all = True
+
+
 class TestMeanAPI(unittest.TestCase):
-    """
-    test paddle.tensor.stat.mean
-    """
+    # test paddle.tensor.stat.mean
 
     def setUp(self):
         self.x_shape = [2, 3, 4, 5]

@@ -128,6 +225,22 @@ class TestMeanAPI(unittest.TestCase):
             test_case(self.x, [0, 1, 2, 3])
         paddle.enable_static()
 
+    def test_fluid_api(self):
+        with fluid.program_guard(fluid.Program(), fluid.Program()):
+            x = fluid.data("x", shape=[10, 10], dtype="float32")
+            out = fluid.layers.reduce_mean(input=x, dim=1)
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            x_np = np.random.rand(10, 10).astype(np.float32)
+            res = exe.run(feed={"x": x_np}, fetch_list=[out])
+        self.assertEqual(np.allclose(res[0], np.mean(x_np, axis=1)), True)
+
+        with fluid.dygraph.guard():
+            x_np = np.random.rand(10, 10).astype(np.float32)
+            x = fluid.dygraph.to_variable(x_np)
+            out = fluid.layers.reduce_mean(input=x, dim=1)
+        self.assertEqual(np.allclose(out.numpy(), np.mean(x_np, axis=1)), True)
+
     def test_errors(self):
         paddle.disable_static()
         x = np.random.uniform(-1, 1, [10, 12]).astype('float32')
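A quick illustration of the reference semantics used by the new tests (assumes ref_reduce_mean as defined in the diff above; shapes are arbitrary): reduce_all overrides the axis argument, and keepdim controls whether the reduced dimensions are kept as size 1.

import numpy as np
# ref_reduce_mean is the helper added to test_mean_op.py above.

x = np.random.uniform(-1, 1, [2, 3, 4, 5])
print(ref_reduce_mean(x, axis=[0]).shape)                                 # (3, 4, 5)
print(ref_reduce_mean(x, axis=[0], keepdim=True).shape)                   # (1, 3, 4, 5)
print(ref_reduce_mean(x, axis=[0], keepdim=True, reduce_all=True).shape)  # (1, 1, 1, 1)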
python/paddle/fluid/tests/unittests/test_reduce_op.py

@@ -67,22 +67,6 @@ class TestSumOp6D(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestMeanOp(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_mean"
-        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
-        self.attrs = {'dim': [1]}
-        self.outputs = {
-            'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim']))
-        }
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
 @skip_check_grad_ci(
     reason="reduce_max is discontinuous non-derivable function,"
     " its gradient check is not supported by unittest framework.")

@@ -318,21 +302,6 @@ class TestReduceAll(Test1DReduce):
         self.outputs = {'Out': self.inputs['X'].sum()}
 
 
-## reduction in multi dims
-class TestReduceMeanOpMultiAxises(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_mean"
-        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
-        self.attrs = {'dim': [1, 2]}
-        self.outputs = {'Out': self.inputs['X'].mean(axis=(1, 2))}
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
 @skip_check_grad_ci(
     reason="reduce_max is discontinuous non-derivable function,"
     " its gradient check is not supported by unittest framework.")

@@ -420,40 +389,6 @@ class TestReduceSumWithNumelOne(OpTest):
         self.check_grad(['X'], 'Out')
 
 
-class TestReduceMeanWithDimOne(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_mean"
-        self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
-        self.attrs = {'dim': [1], 'keep_dim': False}
-        self.outputs = {
-            'Out': self.inputs['X'].mean(
-                axis=tuple(self.attrs['dim']), keepdims=False)
-        }
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
-class TestReduceMeanWithNumelOne(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_mean"
-        self.inputs = {'X': np.random.random((100, 1)).astype("float64")}
-        self.attrs = {'dim': [1], 'keep_dim': True}
-        self.outputs = {
-            'Out': self.inputs['X'].mean(
-                axis=tuple(self.attrs['dim']), keepdims=True)
-        }
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
-
-
 class TestReduceAll(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"

@@ -536,18 +471,6 @@ class TestReduceSumOpError(unittest.TestCase):
             self.assertRaises(TypeError, fluid.layers.reduce_sum, x2)
 
 
-class TestReduceMeanOpError(unittest.TestCase):
-    def test_errors(self):
-        with program_guard(Program(), Program()):
-            # The input type of reduce_mean_op must be Variable.
-            x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace())
-            self.assertRaises(TypeError, fluid.layers.reduce_mean, x1)
-            # The input dtype of reduce_mean_op must be float32 or float64 or int32 or int64.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
-            self.assertRaises(TypeError, fluid.layers.reduce_mean, x2)
-
-
 class API_TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_dtype1():

@@ -649,24 +572,5 @@ class API_TestSumOp(unittest.TestCase):
         self.assertTrue((out3 == np.sum(np_x, axis=(0, 1, 2))).all())
 
 
-class API_TestReduceMeanOp(unittest.TestCase):
-    def test_static(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.data("x", shape=[10, 10], dtype="float32")
-            out = fluid.layers.reduce_mean(input=x, dim=1)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            x_np = np.random.rand(10, 10).astype(np.float32)
-            res = exe.run(feed={"x": x_np}, fetch_list=[out])
-        self.assertEqual(np.allclose(res[0], np.mean(x_np, axis=1)), True)
-
-    def test_dygraph(self):
-        with fluid.dygraph.guard():
-            x_np = np.random.rand(10, 10).astype(np.float32)
-            x = fluid.dygraph.to_variable(x_np)
-            out = fluid.layers.reduce_mean(input=x, dim=1)
-        self.assertEqual(np.allclose(out.numpy(), np.mean(x_np, axis=1)), True)
-
-
 if __name__ == '__main__':
     unittest.main()
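Since both touched test modules end with unittest.main(), they can be run directly from a Paddle source tree to exercise the new cases; a minimal sketch (paths as in the diff):

import subprocess
import sys

# Run the updated unit-test files one after another.
for test in ("python/paddle/fluid/tests/unittests/test_mean_op.py",
             "python/paddle/fluid/tests/unittests/test_reduce_op.py"):
    subprocess.check_call([sys.executable, test])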