Commit 2c3e4c18
Authored Mar 20, 2019 by zhoukunsheng

test=develop
add reduce_all, reduce_any op

Parent: f26ba5bd
Showing 7 changed files with 216 additions and 0 deletions (+216, -0)
paddle/fluid/API.spec                                   +2    -0
paddle/fluid/operators/reduce_ops/reduce_all_any_op.h   +36   -0
paddle/fluid/operators/reduce_ops/reduce_all_op.cc      +20   -0
paddle/fluid/operators/reduce_ops/reduce_all_op.cu      +19   -0
paddle/fluid/operators/reduce_ops/reduce_any_op.cc      +20   -0
paddle/fluid/operators/reduce_ops/reduce_any_op.cu      +19   -0
python/paddle/fluid/layers/nn.py                        +100  -0
paddle/fluid/API.spec
...
@@ -106,6 +106,8 @@ paddle.fluid.layers.reduce_mean (ArgSpec(args=['input', 'dim', 'keep_dim', 'name
paddle.fluid.layers.reduce_max (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '66a622db727551761ce4eb73eaa7f6a4'))
paddle.fluid.layers.reduce_min (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd50ac552b5d131468ed466d08bb2d38c'))
paddle.fluid.layers.reduce_prod (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'fcd8301a0ce15f219c7a4bcd0c1e8eca'))
paddle.fluid.layers.reduce_all (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', '5b416682bbba15421521a0fbb9b6a38c'))
paddle.fluid.layers.reduce_any (ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)), ('document', 'd9ecb1bc85d12153f46ba027c10cb405'))
paddle.fluid.layers.sequence_first_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '2b290d3d77882bfe9bb8d331cac8cdd3'))
paddle.fluid.layers.sequence_last_step (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'c16a892f44f7fe71bfa5afc32d3f34ce'))
paddle.fluid.layers.sequence_slice (ArgSpec(args=['input', 'offset', 'length', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'fdcea0e8b5bc7d8d4b1b072c521014e6'))
...
paddle/fluid/operators/reduce_ops/reduce_all_any_op.h
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/operators/reduce_ops/reduce_op.h"

namespace paddle {
namespace operators {

struct AllFunctor {
  template <typename DeviceContext, typename X, typename Y, typename Dim>
  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
    y->device(place) = x->all(dim);
  }
};

struct AnyFunctor {
  template <typename DeviceContext, typename X, typename Y, typename Dim>
  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
    y->device(place) = x->any(dim);
  }
};

}  // namespace operators
}  // namespace paddle
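
Both functors simply forward to Eigen's tensor all()/any() reductions over the requested dimensions; the shared ReduceKernel template from reduce_op.h drives them when the ops registered below run. For reference, a minimal numpy sketch of the same reduction semantics (illustrative only, not Paddle code; the values mirror the docstring examples added to nn.py further down):

import numpy as np

# Logical AND / OR reductions, matching what AllFunctor / AnyFunctor compute.
x = np.array([[True, False],
              [True, True]])

print(np.all(x))                         # False (reduce over every element)
print(np.all(x, axis=0))                 # [ True False]
print(np.all(x, axis=-1))                # [False  True]
print(np.all(x, axis=1, keepdims=True))  # [[False], [ True]]
print(np.any(x, axis=0))                 # [ True  True]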
paddle/fluid/operators/reduce_ops/reduce_all_op.cc
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
REGISTER_REDUCE_OP(reduce_all);
REGISTER_OP_CPU_KERNEL(reduce_all,
                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
                                         bool, ops::AllFunctor>);
paddle/fluid/operators/reduce_ops/reduce_all_op.cu
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
REGISTER_OP_CUDA_KERNEL(reduce_all,
                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
                                          bool, ops::AllFunctor>);
paddle/fluid/operators/reduce_ops/reduce_any_op.cc
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
REGISTER_REDUCE_OP(reduce_any);
REGISTER_OP_CPU_KERNEL(reduce_any,
                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
                                         bool, ops::AnyFunctor>);
paddle/fluid/operators/reduce_ops/reduce_any_op.cu
0 → 100644
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/reduce_ops/reduce_all_any_op.h"
REGISTER_OP_CUDA_KERNEL(reduce_any,
                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
                                          bool, ops::AnyFunctor>);
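
Note that the CPU and CUDA kernels above are registered for bool inputs only, so the Python layers added below expect a bool tensor, typically produced by a comparison op. A hypothetical wiring sketch (assumes fluid 1.x API such as fluid.layers.data and fluid.layers.less_than; not part of this commit):

import paddle.fluid as fluid

# Build a bool tensor from a comparison, then reduce it with the new layers.
a = fluid.layers.data(name='a', shape=[2, 2], dtype='float32')
b = fluid.layers.data(name='b', shape=[2, 2], dtype='float32')
cond = fluid.layers.less_than(a, b)              # elementwise bool tensor
row_all = fluid.layers.reduce_all(cond, dim=-1)  # logical AND along the last dim
any_hit = fluid.layers.reduce_any(cond)          # single-element logical OR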
python/paddle/fluid/layers/nn.py
...
@@ -73,6 +73,8 @@ __all__ = [
    'reduce_max',
    'reduce_min',
    'reduce_prod',
    'reduce_all',
    'reduce_any',
    'sequence_first_step',
    'sequence_last_step',
    'sequence_slice',
...
@@ -4699,6 +4701,104 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
    return out


def reduce_all(input, dim=None, keep_dim=False, name=None):
    """
    Computes the logical and of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        dim (list|int|None): The dimension along which the logical and is computed.
            If :attr:`None`, compute the logical and over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The reduced Tensor variable.

    Examples:
        .. code-block:: python

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [True, True]]
            # Each example is followed by the corresponding output tensor.
            fluid.layers.reduce_all(x)  # False
            fluid.layers.reduce_all(x, dim=0)  # [True, False]
            fluid.layers.reduce_all(x, dim=-1)  # [False, True]
            fluid.layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]

    """
    helper = LayerHelper('reduce_all', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_all',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None else False
        })
    return out


def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
    Computes the logical or of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        dim (list|int|None): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The reduced Tensor variable.

    Examples:
        .. code-block:: python

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [False, False]]
            # Each example is followed by the corresponding output tensor.
            fluid.layers.reduce_any(x)  # True
            fluid.layers.reduce_any(x, dim=0)  # [True, False]
            fluid.layers.reduce_any(x, dim=-1)  # [True, False]
            fluid.layers.reduce_any(x, dim=1, keep_dim=True)  # [[True], [False]]

    """
    helper = LayerHelper('reduce_any', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_any',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None else False
        })
    return out


def split(input, num_or_sections, dim=-1, name=None):
    """
    Split the input tensor into multiple sub-tensors.
...
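
The attribute mapping at the end of both new layers is the subtle part: an integer dim is normalized to a one-element list, and dim=None falls back to 'dim': [0] while setting 'reduce_all': True, which tells the kernel to collapse every dimension. A small numpy reference of that argument handling (a sketch for illustration, not Paddle code):

import numpy as np

def reduce_any_ref(x, dim=None, keep_dim=False):
    # Mirrors the layer's argument handling: a bare int becomes a one-element
    # list, and dim=None means "reduce over all dimensions".
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    axis = None if dim is None else tuple(dim)
    return np.any(x, axis=axis, keepdims=keep_dim)

x = np.array([[True, False],
              [False, False]])
print(reduce_any_ref(x))                        # True
print(reduce_any_ref(x, dim=0))                 # [ True False]
print(reduce_any_ref(x, dim=1, keep_dim=True))  # [[ True], [False]]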