Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
Crayon鑫
Paddle
提交
b61aaa2c
P
Paddle
项目概览
Crayon鑫
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
1
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
1
Issue
1
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
b61aaa2c
编写于
4月 12, 2020
作者:
L
liuwei1031
提交者:
GitHub
4月 12, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
add logsumexp op, test=develop (#23585)
上级
2fd728a9
变更
4
隐藏空白更改
内联
并排
Showing
4 changed file
with
143 addition
and
3 deletion
+143
-3
python/paddle/__init__.py
python/paddle/__init__.py
+1
-1
python/paddle/fluid/tests/unittests/test_logsumexp.py
python/paddle/fluid/tests/unittests/test_logsumexp.py
+85
-0
python/paddle/tensor/__init__.py
python/paddle/tensor/__init__.py
+1
-1
python/paddle/tensor/math.py
python/paddle/tensor/math.py
+56
-1
未找到文件。
python/paddle/__init__.py
浏览文件 @
b61aaa2c
...
@@ -137,7 +137,7 @@ from .tensor.math import mm #DEFINE_ALIAS
...
@@ -137,7 +137,7 @@ from .tensor.math import mm #DEFINE_ALIAS
from
.tensor.math
import
div
#DEFINE_ALIAS
from
.tensor.math
import
div
#DEFINE_ALIAS
from
.tensor.math
import
add
#DEFINE_ALIAS
from
.tensor.math
import
add
#DEFINE_ALIAS
# from .tensor.math import atan #DEFINE_ALIAS
# from .tensor.math import atan #DEFINE_ALIAS
# from .tensor.math import logsumexp
#DEFINE_ALIAS
from
.tensor.math
import
logsumexp
#DEFINE_ALIAS
# from .tensor.math import inverse #DEFINE_ALIAS
# from .tensor.math import inverse #DEFINE_ALIAS
# from .tensor.math import log1p #DEFINE_ALIAS
# from .tensor.math import log1p #DEFINE_ALIAS
# from .tensor.math import erf #DEFINE_ALIAS
# from .tensor.math import erf #DEFINE_ALIAS
...
...
python/paddle/fluid/tests/unittests/test_logsumexp.py
0 → 100644
浏览文件 @
b61aaa2c
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
__future__
import
print_function
import
paddle
import
paddle.fluid
as
fluid
import
unittest
import
numpy
as
np
from
op_test
import
OpTest
from
paddle.fluid
import
Program
,
program_guard
from
paddle.fluid.layer_helper
import
LayerHelper
class TestLogSumOpError(unittest.TestCase):
    """Check that paddle.logsumexp rejects unsupported inputs.

    logsumexp only accepts float32/float64 tensors; uint8, int and
    float16 inputs must raise, and a ``None`` input is rejected by an
    explicit assert inside the op wrapper.
    """

    def test_errors(self):
        with program_guard(Program(), Program()):
            # Each unsupported dtype should fail the op's dtype check.
            bad_cases = [
                ('x1', [120], "uint8"),
                ('x2', [2, 3], "int"),
                ('x3', [3], "float16"),
            ]
            for var_name, var_shape, var_dtype in bad_cases:
                bad_var = fluid.layers.data(
                    name=var_name, shape=var_shape, dtype=var_dtype)
                self.assertRaises(Exception, paddle.logsumexp, bad_var)

            # A None input trips the `assert x is not None` guard.
            self.assertRaises(AssertionError, paddle.logsumexp, None)
class TestLogSumExpOp(unittest.TestCase):
    """Compare paddle.logsumexp against the NumPy identity log(sum(exp(x)))
    in dygraph mode, covering full/partial reductions, keepdim, and the
    caller-provided ``out=`` variable path.
    """

    def test_dygraph(self):
        with fluid.dygraph.guard():
            # Full reduction over a 1-D input.
            data = np.random.uniform(0.1, 1, [123]).astype(np.float32)
            var = fluid.dygraph.to_variable(data)
            self.assertTrue(
                np.allclose(
                    paddle.logsumexp(var).numpy(),
                    np.log(np.sum(np.exp(data)))))

            # Partial reduction over axes 1 and 2 of a 3-D input.
            data = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            var = fluid.dygraph.to_variable(data)
            self.assertTrue(
                np.allclose(
                    paddle.logsumexp(var, dim=[1, 2]).numpy(),
                    np.log(np.sum(np.exp(data), axis=(1, 2)))))

            # Reduction over the last axis only.
            data = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            var = fluid.dygraph.to_variable(data)
            self.assertTrue(
                np.allclose(
                    paddle.logsumexp(var, dim=[2]).numpy(),
                    np.log(np.sum(np.exp(data), axis=(2)))))

            # Full reduction that keeps the reduced dimensions.
            data = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            var = fluid.dygraph.to_variable(data)
            self.assertTrue(
                np.allclose(
                    paddle.logsumexp(var, keepdim=True).numpy(),
                    np.log(np.sum(np.exp(data), keepdims=True))))

            # Writing the result into a caller-provided variable via out=.
            data = np.random.uniform(0.1, 1, [2, 3, 4]).astype(np.float32)
            var = fluid.dygraph.to_variable(data)
            helper = LayerHelper("test_logsumexp")
            result = helper.create_variable(
                type=var.type, name='out', dtype=var.dtype, persistable=False)
            paddle.logsumexp(var, out=result)
            self.assertTrue(
                np.allclose(result.numpy(), np.log(np.sum(np.exp(data)))))
# Allow running this test file directly as a script.
if __name__ == '__main__':
    unittest.main()
python/paddle/tensor/__init__.py
浏览文件 @
b61aaa2c
...
@@ -112,7 +112,7 @@ from .math import mm #DEFINE_ALIAS
...
@@ -112,7 +112,7 @@ from .math import mm #DEFINE_ALIAS
from
.math
import
div
#DEFINE_ALIAS
from
.math
import
div
#DEFINE_ALIAS
from
.math
import
add
#DEFINE_ALIAS
from
.math
import
add
#DEFINE_ALIAS
# from .math import atan #DEFINE_ALIAS
# from .math import atan #DEFINE_ALIAS
# from .math import logsumexp
#DEFINE_ALIAS
from
.math
import
logsumexp
#DEFINE_ALIAS
# from .math import inverse #DEFINE_ALIAS
# from .math import inverse #DEFINE_ALIAS
# from .math import log1p #DEFINE_ALIAS
# from .math import log1p #DEFINE_ALIAS
# from .math import erf #DEFINE_ALIAS
# from .math import erf #DEFINE_ALIAS
...
...
python/paddle/tensor/math.py
浏览文件 @
b61aaa2c
...
@@ -18,6 +18,7 @@ math functions
...
@@ -18,6 +18,7 @@ math functions
from
__future__
import
print_function
from
__future__
import
print_function
from
paddle.common_ops_import
import
*
from
paddle.common_ops_import
import
*
from
..fluid
import
layers
from
..fluid.framework
import
core
from
..fluid.framework
import
core
from
..fluid.layers.layer_function_generator
import
_generate_doc_string_
from
..fluid.layers.layer_function_generator
import
_generate_doc_string_
...
@@ -70,7 +71,7 @@ __all__ = [
...
@@ -70,7 +71,7 @@ __all__ = [
'div'
,
'div'
,
'add'
,
'add'
,
# 'atan',
# 'atan',
#
'logsumexp',
'logsumexp'
,
# 'inverse',
# 'inverse',
# 'log1p',
# 'log1p',
# 'erf',
# 'erf',
...
@@ -994,3 +995,57 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
...
@@ -994,3 +995,57 @@ def addmm(input, x, y, alpha=1.0, beta=1.0, name=None):
helper
.
append_op
(
helper
.
append_op
(
type
=
"addmm"
,
inputs
=
inputs
,
attrs
=
attrs
,
outputs
=
{
"Out"
:
out
})
type
=
"addmm"
,
inputs
=
inputs
,
attrs
=
attrs
,
outputs
=
{
"Out"
:
out
})
return
out
return
out
def logsumexp(x, dim=None, keepdim=False, out=None, name=None):
    """
    This operator calculates the log of the sum of exponentials of the input Tensor.

    .. math::
        logsumexp(x) = \\log\\sum exp(x)

    Parameters:
        x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
        dim (list|int, optional): The dimensions along which the sum is performed. If :attr:`None`,
            sum all elements of :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keepdim (bool, optional): Whether to reserve the reduced dimension in the output Tensor.
            The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keepdim`
            is true, default value is False.
        out (Variable, optional): If provided, the result is written into this existing variable
            (its dtype must match ``x``) and the same variable is returned. Default is None.
        name (str, optional): The default value is None. Normally there is no need for user to
            set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: the log-sum-exp of ``x`` reduced along ``dim`` (``out`` itself when provided).

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import numpy as np

            with fluid.dygraph.guard():
                np_x = np.random.uniform(0.1, 1, [10]).astype(np.float32)
                x = fluid.dygraph.to_variable(np_x)
                print(paddle.logsumexp(x).numpy())
    """
    op_type = 'logsumexp'
    assert x is not None, 'x cannot be None in {}'.format(op_type)

    # reduce_sum does not support float16
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)

    # NOTE(review): this is the naive formulation — exp(x) can overflow to
    # inf for large inputs before the log is taken; a max-subtraction trick
    # would be more numerically stable. Confirm whether expected input
    # ranges make this acceptable.
    exp_out = layers.exp(x)
    sum_out = layers.reduce_sum(exp_out, dim, keepdim)

    if out is not None:
        # Caller supplied the output variable: validate its dtype, then
        # append the final log op writing directly into it. LayerHelper is
        # built with **locals() per the surrounding file's convention.
        check_variable_and_dtype(out, 'out', [x.dtype], op_type)

        helper = LayerHelper(op_type, **locals())
        helper.append_op(
            type="log", inputs={"X": sum_out}, outputs={"Out": out})
        return out

    return layers.log(sum_out, name)
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录