BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Unverified commit 912be4f8, authored on Oct 09, 2022 by Kevin吴嘉文, committed via GitHub on Oct 09, 2022.
fix numpy issue in codeblock examples for operators under python/paddle/tensor folder (#46765)
Parent: 218c0129
Showing 6 changed files with 273 additions and 244 deletions.
python/paddle/tensor/creation.py      +84  −71
python/paddle/tensor/linalg.py        +49  −34
python/paddle/tensor/logic.py         +14  −20
python/paddle/tensor/manipulation.py  +13  −17
python/paddle/tensor/math.py          +75  −69
python/paddle/tensor/stat.py          +38  −33
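Every hunk below follows the same pattern: the docstring example drops its NumPy dependency, builds the input directly with Paddle, and documents the printed Tensor repr instead of the old .numpy() output. A minimal sketch of the conversion (illustrative only, not copied verbatim from any single hunk):

    import paddle

    # Old style (removed): data = np.arange(1, 13, dtype="int64").reshape(3, -1); x = paddle.to_tensor(data)
    # New style: build the tensor with Paddle directly and show the Tensor repr.
    x = paddle.arange(1, 13, dtype="int64").reshape([3, -1])
    print(x)
    # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True, ...)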
python/paddle/tensor/creation.py

@@ -1044,33 +1044,34 @@ def triu(x, diagonal=0, name=None):
     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle

-            data = np.arange(1, 13, dtype="int64").reshape(3,-1)
-            # array([[ 1,  2,  3,  4],
-            #        [ 5,  6,  7,  8],
-            #        [ 9, 10, 11, 12]])
+            x = paddle.arange(1, 13, dtype="int64").reshape([3,-1])
+            # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1 , 2 , 3 , 4 ],
+            #         [5 , 6 , 7 , 8 ],
+            #         [9 , 10, 11, 12]])

             # example 1, default diagonal
-            x = paddle.to_tensor(data)
             triu1 = paddle.tensor.triu(x)
-            # array([[ 1,  2,  3,  4],
-            #        [ 0,  6,  7,  8],
-            #        [ 0,  0, 11, 12]])
+            # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1 , 2 , 3 , 4 ],
+            #         [0 , 6 , 7 , 8 ],
+            #         [0 , 0 , 11, 12]])

             # example 2, positive diagonal value
             triu2 = paddle.tensor.triu(x, diagonal=2)
-            # array([[0, 0, 3, 4],
+            # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 0, 3, 4],
             #         [0, 0, 0, 8],
             #         [0, 0, 0, 0]])

             # example 3, negative diagonal value
             triu3 = paddle.tensor.triu(x, diagonal=-1)
-            # array([[ 1,  2,  3,  4],
-            #        [ 5,  6,  7,  8],
-            #        [ 0, 10, 11, 12]])
+            # Tensor(shape=[3, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1 , 2 , 3 , 4 ],
+            #         [5 , 6 , 7 , 8 ],
+            #         [0 , 10, 11, 12]])
     """
     if in_dygraph_mode():
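If you want to convince yourself the rewritten triu example is value-for-value identical to the old NumPy-based one, a quick cross-check like the following works (a sketch; NumPy lives only in the check, not in the docstring):

    import numpy as np
    import paddle

    old = np.triu(np.arange(1, 13, dtype="int64").reshape(3, -1))
    new = paddle.triu(paddle.arange(1, 13, dtype="int64").reshape([3, -1]))
    assert np.array_equal(old, new.numpy())  # same values, only the construction changed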
@@ -1178,24 +1179,27 @@ def diagflat(x, offset=0, name=None):
             x = paddle.to_tensor([1, 2, 3])

             y = paddle.diagflat(x)
-            print(y.numpy())
-            # [[1 0 0]
-            #  [0 2 0]
-            #  [0 0 3]]
+            print(y)
+            # Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1, 0, 0],
+            #         [0, 2, 0],
+            #         [0, 0, 3]])

             y = paddle.diagflat(x, offset=1)
-            print(y.numpy())
-            # [[0 1 0 0]
-            #  [0 0 2 0]
-            #  [0 0 0 3]
-            #  [0 0 0 0]]
+            print(y)
+            # Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 1, 0, 0],
+            #         [0, 0, 2, 0],
+            #         [0, 0, 0, 3],
+            #         [0, 0, 0, 0]])

             y = paddle.diagflat(x, offset=-1)
-            print(y.numpy())
-            # [[0 0 0 0]
-            #  [1 0 0 0]
-            #  [0 2 0 0]
-            #  [0 0 3 0]]
+            print(y)
+            # Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 0, 0, 0],
+            #         [1, 0, 0, 0],
+            #         [0, 2, 0, 0],
+            #         [0, 0, 3, 0]])

         .. code-block:: python
             :name: code-example-2

@@ -1204,27 +1208,30 @@ def diagflat(x, offset=0, name=None):
             x = paddle.to_tensor([[1, 2], [3, 4]])

             y = paddle.diagflat(x)
-            print(y.numpy())
-            # [[1 0 0 0]
-            #  [0 2 0 0]
-            #  [0 0 3 0]
-            #  [0 0 0 4]]
+            print(y)
+            # Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1, 0, 0, 0],
+            #         [0, 2, 0, 0],
+            #         [0, 0, 3, 0],
+            #         [0, 0, 0, 4]])

             y = paddle.diagflat(x, offset=1)
-            print(y.numpy())
-            # [[0 1 0 0 0]
-            #  [0 0 2 0 0]
-            #  [0 0 0 3 0]
-            #  [0 0 0 0 4]
-            #  [0 0 0 0 0]]
+            print(y)
+            # Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 1, 0, 0, 0],
+            #         [0, 0, 2, 0, 0],
+            #         [0, 0, 0, 3, 0],
+            #         [0, 0, 0, 0, 4],
+            #         [0, 0, 0, 0, 0]])

             y = paddle.diagflat(x, offset=-1)
-            print(y.numpy())
-            # [[0 0 0 0 0]
-            #  [1 0 0 0 0]
-            #  [0 2 0 0 0]
-            #  [0 0 3 0 0]
-            #  [0 0 0 4 0]]
+            print(y)
+            # Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 0, 0, 0, 0],
+            #         [1, 0, 0, 0, 0],
+            #         [0, 2, 0, 0, 0],
+            #         [0, 0, 3, 0, 0],
+            #         [0, 0, 0, 4, 0]])
     """
     padding_value = 0
     if in_dygraph_mode():
@@ -1318,23 +1325,26 @@ def diag(x, offset=0, padding_value=0, name=None):
             paddle.disable_static()

             x = paddle.to_tensor([1, 2, 3])
             y = paddle.diag(x)
-            print(y.numpy())
-            # [[1 0 0]
-            #  [0 2 0]
-            #  [0 0 3]]
+            print(y)
+            # Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1, 0, 0],
+            #         [0, 2, 0],
+            #         [0, 0, 3]])

             y = paddle.diag(x, offset=1)
-            print(y.numpy())
-            # [[0 1 0 0]
-            #  [0 0 2 0]
-            #  [0 0 0 3]
-            #  [0 0 0 0]]
+            print(y)
+            # Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[0, 1, 0, 0],
+            #         [0, 0, 2, 0],
+            #         [0, 0, 0, 3],
+            #         [0, 0, 0, 0]])

             y = paddle.diag(x, padding_value=6)
-            print(y.numpy())
-            # [[1 6 6]
-            #  [6 2 6]
-            #  [6 6 3]]
+            print(y)
+            # Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [[1, 6, 6],
+            #         [6, 2, 6],
+            #         [6, 6, 3]])

         .. code-block:: python
             :name: code-example-2

@@ -1344,16 +1354,19 @@ def diag(x, offset=0, padding_value=0, name=None):
             paddle.disable_static()

             x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
             y = paddle.diag(x)
-            print(y.numpy())
-            # [1 5]
+            print(y)
+            # Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [1, 5])

             y = paddle.diag(x, offset=1)
-            print(y.numpy())
-            # [2 6]
+            print(y)
+            # Tensor(shape=[2], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [2, 6])

             y = paddle.diag(x, offset=-1)
-            print(y.numpy())
-            # [4]
+            print(y)
+            # Tensor(shape=[1], dtype=int64, place=Place(cpu), stop_gradient=True,
+            #        [4])
     """
     if in_dygraph_mode():
         return _C_ops.diag(x, offset, padding_value)
@@ -1755,7 +1768,7 @@ def _memcpy(input, place=None, output=None):
         .. code-block:: python

             import paddle
-            import numpy as np

             data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
             result = paddle._memcpy(data, place=paddle.CPUPlace()) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
     """

@@ -1816,10 +1829,10 @@ def complex(real, imag, name=None):
             x = paddle.arange(2, dtype=paddle.float32).unsqueeze(-1)
             y = paddle.arange(3, dtype=paddle.float32)
             z = paddle.complex(x, y)
-            print(z.numpy())
-            # [[0.+0.j 0.+1.j 0.+2.j]
-            #  [1.+0.j 1.+1.j 1.+2.j]]
+            print(z)
+            # Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
+            #        [[0j    , 1j    , 2j    ],
+            #         [(1+0j), (1+1j), (1+2j)]])
     """
     if in_dygraph_mode():
         return _C_ops.complex(real, imag)
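As a small sanity check on the rewritten complex() example: combining two float32 tensors broadcasts (2, 1) with (3,) and yields a complex64 result of shape [2, 3], which is exactly what the new Tensor repr documents. A minimal sketch:

    import paddle

    x = paddle.arange(2, dtype=paddle.float32).unsqueeze(-1)
    y = paddle.arange(3, dtype=paddle.float32)
    z = paddle.complex(x, y)
    assert z.dtype == paddle.complex64 and z.shape == [2, 3]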
python/paddle/tensor/linalg.py

@@ -292,38 +292,53 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np
-            shape=[2, 3, 4]
-            np_input = np.arange(24).astype('float32') - 12
-            np_input = np_input.reshape(shape)
-            x = paddle.to_tensor(np_input)
-            #[[[-12. -11. -10.  -9.] [ -8.  -7.  -6.  -5.] [ -4.  -3.  -2.  -1.]]
-            # [[  0.   1.   2.   3.] [  4.   5.   6.   7.] [  8.   9.  10.  11.]]]
+
+            x = paddle.arange(24, dtype="float32").reshape([2, 3, 4]) - 12
+            # x: Tensor(shape=[2, 3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #    [[[-12., -11., -10., -9. ],
+            #      [-8. , -7. , -6. , -5. ],
+            #      [-4. , -3. , -2. , -1. ]],
+            #     [[ 0. ,  1. ,  2. ,  3. ],
+            #      [ 4. ,  5. ,  6. ,  7. ],
+            #      [ 8. ,  9. ,  10., 11.]]])

             # compute frobenius norm along last two dimensions.
             out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
-            # out_fro.numpy() [17.435596 16.911535 16.7332  16.911535]
+            # out_fro: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #          [17.43559647, 16.91153526, 16.73320007, 16.91153526])

             # compute 2-order vector norm along last dimension.
             out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
-            #out_pnorm.numpy(): [[21.118711  13.190906   5.477226 ]
-            #                    [ 3.7416575 11.224972  19.131126 ]]
+            # out_pnorm: Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #            [[21.11871147, 13.19090557, 5.47722578 ],
+            #             [3.74165750 , 11.22497177, 19.13112640]])

             # compute 2-order norm along [0,1] dimension.
             out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
-            #out_pnorm.numpy(): [17.435596 16.911535 16.7332  16.911535]
+            # out_pnorm: Tensor(shape=[4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #            [17.43559647, 16.91153526, 16.73320007, 16.91153526])

             # compute inf-order norm
-            out_pnorm = paddle.linalg.norm(x, p=np.inf)
-            #out_pnorm.numpy()  = [12.]
-            out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
-            #out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]
+            out_pnorm = paddle.linalg.norm(x, p=float("inf"))
+            # out_pnorm = Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #             [12.])
+            out_pnorm = paddle.linalg.norm(x, p=float("inf"), axis=0)
+            # out_pnorm: Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #            [[12., 11., 10., 9. ],
+            #             [8. , 7. , 6. , 7. ],
+            #             [8. , 9. , 10., 11.]])

             # compute -inf-order norm
-            out_pnorm = paddle.linalg.norm(x, p=-np.inf)
-            #out_pnorm.numpy(): [0.]
-            out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
-            #out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
+            out_pnorm = paddle.linalg.norm(x, p=-float("inf"))
+            # out_pnorm: Tensor(shape=[1], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #            [0.])
+            out_pnorm = paddle.linalg.norm(x, p=-float("inf"), axis=0)
+            # out_pnorm: Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #            [[0., 1., 2., 3.],
+            #             [4., 5., 6., 5.],
+            #             [4., 3., 2., 1.]])
     """

     def frobenius_norm(input, dim=None, keepdim=False, name=None):
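The norm example now passes float("inf") instead of np.inf; the two are the same Python float, so behaviour is unchanged. A short check of the inf-order norms from the updated example (a sketch, assuming CPU execution):

    import paddle

    x = paddle.arange(24, dtype="float32").reshape([2, 3, 4]) - 12
    inf_norm = paddle.linalg.norm(x, p=float("inf"))       # max |x| over all elements
    neg_inf_norm = paddle.linalg.norm(x, p=-float("inf"))  # min |x| over all elements
    print(inf_norm.item(), neg_inf_norm.item())            # 12.0 0.0, matching the docstring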
@@ -634,10 +649,9 @@ def dist(x, y, p=2, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

-            x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")
-            y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")
+            x = paddle.to_tensor([[3, 3],[3, 3]], dtype="float32")
+            y = paddle.to_tensor([[3, 3],[3, 1]], dtype="float32")
             out = paddle.dist(x, y, 0)
             print(out) # out = [1.]

@@ -1046,14 +1060,18 @@ def dot(x, y, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

-            x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
-            y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
-            x = paddle.to_tensor(x_data)
-            y = paddle.to_tensor(y_data)
-            z = paddle.dot(x, y)
-            print(z)
+            # 1-D Tensor * 1-D Tensor
+            x = paddle.to_tensor([1, 2, 3])
+            y = paddle.to_tensor([4, 5, 6])
+            z = paddle.dot(x, y)
+            print(z)  # [32]
+
+            # 2-D Tensor * 2-D Tensor
+            x = paddle.to_tensor([[1, 2, 3], [2, 4, 6]])
+            y = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
+            z = paddle.dot(x, y)
+            print(z)  # [[32], [64]]
     """
     if in_dygraph_mode():

@@ -2454,7 +2472,6 @@ def multi_dot(x, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

             # A * B
             A = paddle.rand([3, 4])

@@ -3016,7 +3033,6 @@ def triangular_solve(x,
             # -x3 = 5

             import paddle
-            import numpy as np

             x = paddle.to_tensor([[1, 1, 1],
                                   [0, 2, 1],

@@ -3127,14 +3143,13 @@ def eigvalsh(x, UPLO='L', name=None):
     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle

-            x_data = np.array([[1, -2j], [2j, 5]])
-            x = paddle.to_tensor(x_data)
+            x = paddle.to_tensor([[1, -2j], [2j, 5]])
             out_value = paddle.eigvalsh(x, UPLO='L')
             print(out_value)
-            #[0.17157288, 5.82842712]
+            # Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [0.17157286, 5.82842731])
     """
     if in_dygraph_mode():
         values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
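The eigvalsh example shows that a Hermitian input can be written directly with Python complex literals; no np.array round-trip is needed. The eigenvalues of [[1, -2j], [2j, 5]] are 3 ± 2√2 ≈ 0.1716 and 5.8284, matching the new expected output. A minimal sketch:

    import paddle

    x = paddle.to_tensor([[1, -2j], [2j, 5]])   # Python complex literals are accepted directly
    out_value = paddle.eigvalsh(x, UPLO='L')
    print(out_value)
    # expected (per the updated docstring): roughly [0.17157286, 5.82842731]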
python/paddle/tensor/logic.py

@@ -150,14 +150,14 @@ def logical_or(x, y, out=None, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

-            x_data = np.array([True, False], dtype=np.bool_).reshape(2, 1)
-            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape(2, 2)
-            x = paddle.to_tensor(x_data)
-            y = paddle.to_tensor(y_data)
+            x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
+            y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
             res = paddle.logical_or(x, y)
-            print(res) # [[ True  True] [ True False]]
+            print(res)
+            # Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
+            #        [[True , True ],
+            #         [True , False]])
     """
     if in_dygraph_mode():
         return _C_ops.logical_or(x, y)

@@ -195,14 +195,14 @@ def logical_xor(x, y, out=None, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

-            x_data = np.array([True, False], dtype=np.bool_).reshape([2, 1])
-            y_data = np.array([True, False, True, False], dtype=np.bool_).reshape([2, 2])
-            x = paddle.to_tensor(x_data)
-            y = paddle.to_tensor(y_data)
+            x = paddle.to_tensor([True, False], dtype="bool").reshape([2, 1])
+            y = paddle.to_tensor([True, False, True, False], dtype="bool").reshape([2, 2])
             res = paddle.logical_xor(x, y)
-            print(res) # [[False,  True], [ True, False]]
+            print(res)
+            # Tensor(shape=[2, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
+            #        [[False, True ],
+            #         [True , False]])
     """
     if in_dygraph_mode():
         return _C_ops.logical_xor(x, y)

@@ -373,22 +373,20 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
             y = paddle.to_tensor([10000.1, 1e-08])
             result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                       equal_nan=False, name="ignore_nan")
-            np_result1 = result1.numpy()
             # [False]
             result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                       equal_nan=True, name="equal_nan")
-            np_result2 = result2.numpy()
             # [False]

             x = paddle.to_tensor([1.0, float('nan')])
             y = paddle.to_tensor([1.0, float('nan')])
             result1 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                       equal_nan=False, name="ignore_nan")
-            np_result1 = result1.numpy()
             # [False]
             result2 = paddle.allclose(x, y, rtol=1e-05, atol=1e-08,
                                       equal_nan=True, name="equal_nan")
-            np_result2 = result2.numpy()
             # [True]
     """

@@ -966,22 +964,18 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
             y = paddle.to_tensor([10000.1, 1e-08])
             result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=False, name="ignore_nan")
-            np_result1 = result1.numpy()
             # [True, False]
             result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
-            np_result2 = result2.numpy()
             # [True, False]

             x = paddle.to_tensor([1.0, float('nan')])
             y = paddle.to_tensor([1.0, float('nan')])
             result1 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=False, name="ignore_nan")
-            np_result1 = result1.numpy()
             # [True, False]
             result2 = paddle.isclose(x, y, rtol=1e-05, atol=1e-08,
                                      equal_nan=True, name="equal_nan")
-            np_result2 = result2.numpy()
             # [True, True]
     """
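The allclose/isclose examples no longer materialise np_result via .numpy(); the returned bool Tensor can be inspected directly. A sketch of how a caller reads the result now (the x tensor is taken from the full docstring, not from the hunk shown above, and the bool() call is my addition):

    import paddle

    x = paddle.to_tensor([10000., 1e-07])
    y = paddle.to_tensor([10000.1, 1e-08])
    res = paddle.allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False)
    print(res)        # bool Tensor, [False] per the docstring
    print(bool(res))  # False, with no .numpy() round-trip required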
python/paddle/tensor/manipulation.py

@@ -1229,12 +1229,9 @@ def flip(x, axis, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

             image_shape=(3, 2, 2)
-            x = np.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
-            x = x.astype('float32')
-            img = paddle.to_tensor(x)
+            img = paddle.arange(image_shape[0] * image_shape[1] * image_shape[2]).reshape(image_shape)
             tmp = paddle.flip(img, [0,1])
             print(tmp) # [[[10,11],[8, 9]], [[6, 7],[4, 5]], [[2, 3],[0, 1]]]

@@ -2877,15 +2874,12 @@ def chunk(x, chunks, axis=0, name=None):
     Returns:
         list(Tensor): The list of segmented Tensors.

-    Example:
+    Examples:
         .. code-block:: python

-            import numpy as np
             import paddle

-            x_np = np.random.random([3, 9, 5]).astype("int32")
-            x = paddle.to_tensor(x_np)
+            # x is a Tensor which shape is [3, 9, 5]
+            x = paddle.rand([3, 9, 5])

             out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
             # out0.shape [3, 3, 5]

@@ -4440,10 +4434,11 @@ def index_add(x, index, axis, value, name=None):
             index = paddle.to_tensor([0, 2], dtype="int32")
             value = paddle.to_tensor([[1, 1, 1], [1, 1, 1]], dtype="float32")
             outplace_res = paddle.index_add(input_tensor, index, 0, value)
-            print(outplace_res.numpy())
-            # [[2 2 2]
-            #  [1 1 1]
-            #  [2 2 2]]
+            print(outplace_res)
+            # Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[2., 2., 2.],
+            #         [1., 1., 1.],
+            #         [2., 2., 2.]])
     """
     if in_dygraph_mode():
         return _C_ops.index_add(x, index, value, axis)

@@ -4487,10 +4482,11 @@ def index_add_(x, index, axis, value, name=None):
             index = paddle.to_tensor([0, 2], dtype="int32")
             value = paddle.to_tensor([[1, 1], [1, 1], [1, 1]], dtype="float32")
             inplace_res = paddle.index_add_(input_tensor, index, 1, value)
-            print(inplace_res.numpy())
-            # [[2, 1, 2]
-            #  [2, 1, 2]
-            #  [2, 1, 2]]
+            print(inplace_res)
+            # Tensor(shape=[3, 3], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            #        [[2., 1., 2.],
+            #         [2., 1., 2.],
+            #         [2., 1., 2.]])
     """
     return _C_ops.index_add_(x, index, value, axis)
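The index_add examples are documented with GPU placement (Place(gpu:0)), but the semantics are placement-independent: the rows (or columns, for axis=1) selected by index are incremented by value. A CPU-friendly sketch; the ones() construction of input_tensor is my assumption, not part of the hunk shown above:

    import paddle

    input_tensor = paddle.ones([3, 3], dtype="float32")                 # assumed starting value
    index = paddle.to_tensor([0, 2], dtype="int32")
    value = paddle.to_tensor([[1, 1, 1], [1, 1, 1]], dtype="float32")
    out = paddle.index_add(input_tensor, index, 0, value)
    print(out)
    # rows 0 and 2 are incremented by 1:
    # [[2., 2., 2.],
    #  [1., 1., 1.],
    #  [2., 2., 2.]]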
python/paddle/tensor/math.py

@@ -929,34 +929,37 @@ def maximum(x, y, name=None):
         .. code-block:: python

-            import numpy as np
             import paddle

             x = paddle.to_tensor([[1, 2], [7, 8]])
             y = paddle.to_tensor([[3, 4], [5, 6]])
             res = paddle.maximum(x, y)
             print(res)
+            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[3, 4],
-            #  [7, 8]]
+            #  [7, 8]])

             x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
             y = paddle.to_tensor([3, 0, 4])
             res = paddle.maximum(x, y)
             print(res)
+            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[3, 2, 4],
-            #  [3, 2, 4]]
+            #  [3, 2, 4]])

             x = paddle.to_tensor([2, 3, 5], dtype='float32')
-            y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
+            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
             res = paddle.maximum(x, y)
             print(res)
-            # [ 2., nan, nan]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [2. , nan, nan])

-            x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
-            y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
+            x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
+            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
             res = paddle.maximum(x, y)
             print(res)
-            # [ 5., 3., inf.]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [5. , 3. , inf.])
     """
     op_type = 'elementwise_max'
     axis = -1

@@ -994,34 +997,37 @@ def minimum(x, y, name=None):
         .. code-block:: python

-            import numpy as np
             import paddle

             x = paddle.to_tensor([[1, 2], [7, 8]])
             y = paddle.to_tensor([[3, 4], [5, 6]])
             res = paddle.minimum(x, y)
             print(res)
+            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[1, 2],
-            #  [5, 6]]
+            #  [5, 6]])

             x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
             y = paddle.to_tensor([3, 0, 4])
             res = paddle.minimum(x, y)
             print(res)
+            # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[[1, 0, 3],
-            #   [1, 0, 3]]]
+            #   [1, 0, 3]]])

             x = paddle.to_tensor([2, 3, 5], dtype='float32')
-            y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
+            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
             res = paddle.minimum(x, y)
             print(res)
-            # [ 1., nan, nan]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [1. , nan, nan])

-            x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
-            y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
+            x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
+            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
             res = paddle.minimum(x, y)
             print(res)
-            # [ 1., -inf., 5.]
+            # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [ 1.  , -inf.,  5.  ])
     """
     op_type = 'elementwise_min'
     axis = -1

@@ -1061,34 +1067,37 @@ def fmax(x, y, name=None):
         .. code-block:: python

-            import numpy as np
             import paddle

             x = paddle.to_tensor([[1, 2], [7, 8]])
             y = paddle.to_tensor([[3, 4], [5, 6]])
             res = paddle.fmax(x, y)
             print(res)
+            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[3, 4],
-            #  [7, 8]]
+            #  [7, 8]])

             x = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])
             y = paddle.to_tensor([3, 0, 4])
             res = paddle.fmax(x, y)
             print(res)
+            # Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[3, 2, 4],
-            #  [3, 2, 4]]
+            #  [3, 2, 4]])

             x = paddle.to_tensor([2, 3, 5], dtype='float32')
-            y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
+            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
             res = paddle.fmax(x, y)
             print(res)
-            # [ 2., 3., 5.]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [2., 3., 5.])

-            x = paddle.to_tensor([5, 3, np.inf], dtype='float32')
-            y = paddle.to_tensor([1, -np.inf, 5], dtype='float32')
+            x = paddle.to_tensor([5, 3, float("inf")], dtype='float32')
+            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float32')
             res = paddle.fmax(x, y)
             print(res)
-            # [ 5., 3., inf.]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [5. , 3. , inf.])
     """
     op_type = 'elementwise_fmax'
     axis = -1

@@ -1128,34 +1137,37 @@ def fmin(x, y, name=None):
         .. code-block:: python

-            import numpy as np
             import paddle

             x = paddle.to_tensor([[1, 2], [7, 8]])
             y = paddle.to_tensor([[3, 4], [5, 6]])
             res = paddle.fmin(x, y)
             print(res)
+            # Tensor(shape=[2, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[1, 2],
-            #  [5, 6]]
+            #  [5, 6]])

             x = paddle.to_tensor([[[1, 2, 3], [1, 2, 3]]])
             y = paddle.to_tensor([3, 0, 4])
             res = paddle.fmin(x, y)
             print(res)
+            # Tensor(shape=[1, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
             # [[[1, 0, 3],
-            #   [1, 0, 3]]]
+            #   [1, 0, 3]]])

             x = paddle.to_tensor([2, 3, 5], dtype='float32')
-            y = paddle.to_tensor([1, np.nan, np.nan], dtype='float32')
+            y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')
             res = paddle.fmin(x, y)
             print(res)
-            # [ 1., 3., 5.]
+            # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [1., 3., 5.])

-            x = paddle.to_tensor([5, 3, np.inf], dtype='float64')
-            y = paddle.to_tensor([1, -np.inf, 5], dtype='float64')
+            x = paddle.to_tensor([5, 3, float("inf")], dtype='float64')
+            y = paddle.to_tensor([1, -float("inf"), 5], dtype='float64')
             res = paddle.fmin(x, y)
             print(res)
-            # [ 1., -inf., 5.]
+            # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [ 1.  , -inf.,  5.  ])
     """
     op_type = 'elementwise_fmin'
     axis = -1
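The NaN examples above highlight the behavioural difference these docstrings document: maximum/minimum propagate NaN, while fmax/fmin skip it when the other operand is a number. A compact side-by-side sketch using the same inputs as the docstrings:

    import paddle

    x = paddle.to_tensor([2, 3, 5], dtype='float32')
    y = paddle.to_tensor([1, float("nan"), float("nan")], dtype='float32')

    print(paddle.maximum(x, y))  # [2., nan, nan]  -> NaN propagates
    print(paddle.fmax(x, y))     # [2., 3., 5.]    -> NaN is ignored where possible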
@@ -1321,15 +1333,13 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

             # x is a Tensor with following elements:
             #    [[nan, 0.3, 0.5, 0.9]
             #     [0.1, 0.2, -nan, 0.7]]
             # Each example is followed by the corresponding output tensor.
-            x = np.array([[float('nan'), 0.3, 0.5, 0.9],
-                          [0.1, 0.2, float('-nan'), 0.7]]).astype(np.float32)
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor([[float('nan'), 0.3, 0.5, 0.9],
+                                  [0.1, 0.2, float('-nan'), 0.7]],dtype="float32")
             out1 = paddle.nansum(x)          # [2.7]
             out2 = paddle.nansum(x, axis=0)  # [0.1, 0.5, 0.5, 1.6]
             out3 = paddle.nansum(x, axis=-1) # [1.7, 1.0]

@@ -1339,9 +1349,8 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
             #    [[[1, nan], [3, 4]],
             #     [[5, 6], [-nan, 8]]]
             # Each example is followed by the corresponding output tensor.
-            y = np.array([[[1, float('nan')], [3, 4]],
-                          [[5, 6], [float('-nan'), 8]]])
-            y = paddle.to_tensor(y)
+            y = paddle.to_tensor([[[1, float('nan')], [3, 4]],
+                                  [[5, 6], [float('-nan'), 8]]])
             out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
             out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
     """

@@ -4323,7 +4332,7 @@ def rad2deg(x, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np
+            import math

             x1 = paddle.to_tensor([3.142, -3.142, 6.283, -6.283, 1.570, -1.570])
             result1 = paddle.rad2deg(x1)

@@ -4332,7 +4341,7 @@ def rad2deg(x, name=None):
             #        [180.02334595, -180.02334595,  359.98937988, -359.98937988,
             #         89.95437622 , -89.95437622 ])

-            x2 = paddle.to_tensor(np.pi/2)
+            x2 = paddle.to_tensor(math.pi/2)
             result2 = paddle.rad2deg(x2)
             print(result2)
             # Tensor(shape=[1], dtype=float32, place=CUDAPlace(0), stop_gradient=True,

@@ -4813,18 +4822,20 @@ def angle(x, name=None):
             x = paddle.to_tensor([-2, -1, 0, 1]).unsqueeze(-1).astype('float32')
             y = paddle.to_tensor([-2, -1, 0, 1]).astype('float32')
             z = x + 1j * y
-            print(z.numpy())
-            # [[-2.-2.j -2.-1.j -2.+0.j -2.+1.j]
-            #  [-1.-2.j -1.-1.j -1.+0.j -1.+1.j]
-            #  [ 0.-2.j  0.-1.j  0.+0.j  0.+1.j]
-            #  [ 1.-2.j  1.-1.j  1.+0.j  1.+1.j]]
+            print(z)
+            # Tensor(shape=[4, 4], dtype=complex64, place=Place(cpu), stop_gradient=True,
+            #        [[(-2-2j), (-2-1j), (-2+0j), (-2+1j)],
+            #         [(-1-2j), (-1-1j), (-1+0j), (-1+1j)],
+            #         [-2j    , -1j    ,  0j    ,  1j    ],
+            #         [ (1-2j),  (1-1j),  (1+0j),  (1+1j)]])

             theta = paddle.angle(z)
-            print(theta.numpy())
-            # [[-2.3561945 -2.6779451  3.1415927  2.6779451]
-            #  [-2.0344439 -2.3561945  3.1415927  2.3561945]
-            #  [-1.5707964 -1.5707964  0.         1.5707964]
-            #  [-1.1071488 -0.7853982  0.         0.7853982]]
+            print(theta)
+            # Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [[-2.35619450, -2.67794514,  3.14159274,  2.67794514],
+            #         [-2.03444386, -2.35619450,  3.14159274,  2.35619450],
+            #         [-1.57079637, -1.57079637,  0.        ,  1.57079637],
+            #         [-1.10714877, -0.78539819,  0.        ,  0.78539819]])
     """
     if in_dygraph_mode():

@@ -4911,19 +4922,14 @@ def frac(x, name=None):
         .. code-block:: python

             import paddle
-            import numpy as np

-            input = paddle.rand([3, 3], 'float32')
-            print(input.numpy())
-            # [[ 1.2203873  -1.0035421  -0.35193074]
-            #  [-0.00928353  0.58917075 -0.8407828 ]
-            #  [-1.5131804   0.5850153  -0.17597814]]
-
+            input = paddle.to_tensor([[12.22000003, -1.02999997],
+                                      [-0.54999995, 0.66000003]])
             output = paddle.frac(input)
-            print(output.numpy())
-            # [[ 0.22038734 -0.00354207 -0.35193074]
-            #  [-0.00928353  0.58917075 -0.8407828 ]
-            #  [-0.5131804   0.5850153  -0.17597814]]
+            print(output)
+            # Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [[ 0.22000003, -0.02999997],
+            #         [-0.54999995,  0.66000003]])
     """
     op_type = 'elementwise_sub'
     axis = -1
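rad2deg now takes its π/2 constant from the standard library instead of NumPy; math.pi and np.pi are the same double, so the expected value is unchanged. A minimal sketch (the docstring shows the result on CUDAPlace(0); on CPU only the place differs):

    import math
    import paddle

    x2 = paddle.to_tensor(math.pi / 2)
    result2 = paddle.rad2deg(x2)
    print(result2)  # pi/2 rad is 90 degrees, so the value printed is 90.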
python/paddle/tensor/stat.py

@@ -605,32 +605,35 @@ def quantile(x, q, axis=None, keepdim=False):
     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle

-            x = np.arange(0, 8, dtype=np.float32).reshape(4, 2)
-            # [[0 1]
-            #  [2 3]
-            #  [4 5]
-            #  [6 7]]
-            y = paddle.to_tensor(x)
+            y = paddle.arange(0, 8 ,dtype="float32").reshape([4, 2])
+            # Tensor(shape=[4, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            #        [[0., 1.],
+            #         [2., 3.],
+            #         [4., 5.],
+            #         [6., 7.]])
             y1 = paddle.quantile(y, q=0.5, axis=[0, 1])
-            # 3.5
+            # Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        3.50000000)
             y2 = paddle.quantile(y, q=0.5, axis=1)
-            # [0.5 2.5 4.5 6.5]
+            # Tensor(shape=[4], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [0.50000000, 2.50000000, 4.50000000, 6.50000000])
             y3 = paddle.quantile(y, q=[0.3, 0.5], axis=0)
-            # [[1.8 2.8]
-            #  [3.  4. ]]
+            # Tensor(shape=[2, 2], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [[1.80000000, 2.80000000],
+            #         [3.        , 4.        ]])

-            x[0][0] = np.nan
-            y = paddle.to_tensor(x)
+            y[0,0] = float("nan")
             y4 = paddle.quantile(y, q=0.8, axis=1, keepdim=True)
-            # [[nan]
-            #  [2.8]
-            #  [4.8]
-            #  [6.8]]
+            # Tensor(shape=[4, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [[nan       ],
+            #         [2.80000000],
+            #         [4.80000000],
+            #         [6.80000000]])
     """
     return _compute_quantile(x, q, axis=axis, keepdim=keepdim, ignore_nan=False)

@@ -665,35 +668,37 @@ def nanquantile(x, q, axis=None, keepdim=False):
     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle

-            x = np.array(
-                [[0, 1, 2, 3, 4],
-                 [5, 6, 7, 8, 9]],
-                dtype=np.float32)
-            x[0][0] = np.nan
-            x = paddle.to_tensor(x)
+            x = paddle.to_tensor(
+                [[0, 1, 2, 3, 4],
+                 [5, 6, 7, 8, 9]],
+                dtype="float32")
+            x[0,0] = float("nan")
             y1 = paddle.nanquantile(x, q=0.5, axis=[0, 1])
-            # 5.0
+            # Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        5.)
             y2 = paddle.nanquantile(x, q=0.5, axis=1)
-            # [2.5 7. ]
+            # Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [2.50000000, 7.        ])
             y3 = paddle.nanquantile(x, q=[0.3, 0.5], axis=0)
-            # [[5.  2.5 3.5 4.5 5.5]
-            #  [5.  3.5 4.5 5.5 6.5]]
+            # Tensor(shape=[2, 5], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [[5.        , 2.50000000, 3.50000000, 4.50000000, 5.50000000],
+            #         [5.        , 3.50000000, 4.50000000, 5.50000000, 6.50000000]])
             y4 = paddle.nanquantile(x, q=0.8, axis=1, keepdim=True)
-            # [[3.4]
-            #  [8.2]]
+            # Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [[3.40000000],
+            #         [8.20000000]])

-            nan = paddle.full(shape=[2, 3], fill_value=np.nan)
+            nan = paddle.full(shape=[2, 3], fill_value=float("nan"))
             y5 = paddle.nanquantile(nan, q=0.8, axis=1, keepdim=True)
-            # [[nan]
-            #  [nan]]
+            # Tensor(shape=[2, 1], dtype=float64, place=Place(cpu), stop_gradient=True,
+            #        [[nan],
+            #         [nan]])
     """
     return _compute_quantile(x, q, axis=axis, keepdim=keepdim, ignore_nan=True)
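The quantile/nanquantile examples now inject NaN by assigning into the Paddle tensor itself (y[0, 0] = float("nan")) rather than mutating a NumPy array and re-converting it. A short sketch contrasting the two ops on the same NaN-bearing input:

    import paddle

    x = paddle.arange(0, 8, dtype="float32").reshape([4, 2])
    x[0, 0] = float("nan")  # in-place item assignment on the tensor

    print(paddle.quantile(x, q=0.5, axis=1))     # the row containing NaN yields nan
    print(paddle.nanquantile(x, q=0.5, axis=1))  # NaN is ignored: that row falls back to its remaining value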