Commit 42eb56e2 (unverified)

unset fluid in tensor (#35082)

Authored by zhiboniu on Feb 22, 2022; committed via GitHub on Feb 22, 2022.
Parent commit: a08ee62a

Showing 12 changed files with 223 additions and 218 deletions.
python/paddle/framework/__init__.py    +6   -1
python/paddle/tensor/attribute.py      +4   -3
python/paddle/tensor/creation.py       +15  -17
python/paddle/tensor/einsum.py         +4   -5
python/paddle/tensor/linalg.py         +35  -34
python/paddle/tensor/logic.py          +12  -14
python/paddle/tensor/manipulation.py   +24  -24
python/paddle/tensor/math.py           +65  -63
python/paddle/tensor/random.py         +14  -13
python/paddle/tensor/search.py         +37  -36
python/paddle/tensor/stat.py           +6   -7
python/paddle/tensor/to_string.py      +1   -1
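The diffs below are almost entirely mechanical: every `if in_dygraph_mode():` guard in `python/paddle/tensor/*` becomes `if paddle.in_dynamic_mode():`, and imports of framework internals move off `paddle.fluid.*` paths onto the `paddle.framework` / `paddle.static` facades. As a reference point, here is a minimal, runnable sketch of the mode flag both spellings resolve to (this snippet is illustrative and not part of the commit):

import paddle

# paddle.in_dynamic_mode() is the public spelling of the old internal
# in_dygraph_mode() check: True under the default imperative mode,
# False once static-graph mode is switched on.
print(paddle.in_dynamic_mode())   # True by default in Paddle 2.x

paddle.enable_static()
print(paddle.in_dynamic_mode())   # False

paddle.disable_static()
print(paddle.in_dynamic_mode())   # True again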
python/paddle/framework/__init__.py

@@ -32,7 +32,7 @@ from ..fluid.core import MLUPlace  # noqa: F401
 from ..fluid.core import CustomPlace  # noqa: F401
 from ..fluid.core import VarBase  # noqa: F401

-from paddle.fluid import core  # noqa: F401
+from ..fluid import core  # noqa: F401
 from ..fluid.dygraph.base import no_grad_ as no_grad  # noqa: F401
 from ..fluid.dygraph.base import grad  # noqa: F401
 from .io import save  # noqa: F401
@@ -47,5 +47,10 @@ from ..fluid.framework import set_flags  # noqa: F401
 from ..fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
 from ..fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
 from ..fluid.framework import in_dygraph_mode as in_dynamic_mode  # noqa: F401
+from ..fluid.framework import _current_expected_place, _get_paddle_place  # noqa: F401
+from ..fluid.framework import dygraph_only  # noqa: F401
+from ..fluid.framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder  # noqa: F401
+from ..fluid.framework import _in_eager_mode  # noqa: F401
+from ..fluid.framework import _dygraph_tracer  # noqa: F401

 __all__ = []
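These added re-exports are what let the tensor modules below replace `from ..fluid.framework import ...` with `from ..framework import ...`. A small check of the facade relationship, grounded in the `from ..fluid import core` re-export above (the snippet itself is not part of the commit):

import paddle.framework
import paddle.fluid

# framework/__init__.py re-exports the fluid module, so as of this commit
# both attribute paths resolve to the very same module object.
assert paddle.framework.core is paddle.fluid.core
print("facade and legacy path share one module")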
python/paddle/tensor/attribute.py

@@ -14,7 +14,7 @@
 from __future__ import print_function

-from ..fluid.framework import core, in_dygraph_mode, Variable
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype
@@ -23,6 +23,7 @@ from ..fluid.layers import rank  # noqa: F401
 from ..fluid.layers import shape  # noqa: F401
 import paddle
 from paddle import _C_ops
+from paddle.static import Variable

 __all__ = []
@@ -184,7 +185,7 @@ def real(x, name=None):
             #        [[1., 2., 3.],
             #         [4., 5., 6.]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.real(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
@@ -228,7 +229,7 @@ def imag(x, name=None):
             #        [[6., 5., 4.],
             #         [3., 2., 1.]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.imag(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
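The two functions touched here differ only in the dispatched op. A short usage sketch, assuming a Paddle build with complex dtype support (not code from the commit):

import paddle

x = paddle.to_tensor([1 + 2j, 3 - 4j])
# real() and imag() accept complex64/complex128 inputs, matching the
# check_variable_and_dtype calls in the hunks above.
print(paddle.real(x))  # [1., 3.]
print(paddle.imag(x))  # [2., -4.]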
python/paddle/tensor/creation.py

@@ -18,21 +18,19 @@ from paddle.common_ops_import import fill_constant
 from ..fluid.layers import utils
-from ..fluid.layers import tensor
-from ..fluid.framework import Variable
-from ..fluid.framework import unique_name
-from ..fluid.framework import _current_expected_place, _get_paddle_place
-from ..fluid.framework import dygraph_only
-from ..fluid.initializer import Constant
-from ..fluid.layers import core
+from ..static import Variable, device_guard
+from ..framework import _current_expected_place, _get_paddle_place
+from ..framework import dygraph_only
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
-from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard, OpProtoHolder
+from ..framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder
 from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype
 # TODO: define functions to get create a tensor
 from ..fluid.layers import linspace  # noqa: F401
 import paddle
 from paddle import _C_ops
-from ..fluid.framework import _in_eager_mode
+from ..framework import _in_eager_mode

 __all__ = []
@@ -214,7 +212,7 @@ def full_like(x, fill_value, dtype=None, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)

     helper = LayerHelper("full_like", **locals())
@@ -648,7 +646,7 @@ def tril(x, diagonal=0, name=None):
             #         [ 9, 10, 0, 0]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, 'tril_triu')
         return op(x, 'diagonal', diagonal, "lower", True)
@@ -715,7 +713,7 @@ def triu(x, diagonal=0, name=None):
             #         [ 0, 10, 11, 12]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, 'tril_triu')
         return op(x, 'diagonal', diagonal, "lower", False)
@@ -757,7 +755,7 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         num = len(args)
         out = _C_ops.meshgrid(list(args), num)
         return out
@@ -862,7 +860,7 @@ def diagflat(x, offset=0, name=None):
             #  [0 0 0 4 0]]
     """
     padding_value = 0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if len(x.shape) == 1:
             return _C_ops.diag_v2(x, "offset", offset, "padding_value", padding_value)
@@ -976,7 +974,7 @@ def diag(x, offset=0, padding_value=0, name=None):
             print(y.numpy())
             # [4]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.diag_v2(x, "offset", offset, "padding_value", padding_value)
@@ -1057,7 +1055,7 @@ def empty(shape, dtype=None, name=None):
     dtype = convert_dtype(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _C_ops.empty('shape', shape, 'dtype', convert_np_dtype_to_dtype_(dtype))
@@ -1125,7 +1123,7 @@ def empty_like(x, dtype=None, name=None):
         dtype = x.dtype
     dtype = convert_dtype(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.empty('shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype))
         out.stop_gradient = True
@@ -1309,7 +1307,7 @@ def complex(real, imag, name=None):
             # [[0.+0.j 0.+1.j 0.+2.j]
             #  [1.+0.j 1.+1.j 1.+2.j]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.complex(real, imag)

     check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
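The tril and triu hunks above show that both functions dispatch to one shared 'tril_triu' op, differing only in the "lower" attribute. A quick usage sketch (not from the commit):

import paddle

x = paddle.arange(1, 13, dtype='int64').reshape([3, 4])

# Same kernel, opposite "lower" flag: True keeps the lower triangle,
# False keeps the upper triangle.
print(paddle.tril(x))  # zeros above the main diagonal
print(paddle.triu(x))  # zeros below the main diagonal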
python/paddle/tensor/einsum.py

@@ -15,9 +15,8 @@
 import itertools
 import re

-from ..fluid.layers import reshape, transpose
-from .linalg import matmul
-from .manipulation import squeeze, unsqueeze
+from .linalg import matmul, transpose
+from .manipulation import squeeze, unsqueeze, reshape
 from .math import multiply
 from .math import sum as paddle_sum
@@ -792,10 +791,10 @@ def einsum(equation, *operands):
     - For any free label which is not present for the output, it's lowered to
       a dummy label.
     - Examples
-      - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
+      - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
        string is '...ik'
       - 'ij -> i', where i is a free label and j is a dummy label.
-      - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
+      - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
       - '...ij, ...jk -> ij', an invalid equation since `...` is not present for
        the output.
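The docstring rules above are easiest to see in action. In the first example from the docstring, i and k are free labels and j is a dummy that gets summed out, so the implicit output label string is '...ik' (snippet illustrative, not part of the commit):

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.rand([2, 4, 5])

# '...ij, ...jk' with no explicit '->': j is contracted away, the
# broadcast '...' and the free labels i, k survive.
out = paddle.einsum('...ij, ...jk', x, y)
print(out.shape)  # [2, 3, 5]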
python/paddle/tensor/linalg.py

@@ -14,8 +14,9 @@
 import numpy as np
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import in_dygraph_mode, _varbase_creator, Variable, _dygraph_tracer
+from ..framework import _varbase_creator, _dygraph_tracer
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
+from ..static import Variable
 from ..fluid.layers import transpose, cast  # noqa: F401
 from ..fluid import layers
@@ -133,7 +134,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     op_type = 'matmul_v2'
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_type)
         return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
@@ -245,7 +246,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             raise ValueError(
                 "The dim of frobenius norm op should be None or two elements list!"
             )
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             if dim is None:
                 return _C_ops.frobenius_norm(input, 'keep_dim', keepdim, 'reduce_all', True)
@@ -282,7 +283,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             axis (int, optional): None for last dimension.
             keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
         """
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             if axis is None: axis = -1
             return _C_ops.p_norm(input, 'porder', porder, 'axis', axis, 'keepdim', keepdim, 'asvector', asvector)
@@ -642,7 +643,7 @@ def cond(x, p=None, name=None):
         axis = axis if axis != None and axis != [] else [0]
         keepdim = False
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             abs_out = _C_ops.abs(input)
             sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
@@ -699,7 +700,7 @@ def cond(x, p=None, name=None):
         reduce_all = True if axis is None or axis == [] else False
         keepdim = False
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             pow_out = _C_ops.pow(input, 'factor', porder)
             sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
@@ -753,7 +754,7 @@ def cond(x, p=None, name=None):
         u, s, vh = svd(input, full_matrices=False)
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             if porder == "nuc":
                 return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
@@ -820,7 +821,7 @@ def cond(x, p=None, name=None):
         return out

     def empty_tensor(input, shape):
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             return input.reshape(shape)
         raise ValueError("only support x is nonempty tensor in static mode")
@@ -895,7 +896,7 @@ def dot(x, y, name=None):
     """
     op_type = 'dot'
     # skip var type check in dygraph mode to improve efficiency
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_type)
         return op(x, y)
@@ -1079,7 +1080,7 @@ def t(input, name=None):
             "Input(input) only support N-D (N<=2) tensor, but received "
             "length of Input(input) is %s. Perhaps you can use paddle."
             "tensor.transpose() instead." % len(input.shape))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if len(input.shape) == 1:
             return input
         # 2-D tensor
@@ -1144,7 +1145,7 @@ def cross(x, y, axis=None, name=None):
             #  [0. 0. 0.]
             #  [0. 0. 0.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is not None:
             return _C_ops.cross(x, y, 'dim', axis)
         else:
@@ -1203,7 +1204,7 @@ def cholesky(x, upper=False, name=None):
             # [1.25450498 0.05600871 0.06400121]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
@@ -1257,7 +1258,7 @@ def matrix_rank(x, tol=None, hermitian=False, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if tol is None:
             tol_tensor = None
             tol_attr = 0.0
@@ -1355,7 +1356,7 @@ def bmm(x, y, name=None):
             "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
             format(x_shape, y_shape))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.bmm(x, y)

     helper = LayerHelper('bmm', **locals())
@@ -1388,7 +1389,7 @@ def histogram(input, bins=100, min=0, max=0, name=None):
             result = paddle.histogram(inputs, bins=4, min=0, max=3)
             print(result) # [0, 2, 1, 0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)

     helper = LayerHelper('histogram', **locals())
@@ -1435,7 +1436,7 @@ def bincount(x, weights=None, minlength=0, name=None):
     if x.dtype not in [paddle.int32, paddle.int64]:
         raise TypeError("Elements in Input(x) should all be integers")

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.bincount(x, weights, "minlength", minlength)

     helper = LayerHelper('bincount', **locals())
@@ -1488,7 +1489,7 @@ def mv(x, vec, name=None):
             vec = paddle.to_tensor(vec_data).astype("float64")
             out = paddle.mv(x, vec)
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.mv(x, vec)
         return out
@@ -1541,7 +1542,7 @@ def det(x, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.determinant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
@@ -1596,7 +1597,7 @@ def slogdet(x, name=None):
             # [-0.98610914, -0.43010661, -0.10872950]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.slogdeterminant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
@@ -1669,7 +1670,7 @@ def svd(x, full_matrices=False, name=None):
             # V * VH == I
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.svd(x, 'full_matrices', full_matrices)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
     check_type(full_matrices, 'full_matrices', bool, 'svd')
@@ -1744,7 +1745,7 @@ def matrix_power(x, n, name=None):
             #  [-7.66666667 ,  8.         , -1.83333333 ],
             #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matrix_power(x, "n", n)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
@@ -1801,7 +1802,7 @@ def qr(x, mode="reduced", name=None):
             # one can verify : X = Q * R ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         q, r = _C_ops.qr(x, 'mode', mode)
         if mode == "r":
             return r
@@ -1900,7 +1901,7 @@ def lu(x, pivot=True, get_infos=False, name=None):
             # one can verify : X = P @ L @ U ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot)
         if get_infos:
             return LU, Piv, Info
@@ -1997,7 +1998,7 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
             # one can verify : X = P @ L @ U ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots)
         return P, L, U
@@ -2070,7 +2071,7 @@ def eig(x, name=None):
             #  [ (16.50471283351188+0j)  , (-5.5034820550763515+0j) ,
             #    (-0.21026087843552282+0j)])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         w, v = _C_ops.eig(x)
         return w, v
@@ -2139,7 +2140,7 @@ def eigvals(x, name=None):
             "The last two dimensions of Input(x) should be equal, but received x's shape = {}".
             format(x_shape))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.eigvals(x)

     helper = LayerHelper('eigvals', **locals())
@@ -2210,7 +2211,7 @@ def multi_dot(x, name=None):
             # [10, 7]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.multi_dot(x)

     check_type(x, 'x', (list, tuple), 'multi_dot')
@@ -2262,7 +2263,7 @@ def eigh(x, UPLO='L', name=None):
             #[ 0.3826834323650898j , -0.9238795325112867j ]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.eigh(x, 'UPLO', UPLO)

     def __check_input(x, UPLO):
@@ -2361,7 +2362,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             # or out * x * out = x ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if not hermitian:
             # combine svd and matmul op
             u, s, vt = _C_ops.svd(x, 'full_matrices', False)
@@ -2611,7 +2612,7 @@ def solve(x, y, name=None):
             print(out)
             # [2., 3.])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.solve(x, y)

     inputs = {"X": [x], "Y": [y]}
@@ -2675,7 +2676,7 @@ def triangular_solve(x,
             print(out)
             # [7, -2, -5]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose', transpose, 'unitriangular', unitriangular)
@@ -2732,7 +2733,7 @@ def cholesky_solve(x, y, upper=False, name=None):
             print(out)
             # [-2.5, -7, 9.5]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cholesky_solve(x, y, 'upper', upper)

     helper = LayerHelper("cholesky_solve", **locals())
@@ -2776,7 +2777,7 @@ def eigvalsh(x, UPLO='L', name=None):
             print(out_value)
             #[0.17157288, 5.82842712]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         is_test = x.stop_gradient
         values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
         return values
@@ -2904,7 +2905,7 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
     elif x.dtype == paddle.float64:
         rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond, "driver", driver)
         if x.shape[-2] > x.shape[-1]:
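The norm() hunks above show two separate dynamic-mode fast paths: p='fro' with axis=None goes to frobenius_norm with reduce_all=True, while a vector p-norm goes through p_norm with a None axis mapped to -1. A sketch of both entry points (illustrative, not from the commit):

import paddle

x = paddle.to_tensor([[3.0, 4.0], [0.0, 0.0]])

# Frobenius norm over the whole matrix: sqrt(9 + 16) = 5.
print(paddle.norm(x, p='fro'))
# Per-row vector 2-norms via the p_norm path.
print(paddle.norm(x, p=2, axis=1))  # [5., 0.]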
python/paddle/tensor/logic.py

@@ -15,8 +15,7 @@
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..fluid.layers.layer_function_generator import templatedoc
-from .. import fluid
-from ..fluid.framework import in_dygraph_mode, Variable
+from ..static import Variable
 from ..framework import VarBase as Tensor

 # TODO: define logic functions of a tensor
@@ -25,8 +24,7 @@ from ..fluid.layers import logical_and  # noqa: F401
 from ..fluid.layers import logical_not  # noqa: F401
 from ..fluid.layers import logical_or  # noqa: F401
 from ..fluid.layers import logical_xor  # noqa: F401
-from paddle.common_ops_import import core
 import paddle
 from paddle import _C_ops
 from paddle.tensor.creation import full
@@ -61,7 +59,7 @@ def equal_all(x, y, name=None):
             result2 = paddle.equal_all(x, z)
             print(result2) # result2 = [False ]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.equal_all(x, y)

     helper = LayerHelper("equal_all", **locals())
@@ -124,7 +122,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
             # [True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.allclose(x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan)
@@ -182,7 +180,7 @@ def equal(x, y, name=None):
     if not isinstance(y, Variable):
         y = full(shape=[1], dtype=x.dtype, fill_value=y)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.equal(x, y)

     check_variable_and_dtype(
@@ -224,7 +222,7 @@ def greater_equal(x, y, name=None):
             result1 = paddle.greater_equal(x, y)
             print(result1) # result1 = [True False True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.greater_equal(x, y)

     check_variable_and_dtype(x, "x",
@@ -270,7 +268,7 @@ def greater_than(x, y, name=None):
             result1 = paddle.greater_than(x, y)
             print(result1) # result1 = [False False True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.greater_than(x, y)

     check_variable_and_dtype(x, "x",
@@ -317,7 +315,7 @@ def less_equal(x, y, name=None):
             result1 = paddle.less_equal(x, y)
             print(result1) # result1 = [True True False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.less_equal(x, y)

     check_variable_and_dtype(
@@ -360,7 +358,7 @@ def less_than(x, y, name=None):
             result1 = paddle.less_than(x, y)
             print(result1) # result1 = [False True False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.less_than(x, y)

     check_variable_and_dtype(
@@ -403,7 +401,7 @@ def not_equal(x, y, name=None):
             result1 = paddle.not_equal(x, y)
             print(result1) # result1 = [False True True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.not_equal(x, y)

     check_variable_and_dtype(
@@ -449,7 +447,7 @@ def is_tensor(x):
 def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
@@ -637,7 +635,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
             # [True, True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isclose(x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan)
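The equal() hunk above shows how a plain Python scalar is handled: a non-Variable y is first materialized with full(shape=[1], dtype=x.dtype, fill_value=y) and then broadcast against x. A short usage sketch (not from the commit):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])

# The scalar 2.0 is wrapped into a one-element tensor of x's dtype
# before the elementwise comparison runs.
print(paddle.equal(x, 2.0))  # [False, True, False]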
python/paddle/tensor/manipulation.py

@@ -15,11 +15,11 @@
 from __future__ import print_function
 from collections import Counter

-from ..fluid.layers import core
+from ..static import Variable, device_guard
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only
+from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ..fluid.layers.tensor import fill_constant
 from ..fluid.layers import utils
 import numpy as np
 # TODO: define functions to manipulate a tensor
@@ -378,7 +378,7 @@ def broadcast_tensors(input, name=None):
     """

     num_inputs = len(input)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.broadcast_tensors(input, num_inputs)

     check_type(input, 'input', (list, tuple), 'broadcast_tensors')
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
     """
     if isinstance(axis, int):
         axis = [axis]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.flip(x, "axis", axis)

     helper = LayerHelper("flip", **locals())
@@ -671,7 +671,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if not (isinstance(x, Variable)):
         raise ValueError("The input x should be a Tensor")

-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'],
@@ -693,7 +693,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if start_axis > stop_axis:
         raise ValueError("The stop_axis should be larger than stat_axis")

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         dy_out, _ = _C_ops.flatten_contiguous_range(x, 'start_axis', start_axis, 'stop_axis', stop_axis)
         return dy_out
@@ -792,7 +792,7 @@ def roll(x, shifts, axis=None, name=None):
     else:
         axis = []

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)

     helper = LayerHelper("roll", **locals())
@@ -1108,7 +1108,7 @@ def unique_consecutive(x,
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, inverse, counts = _C_ops.unique_consecutive(
             x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
             'return_counts', return_counts, 'axis', axis)
@@ -1213,7 +1213,7 @@ def unique(x,
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, inverse, indices, counts = _C_ops.unique(
             x, 'dtype', attr_dtype, 'return_index', return_index,
             'return_inverse', return_inverse, 'return_counts', return_counts,
@@ -1397,7 +1397,7 @@ def gather(x, index, axis=None, name=None):
     if axis is None:
         axis = 0

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
         return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
@@ -1471,7 +1471,7 @@ def unbind(input, axis=0):
     input_shape = input.shape
     axis_ = axis if axis >= 0 else len(input_shape) + axis
     num = input_shape[axis_]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.unbind(input, num, 'axis', axis)

     helper = LayerHelper("unbind", **locals())
@@ -1565,7 +1565,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
             #  [2., 2.],
             #  [1., 1.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)

     check_variable_and_dtype(
@@ -1744,7 +1744,7 @@ def tile(x, repeat_times, name=None):
             np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.tile(x, 'repeat_times', repeat_times)
     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
@@ -1827,7 +1827,7 @@ def expand_as(x, y, name=None):
             np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_as_v2(x, 'target_shape', y.shape)

     check_variable_and_dtype(
@@ -1881,7 +1881,7 @@ def broadcast_to(x, shape, name=None):
             print(out)
             # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_v2(x, 'shape', shape)

     if isinstance(shape, Variable):
@@ -1968,7 +1968,7 @@ def expand(x, shape, name=None):
             print(out)
             # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_v2(x, 'shape', shape)

     if isinstance(shape, Variable):
@@ -2407,7 +2407,7 @@ def tensordot(x, y, axes=2, name=None):
     check_type(axes, 'axes', (int, tuple, list, Variable), op_type)

     def _var_to_list(var):
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             return tolist(var)
         raise TypeError(
             "The 'axes' with type 'Tensor' in " + op_type +
@@ -2523,7 +2523,7 @@ def as_complex(x, name=None):
             #  [[ 0. +1.j  2. +3.j  4. +5.j]
             #   [ 6. +7.j  8. +9.j 10.+11.j]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.as_complex(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
@@ -2572,7 +2572,7 @@ def as_real(x, name=None):
             #   [ 8.  9.]
             #   [10. 11.]]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.as_real(x)

     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
@@ -2626,7 +2626,7 @@ def repeat_interleave(x, repeats, axis=None, name=None):
         x = paddle.flatten(x)
         axis = 0

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(repeats, int):
             return _C_ops.repeat_interleave(x, None, 'Repeats', repeats, 'dim', axis)
@@ -2733,7 +2733,7 @@ def moveaxis(x, source, destination, name=None):
     for i in range(len(src_dims)):
         perm[dst_dims[i]] = src_dims[i]

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, _ = _C_ops.transpose2(x, 'axis', perm)
         return out
@@ -2814,7 +2814,7 @@ def take_along_axis(arr, indices, axis):
     if not broadcast_shape:
         # if indices matrix have larger size than arr, arr should broadcast into indices shape.
         broadcast_shape = indices.shape
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         indices = paddle.broadcast_to(indices, broadcast_shape)
         broadcast_shape_list = list(broadcast_shape)
         broadcast_shape_list[axis] = list(arr.shape)[axis]
@@ -2879,7 +2879,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
             "`indices` and `arr` must have the same number of dimensions!")
     axis = non_negative_axis(arr, axis)
     broadcast_shape = infer_broadcast_shape(arr, indices, axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         values = paddle.to_tensor(values) if not isinstance(values, paddle.Tensor) else values
         if broadcast_shape:
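Note the inverted guard in the flatten() hunks: the dtype check runs only on the static-graph path (`if not paddle.in_dynamic_mode():`), while the dynamic path goes straight to _C_ops.flatten_contiguous_range. A quick usage sketch of the function itself (not from the commit):

import paddle

x = paddle.rand([2, 3, 4])

# Collapse axes start_axis..stop_axis into a single axis.
out = paddle.flatten(x, start_axis=1, stop_axis=2)
print(out.shape)  # [2, 12]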
python/paddle/tensor/math.py

@@ -26,8 +26,9 @@ from paddle.common_ops_import import dygraph_utils
 from paddle.tensor import cast
 from paddle.tensor.attribute import _complex_to_real_dtype
 import paddle
-from ..fluid import layers
-from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
+from paddle.static import Variable
+from ..framework import core
+from ..framework import _varbase_creator, convert_np_dtype_to_dtype_
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
@@ -70,7 +71,8 @@ from ..fluid.layers import acosh  # noqa: F401
 from ..fluid.layers import atanh  # noqa: F401
 from ..fluid.layers import multiplex  # noqa: F401
+from ..fluid import layers
+from ..fluid.layers import reduce_prod
+from ..fluid.layers import elementwise_sub
 from paddle import _C_ops

 __all__ = []
@@ -147,7 +149,7 @@ def pow(x, y, name=None):
     """
     # in dynamic graph mode
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(y, (int, float)):
             return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
@@ -240,7 +242,7 @@ def add(x, y, name=None):
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.elementwise_add(x, y)

     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
@@ -319,7 +321,7 @@ def subtract(x, y, name=None):
     op_type = 'elementwise_sub'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -376,7 +378,7 @@ def divide(x, y, name=None):
     op_type = 'elementwise_div'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
@@ -415,7 +417,7 @@ def floor_divide(x, y, name=None):
     """
     op_type = 'elementwise_floordiv'
     axis = -1
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
@@ -455,7 +457,7 @@ def remainder(x, y, name=None):
     """
     op_type = 'elementwise_mod'
     axis = -1
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
@@ -505,7 +507,7 @@ def multiply(x, y, name=None):
     act = None
     axis = -1

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
@@ -570,7 +572,7 @@ def maximum(x, y, name=None):
     op_type = 'elementwise_max'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -629,7 +631,7 @@ def minimum(x, y, name=None):
     op_type = 'elementwise_min'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -690,7 +692,7 @@ def fmax(x, y, name=None):
     op_type = 'elementwise_fmax'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -751,7 +753,7 @@ def fmin(x, y, name=None):
     op_type = 'elementwise_fmin'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -860,7 +862,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
             return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
@@ -1024,7 +1026,7 @@ def add_n(inputs, name=None):
             #  [[8., 10., 12.],
             #   [14., 16., 18.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(inputs, Variable):
             inputs = [inputs]
         return _C_ops.sum(inputs, 'use_mkldnn', False)
@@ -1080,7 +1082,7 @@ def trunc(input, name=None):
             #  [[0., 0.],
             #   [0., 0.]]))
     '''
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.trunc(input)
     else:
         inputs = {"X": input}
@@ -1164,7 +1166,7 @@ def mm(input, mat2, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matmul_v2(input, mat2)

     def __check_input(x, y):
@@ -1269,7 +1271,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
         return out
@@ -1328,7 +1330,7 @@ def renorm(x, p, axis, max_norm):
         if not axis >= -1 * len(input_shape):
             raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis, -1 * len(input_shape)))
         axis = axis + len(input_shape)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = core.ops.renorm(x, 'p', p, 'axis', axis, 'max_norm', max_norm)
         return out
@@ -1384,7 +1386,7 @@ def inner(x, y, name=None):
         nx = x.reshape((-1, xshape[-1]))
         ny = y.reshape((-1, yshape[-1]))

-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             return _C_ops.matmul_v2(nx, ny.T).reshape(dstshape)

         def __check_input(x, y):
@@ -1447,7 +1449,7 @@ def outer(x, y, name=None):
     nx = x.reshape((-1, 1))
     ny = y.reshape((1, -1))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matmul_v2(nx, ny)

     def __check_input(x, y):
@@ -1516,7 +1518,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if axis is None or len(axis) == 0:
         axis = [0]

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)

     check_variable_and_dtype(x, 'x',
@@ -1560,7 +1562,7 @@ def inverse(x, name=None):
             print(inv) # [[0.5, 0], [0, 0.5]]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.inverse(x)

     def _check_input(x):
@@ -1676,7 +1678,7 @@ def max(x, axis=None, keepdim=False, name=None):
     """

     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)
@@ -1776,7 +1778,7 @@ def min(x, axis=None, keepdim=False, name=None):
     """

     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)
@@ -1889,7 +1891,7 @@ def amax(x, axis=None, keepdim=False, name=None):
     """

     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)

     helper = LayerHelper('amax', **locals())
@@ -2002,7 +2004,7 @@ def amin(x, axis=None, keepdim=False, name=None):
     """

     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)

     helper = LayerHelper('amin', **locals())
@@ -2046,7 +2048,7 @@ def log1p(x, name=None):
             # [[0.], [0.6931472]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log1p(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
@@ -2095,7 +2097,7 @@ def log2(x, name=None):
             res = paddle.log2(x_i)
             print(res) # [1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log2(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
@@ -2145,7 +2147,7 @@ def log10(x, name=None):
             res = paddle.log10(x_i)
             print(res) # [1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log10(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
@@ -2206,7 +2208,7 @@ def clip(x, min=None, max=None, name=None):
     min_ = float(np.finfo(np.float32).min)
     max_ = float(np.finfo(np.float32).max)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(min, Variable):
             min = min.numpy().item(0)
         if isinstance(max, Variable):
@@ -2339,7 +2341,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
             "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)

     __check_input(input, offset, axis1, axis2)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     inputs = {'Input': [x]}
@@ -2422,7 +2424,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
             #        [0.17020577, 0.27325270]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     def __check_input(input, offset, dim1, dim2):
@@ -2499,7 +2501,7 @@ ${comment}
             #        [12, 15, 18, 16, 20, 24],
             #        [21, 24, 27, 28, 32, 36]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.kron(x, y)

     helper = LayerHelper('kron', **locals())
@@ -2557,9 +2559,9 @@ def cumsum(x, axis=None, dtype=None, name=None):
     else:
         flatten = False
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
-        x = layers.cast(x, dtype)
+        x = cast(x, dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is None:
             return _C_ops.cumsum(x, 'flatten', flatten)
         else:
@@ -2622,9 +2624,9 @@ def cumprod(x, dim=None, dtype=None, name=None):
     """
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
-        x = layers.cast(x, dtype)
+        x = cast(x, dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cumprod(x, 'dim', dim)

     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
@@ -2656,7 +2658,7 @@ def isfinite(x, name=None):
             out = paddle.tensor.isfinite(x)
             print(out)  # [False  True  True False  True False False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isfinite_v2(x)
     helper = LayerHelper("isfinite_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
@@ -2684,7 +2686,7 @@ def isinf(x, name=None):
             out = paddle.tensor.isinf(x)
             print(out)  # [ True False False  True False False False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isinf_v2(x)
     helper = LayerHelper("isinf_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
@@ -2712,7 +2714,7 @@ def isnan(x, name=None):
             out = paddle.tensor.isnan(x)
             print(out)  # [False False False False False  True  True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isnan_v2(x)
     helper = LayerHelper("isnan_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
@@ -2783,9 +2785,9 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     if dtype is not None:
         check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
         if x.dtype != convert_np_dtype_to_dtype_(dtype):
-            x = layers.cast(x, dtype)
+            x = cast(x, dtype)

-    return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
+    return reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)

 def sign(x, name=None):
@@ -2809,7 +2811,7 @@ def sign(x, name=None):
             out = paddle.sign(x=x)
             print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.sign(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
@@ -2846,7 +2848,7 @@ def tanh(x, name=None):
             print(out)
             # [-0.37994896 -0.19737532  0.09966799  0.29131261]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.tanh(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
@@ -2888,7 +2890,7 @@ def increment(x, value=1.0, name=None):
             # [1.]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.increment(x, 'step', value)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
@@ -2969,7 +2971,7 @@ def all(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag)
@@ -3061,7 +3063,7 @@ def any(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag)
@@ -3142,7 +3144,7 @@ def conj(x, name=None):
             #        [(4-4j), (5-5j), (6-6j)]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.conj(x)

     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
@@ -3181,7 +3183,7 @@ def digamma(x, name=None):
             #        [ nan       ,  5.32286835]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.digamma(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
@@ -3212,7 +3214,7 @@ def neg(x, name=None):
             # [0.4 0.2 -0.1 -0.3]
     """

-    return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
+    return scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)

 def atan2(x, y, name=None):
     r"""
@@ -3257,7 +3259,7 @@ def atan2(x, y, name=None):
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.atan2(x, y)
     else:
         check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
@@ -3313,7 +3315,7 @@ def logit(x, eps=None, name=None):
     if eps == None:
         eps = 0.0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.logit(x, 'eps', eps)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
@@ -3356,7 +3358,7 @@ def lerp(x, y, weight, name=None):
             # out: [5.5., 6., 6.5, 7.]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
         if isinstance(weight, float):
             weight = paddle.to_tensor(weight, dtype=x.dtype)
@@ -3419,7 +3421,7 @@ def erfinv(x, name=None):
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.erfinv(x)

     helper = LayerHelper('erfinv', **locals())
@@ -3478,7 +3480,7 @@ def rad2deg(x, name=None):
             #           [57.29578018])
     """
     rad2deg_scale = 180 / np.pi
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, 'scale', rad2deg_scale)
@@ -3531,7 +3533,7 @@ def deg2rad(x, name=None):
             #           [3.14159274])
     """
     deg2rad_scale = np.pi / 180.0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, 'scale', deg2rad_scale)
@@ -3615,7 +3617,7 @@ def gcd(x, y, name=None):
             paddle.where(y_not_equal_0, paddle.mod(x, y_safe), paddle.zeros(y.shape, y.dtype)))
         return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         while _gcd_cond_fn(x, y):
             x, y = _gcd_body_fn(x, y)
@@ -3749,7 +3751,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
     dtype = x.dtype
     axes = [axis]
     infer_flags = list(1 for i in range(len(axes)))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         has_pend = False
         input_list = []
         if prepend is not None and append is not None:
@@ -3788,7 +3790,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
             op = getattr(_C_ops, "logical_xor")
             out = op(input_back, input_front)
         else:
-            out = layers.elementwise_sub(input_back, input_front, axis=axis)
+            out = elementwise_sub(input_back, input_front, axis=axis)
         return out
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
@@ -3840,7 +3842,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
                 type='logical_xor',
                 inputs={"X": input_back, "Y": input_front},
                 outputs={"Out": out})
         else:
-            out = layers.elementwise_sub(input_back, input_front, axis=axis)
+            out = elementwise_sub(input_back, input_front, axis=axis)

         return out
@@ -3883,7 +3885,7 @@ def angle(x, name=None):
             # [-1.1071488 -0.7853982  0.        0.7853982]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.angle(x)

     check_variable_and_dtype(x, 'x',
浏览文件 @
42eb56e2
...
...
@@ -14,13 +14,14 @@
# TODO: define random functions
from
..f
luid
import
core
from
..f
luid.framework
import
in_dygraph_mode
,
Variable
,
convert_np_dtype_to_dtype_
,
dygraph_only
from
..f
ramework
import
core
from
..f
ramework
import
convert_np_dtype_to_dtype_
,
dygraph_only
from
..fluid.layer_helper
import
LayerHelper
from
..fluid.data_feeder
import
check_variable_and_dtype
,
check_type
,
check_dtype
,
check_shape
from
..fluid.layers
import
utils
import
paddle
from
paddle
import
_C_ops
from
paddle.static
import
Variable
__all__
=
[]
...
...
@@ -65,7 +66,7 @@ def bernoulli(x, name=None):
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
bernoulli
(
x
)
check_variable_and_dtype
(
x
,
"x"
,
[
"float32"
,
"float64"
],
"bernoulli"
)
...
...
@@ -110,7 +111,7 @@ def poisson(x, name=None):
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
poisson
(
x
)
check_variable_and_dtype
(
x
,
"x"
,
[
"float32"
,
"float64"
],
"poisson"
)
...
...
@@ -173,7 +174,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
assert
core
.
is_compiled_with_rocm
()
==
False
,
(
"multinomial op is not supported on ROCM yet."
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
multinomial
(
x
,
'num_samples'
,
num_samples
,
'replacement'
,
replacement
)
...
...
@@ -231,7 +232,7 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
if
not
isinstance
(
dtype
,
core
.
VarDesc
.
VarType
):
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
shape
=
utils
.
convert_shape_to_list
(
shape
)
return
_C_ops
.
gaussian_random
(
'shape'
,
shape
,
'mean'
,
float
(
mean
),
'std'
,
...
...
@@ -422,7 +423,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
# [1.00780561 3.78457445 5.81058198] # random
"""
if
not
in_dygraph
_mode
():
if
not
paddle
.
in_dynamic
_mode
():
check_type
(
mean
,
'mean'
,
(
int
,
float
,
Variable
),
'normal'
)
check_type
(
std
,
'std'
,
(
int
,
float
,
Variable
),
'normal'
)
if
isinstance
(
mean
,
Variable
):
...
...
@@ -454,7 +455,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
return
gaussian
(
shape
=
shape
,
mean
=
mean
,
std
=
std
,
name
=
name
)
out
=
out
*
std
+
mean
if
not
in_dygraph
_mode
():
if
not
paddle
.
in_dynamic
_mode
():
out
.
stop_grediant
=
True
return
out
...
...
@@ -540,7 +541,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
if
not
isinstance
(
dtype
,
core
.
VarDesc
.
VarType
):
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
shape
=
utils
.
convert_shape_to_list
(
shape
)
return
_C_ops
.
uniform_random
(
'shape'
,
shape
,
'min'
,
float
(
min
),
'max'
,
...
...
@@ -679,7 +680,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
if
not
isinstance
(
dtype
,
core
.
VarDesc
.
VarType
):
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
shape
=
utils
.
convert_shape_to_list
(
shape
)
return
_C_ops
.
randint
(
'shape'
,
shape
,
'low'
,
low
,
'high'
,
high
,
'seed'
,
0
,
'dtype'
,
dtype
)
...
...
@@ -846,7 +847,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
"randint_like's low must less then high, but received low = {0}, "
"high = {1}"
.
format
(
low
,
high
))
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
shape
=
utils
.
convert_shape_to_list
(
shape
)
out
=
_C_ops
.
randint
(
'shape'
,
shape
,
'low'
,
low
,
'high'
,
high
,
'seed'
,
0
,
'dtype'
,
core
.
VarDesc
.
VarType
.
INT64
)
...
...
@@ -911,7 +912,7 @@ def randperm(n, dtype="int64", name=None):
if
not
isinstance
(
dtype
,
core
.
VarDesc
.
VarType
):
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
randperm
(
'n'
,
n
,
'seed'
,
0
,
'dtype'
,
dtype
)
if
n
<
1
:
...
...
@@ -1014,7 +1015,7 @@ def exponential_(x, lam=1.0, name=None):
# [0.72520673, 0.45208144, 0.30234432]]
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
exponential_
(
x
,
"lambda"
,
lam
)
check_variable_and_dtype
(
x
,
"x"
,
[
"float32"
,
"float64"
],
"exponential"
)
...
...
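For reference, bernoulli() above takes a float tensor of element-wise probabilities and, in dynamic mode, dispatches straight to _C_ops.bernoulli. A short usage sketch (not from the commit):

import paddle

paddle.seed(0)  # fix the RNG so the draw is reproducible
p = paddle.to_tensor([0.1, 0.5, 0.9])
print(paddle.bernoulli(p))  # one 0./1. draw per probability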
python/paddle/tensor/search.py

@@ -13,14 +13,16 @@
 # limitations under the License.
 from __future__ import print_function
 import numpy as np
 import paddle
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
-from ..fluid import core, layers
-from paddle.common_ops_import import in_dygraph_mode
+from ..fluid import layers
+from ..framework import core
 from paddle.common_ops_import import convert_np_dtype_to_dtype_
 from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
 from paddle import _C_ops
+from .logic import logical_not

 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf  #DEFINE_ALIAS
@@ -88,7 +90,7 @@ def argsort(x, axis=-1, descending=False, name=None):
             #  [1 1 0 2]
             #  [0 2 1 1]]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return ids
     check_variable_and_dtype(
@@ -165,7 +167,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         flatten = True
         axis = 0

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
         return out
@@ -242,7 +244,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
         flatten = True
         axis = 0

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)
         return out
@@ -302,7 +304,7 @@ def index_select(x, index, axis=0, name=None):
             #  [ 9. 10. 10.]]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.index_select(x, index, 'dim', axis)

     helper = LayerHelper("index_select", **locals())
@@ -378,7 +380,7 @@ def nonzero(x, as_tuple=False):
     shape = x.shape
     rank = len(shape)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         outs = _C_ops.where_index(x)
     else:
         outs = layers.where(x)
@@ -390,7 +392,7 @@ def nonzero(x, as_tuple=False):
     else:
         for i in range(rank):
             list_out.append(
-                layers.slice(outs, axes=[1], starts=[i], ends=[i + 1]))
+                paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1]))
         return tuple(list_out)
@@ -452,7 +454,7 @@ def sort(x, axis=-1, descending=False, name=None):
             #  [4. 7. 4. 6.]
             #  [5. 7. 7. 9.]]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return out
     helper = LayerHelper("sort", **locals())
@@ -501,7 +503,7 @@ def mode(x, axis=-1, keepdim=False, name=None):
             #  [1, 0]]))
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)

     helper = LayerHelper("mode", **locals())
@@ -575,7 +577,7 @@ def where(condition, x=None, y=None, name=None):
     if x is None or y is None:
         raise ValueError("either both or neither of x and y should be given")

-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
         check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where')
@@ -592,28 +594,27 @@ def where(condition, x=None, y=None, name=None):
         broadcast_y = y
     else:
         if core.is_compiled_with_xpu():
-            cond_int = layers.cast(condition, x.dtype)
-            cond_not_int = layers.cast(layers.logical_not(condition), x.dtype)
-            out1 = layers.elementwise_mul(x, cond_int)
-            out2 = layers.elementwise_mul(y, cond_not_int)
-            out = layers.elementwise_add(out1, out2)
+            cond_int = paddle.cast(condition, x.dtype)
+            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
+            out1 = paddle.multiply(x, cond_int)
+            out2 = paddle.multiply(y, cond_not_int)
+            out = paddle.add(out1, out2)
             return out
-        zeros_like_x = layers.zeros_like(x)
-        zeros_like_y = layers.zeros_like(y)
-        zeros_like_condition = layers.zeros_like(condition)
-        zeros_like_condition = layers.cast(zeros_like_condition, x.dtype)
-        cast_cond = layers.cast(condition, x.dtype)

-        broadcast_zeros = layers.elementwise_add(zeros_like_x, zeros_like_y)
-        broadcast_zeros = layers.elementwise_add(broadcast_zeros, zeros_like_condition)
-        broadcast_x = layers.elementwise_add(x, broadcast_zeros)
-        broadcast_y = layers.elementwise_add(y, broadcast_zeros)
-        broadcast_condition = layers.elementwise_add(cast_cond, broadcast_zeros)
-        broadcast_condition = layers.cast(broadcast_condition, 'bool')
+        zeros_like_x = paddle.zeros_like(x)
+        zeros_like_y = paddle.zeros_like(y)
+        zeros_like_condition = paddle.zeros_like(condition)
+        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
+        cast_cond = paddle.cast(condition, x.dtype)

-    if in_dygraph_mode():
+        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
+        broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)
+        broadcast_x = paddle.add(x, broadcast_zeros)
+        broadcast_y = paddle.add(y, broadcast_zeros)
+        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
+        broadcast_condition = paddle.cast(broadcast_condition, 'bool')
+
+    if paddle.in_dynamic_mode():
         return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
     else:
         helper = LayerHelper("where", **locals())
@@ -704,7 +705,7 @@ def index_sample(x, index):
             #  [1200 1100]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.index_sample(x, index)

     helper = LayerHelper("index_sample", **locals())
@@ -752,7 +753,7 @@ def masked_select(x, mask, name=None):
             #[1.0 5.0 6.0 9.0]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.masked_select(x, mask)

     helper = LayerHelper("masked_select", **locals())
@@ -822,7 +823,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
             # [[1 1 0 0]]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         k = k.numpy().item(0) if isinstance(k, Variable) else k
         if axis is None:
             out, indices = _C_ops.top_k_v2(x, 'k',
@@ -906,7 +907,7 @@ def searchsorted(sorted_sequence,
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.searchsorted(sorted_sequence, values, "out_int32", out_int32, "right", right)
@@ -969,7 +970,7 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
             #  [[0, 2],
             #   [1, 2]]))
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is not None:
             return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim", keepdim)
         else:
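The rewritten where() branch above brings condition, x and y to one common shape by adding zeros_like tensors, piggybacking on the broadcasting that elementwise add already performs. A standalone sketch of that trick (illustrative, not code from the commit):

import paddle

x = paddle.to_tensor([[1.0], [2.0]])    # shape [2, 1]
y = paddle.to_tensor([[10.0, 20.0]])    # shape [1, 2]

# Adding two zero tensors of the input shapes yields a zero tensor of the
# broadcast shape; adding it to x or y lifts each to that shape.
zeros = paddle.zeros_like(x) + paddle.zeros_like(y)   # shape [2, 2]
print((x + zeros).shape)  # [2, 2]
print(paddle.where((x + zeros) > (y + zeros), x + zeros, y + zeros))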
python/paddle/tensor/stat.py

@@ -15,10 +15,9 @@
 # TODO: define statistical functions of a tensor
 import numpy as np
-from ..fluid.framework import Variable
+from ..static import Variable
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import core, in_dygraph_mode
-from ..fluid import layers
+from ..framework import core
 from .search import where
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 import paddle
@@ -88,7 +87,7 @@ def mean(x, axis=None, keepdim=False, name=None):
     if axis is None or len(axis) == 0:
         axis = [0]

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)
@@ -150,7 +149,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
             out2 = paddle.var(x, axis=1)
             # [1.         4.33333333]
     """
-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')

     u = mean(x, axis, True, name)
@@ -209,7 +208,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
             out2 = paddle.std(x, axis=1)
             # [1.       2.081666]
     """
-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')

     out = var(**locals())
@@ -237,7 +236,7 @@ def numel(x, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.size(x)

     if not isinstance(x, Variable):
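As the std() hunk shows (out = var(**locals())), the standard deviation is computed on top of var(), so the two stay numerically consistent. A quick check, using the values from the docstrings above (snippet not part of the commit):

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])

print(paddle.std(x, axis=1))               # [1., 2.081666]
print(paddle.sqrt(paddle.var(x, axis=1)))  # same values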
python/paddle/tensor/to_string.py

@@ -14,7 +14,7 @@
 import paddle
 import numpy as np
-from paddle.fluid.layers import core
+from ..framework import core
 from paddle.fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

 __all__ = []