Commit 42eb56e2 (unverified)
Repository: 机器未来/Paddle, a fork of PaddlePaddle/Paddle.
Authored Feb 22, 2022 by zhiboniu; committed via GitHub on Feb 22, 2022.
Parent: a08ee62a

unset fluid in tensor (#35082)
Showing 12 changed files with 223 additions and 218 deletions (+223 −218):
    python/paddle/framework/__init__.py   +6   -1
    python/paddle/tensor/attribute.py     +4   -3
    python/paddle/tensor/creation.py      +15  -17
    python/paddle/tensor/einsum.py        +4   -5
    python/paddle/tensor/linalg.py        +35  -34
    python/paddle/tensor/logic.py         +12  -14
    python/paddle/tensor/manipulation.py  +24  -24
    python/paddle/tensor/math.py          +65  -63
    python/paddle/tensor/random.py        +14  -13
    python/paddle/tensor/search.py        +37  -36
    python/paddle/tensor/stat.py          +6   -7
    python/paddle/tensor/to_string.py     +1   -1
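All twelve files change in the same direction: call sites stop importing `in_dygraph_mode` from the legacy `..fluid.framework` module and instead call the public `paddle.in_dynamic_mode()`, while the remaining internal imports are rerouted through `paddle.framework` and `paddle.static`. A schematic before/after of the recurring call-site pattern (a sketch only; `op_wrapper` and `_C_ops.some_op` are placeholder names, not real Paddle functions):

    # Before this commit (schematic):
    from ..fluid.framework import in_dygraph_mode

    def op_wrapper(x):
        if in_dygraph_mode():          # dynamic-graph fast path
            return _C_ops.some_op(x)   # direct call into the C++ op binding
        ...                            # static path: build graph via LayerHelper

    # After this commit (schematic):
    import paddle

    def op_wrapper(x):
        if paddle.in_dynamic_mode():   # same check, reached via the public namespace
            return _C_ops.some_op(x)
        ...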
python/paddle/framework/__init__.py

@@ -32,7 +32,7 @@ from ..fluid.core import MLUPlace  # noqa: F401
 from ..fluid.core import CustomPlace  # noqa: F401
 from ..fluid.core import VarBase  # noqa: F401
-from paddle.fluid import core  # noqa: F401
+from ..fluid import core  # noqa: F401
 from ..fluid.dygraph.base import no_grad_ as no_grad  # noqa: F401
 from ..fluid.dygraph.base import grad  # noqa: F401
 from .io import save  # noqa: F401
@@ -47,5 +47,10 @@ from ..fluid.framework import set_flags  # noqa: F401
 from ..fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
 from ..fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
 from ..fluid.framework import in_dygraph_mode as in_dynamic_mode  # noqa: F401
+from ..fluid.framework import _current_expected_place, _get_paddle_place  # noqa: F401
+from ..fluid.framework import dygraph_only  # noqa: F401
+from ..fluid.framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder  # noqa: F401
+from ..fluid.framework import _in_eager_mode  # noqa: F401
+from ..fluid.framework import _dygraph_tracer  # noqa: F401
 __all__ = []
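The `in_dynamic_mode` re-export in the context above (already present before this commit) is the public check all the rewritten call sites now use. A quick usage sketch of the toggle, assuming a Paddle 2.2+ install:

    import paddle

    paddle.disable_static()              # imperative (dynamic-graph) mode, the 2.x default
    assert paddle.in_dynamic_mode()

    paddle.enable_static()               # switch to graph (static) mode
    assert not paddle.in_dynamic_mode()
    paddle.disable_static()              # back to the default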
python/paddle/tensor/attribute.py

@@ -14,7 +14,7 @@
 from __future__ import print_function
-from ..fluid.framework import core, in_dygraph_mode, Variable
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype
@@ -23,6 +23,7 @@ from ..fluid.layers import rank  # noqa: F401
 from ..fluid.layers import shape  # noqa: F401
 import paddle
 from paddle import _C_ops
+from paddle.static import Variable
 __all__ = []
@@ -184,7 +185,7 @@ def real(x, name=None):
             # [[1., 2., 3.],
             # [4., 5., 6.]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.real(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
@@ -228,7 +229,7 @@ def imag(x, name=None):
             # [[6., 5., 4.],
             # [3., 2., 1.]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.imag(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
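The `real`/`imag` wrappers touched above operate on complex tensors. A small usage sketch, assuming a Paddle version with complex dtype support (2.2+):

    import paddle

    x = paddle.to_tensor([1 + 2j, 3 + 4j])   # a complex tensor
    print(paddle.real(x))  # [1., 3.]
    print(paddle.imag(x))  # [2., 4.]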
python/paddle/tensor/creation.py

@@ -18,21 +18,19 @@ from paddle.common_ops_import import fill_constant
 from ..fluid.layers import utils
 from ..fluid.layers import tensor
-from ..fluid.framework import Variable
-from ..fluid.framework import unique_name
-from ..fluid.framework import _current_expected_place, _get_paddle_place
-from ..fluid.framework import dygraph_only
-from ..fluid.initializer import Constant
-from ..fluid.layers import core
+from ..static import Variable, device_guard
+from ..framework import _current_expected_place, _get_paddle_place
+from ..framework import dygraph_only
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
-from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator, device_guard, OpProtoHolder
+from ..framework import convert_np_dtype_to_dtype_, _varbase_creator, OpProtoHolder
 from paddle.tensor.attribute import _complex_to_real_dtype, _real_to_complex_dtype
 # TODO: define functions to get create a tensor
 from ..fluid.layers import linspace  # noqa: F401
 import paddle
 from paddle import _C_ops
-from ..fluid.framework import _in_eager_mode
+from ..framework import _in_eager_mode
 __all__ = []
@@ -214,7 +212,7 @@ def full_like(x, fill_value, dtype=None, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)
     helper = LayerHelper("full_like", **locals())
@@ -648,7 +646,7 @@ def tril(x, diagonal=0, name=None):
            #  [ 9, 10, 0, 0]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, 'tril_triu')
         return op(x, 'diagonal', diagonal, "lower", True)
@@ -715,7 +713,7 @@ def triu(x, diagonal=0, name=None):
            #  [ 0, 10, 11, 12]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, 'tril_triu')
         return op(x, 'diagonal', diagonal, "lower", False)
@@ -757,7 +755,7 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         num = len(args)
         out = _C_ops.meshgrid(list(args), num)
         return out
@@ -862,7 +860,7 @@ def diagflat(x, offset=0, name=None):
           # [0 0 0 4 0]]
     """
     padding_value = 0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if len(x.shape) == 1:
             return _C_ops.diag_v2(x, "offset", offset, "padding_value",
                                   padding_value)
@@ -976,7 +974,7 @@ def diag(x, offset=0, padding_value=0, name=None):
           print(y.numpy())
           # [4]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.diag_v2(x, "offset", offset, "padding_value",
                               padding_value)
@@ -1057,7 +1055,7 @@ def empty(shape, dtype=None, name=None):
     dtype = convert_dtype(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _C_ops.empty('shape', shape, 'dtype',
                            convert_np_dtype_to_dtype_(dtype))
@@ -1125,7 +1123,7 @@ def empty_like(x, dtype=None, name=None):
         dtype = x.dtype
     dtype = convert_dtype(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.empty('shape', x.shape, 'dtype',
                            convert_np_dtype_to_dtype_(dtype))
         out.stop_gradient = True
@@ -1309,7 +1307,7 @@ def complex(real, imag, name=None):
             # [[0.+0.j 0.+1.j 0.+2.j]
             #  [1.+0.j 1.+1.j 1.+2.j]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.complex(real, imag)
     check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
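Note the calling convention visible in the dynamic branches above: `_C_ops` bindings take op attributes as flattened name/value pairs (e.g. `'value', fill_value, 'dtype', dtype`). End users go through the public wrappers instead; a usage sketch, assuming any Paddle 2.x install:

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    y = paddle.full_like(x, fill_value=7.0)   # dispatches through the full_like path diffed above
    print(y)  # [[7., 7.], [7., 7.]]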
python/paddle/tensor/einsum.py

@@ -15,9 +15,8 @@
 import itertools
 import re
-from ..fluid.layers import reshape, transpose
-from .linalg import matmul
-from .manipulation import squeeze, unsqueeze
+from .linalg import matmul, transpose
+from .manipulation import squeeze, unsqueeze, reshape
 from .math import multiply
 from .math import sum as paddle_sum
@@ -792,10 +791,10 @@ def einsum(equation, *operands):
     - For any free label which is not present for the output, it's lowered to
       a dummy label.
     - Examples
-      - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
+      - '...ij, ...jk', where i and k are free labels, j is dummy. The output label
         string is '...ik'
       - 'ij -> i', where i is a free label and j is a dummy label.
-      - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
+      - '...ij, ...jk -> ...ijk', where i, j and k are all free labels.
     - '...ij, ...jk -> ij', an invalid equation since `...` is not present for
       the output.
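After this change `einsum` composes only `paddle.tensor` primitives (`matmul`, `transpose`, `reshape`, `squeeze`, `unsqueeze`) rather than `fluid.layers` ones; its public behavior is unchanged. A usage sketch, assuming Paddle 2.2+ where `paddle.einsum` is available:

    import paddle

    x = paddle.rand([2, 3])
    y = paddle.rand([3, 4])
    z = paddle.einsum('ij,jk->ik', x, y)   # same contraction as paddle.matmul(x, y)
    print(z.shape)  # [2, 4]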
python/paddle/tensor/linalg.py

@@ -14,8 +14,9 @@
 import numpy as np
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import in_dygraph_mode, _varbase_creator, Variable, _dygraph_tracer
+from ..framework import _varbase_creator, _dygraph_tracer
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
+from ..static import Variable
 from ..fluid.layers import transpose, cast  # noqa: F401
 from ..fluid import layers
@@ -133,7 +134,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     op_type = 'matmul_v2'
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_type)
         return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
@@ -245,7 +246,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             raise ValueError(
                 "The dim of frobenius norm op should be None or two elements list!"
             )
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             if dim is None:
                 return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                              'reduce_all', True)
@@ -282,7 +283,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
            axis (int, optional): None for last dimension.
            keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
        """
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
            if axis is None: axis = -1
            return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
                                 'keepdim', keepdim, 'asvector', asvector)
@@ -642,7 +643,7 @@ def cond(x, p=None, name=None):
        axis = axis if axis != None and axis != [] else [0]
        keepdim = False
-       if in_dygraph_mode():
+       if paddle.in_dynamic_mode():
           abs_out = _C_ops.abs(input)
           sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim',
                                       keepdim, 'reduce_all', reduce_all)
@@ -699,7 +700,7 @@ def cond(x, p=None, name=None):
        reduce_all = True if axis is None or axis == [] else False
        keepdim = False
-       if in_dygraph_mode():
+       if paddle.in_dynamic_mode():
           pow_out = _C_ops.pow(input, 'factor', porder)
           sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim',
                                         keepdim, 'reduce_all', reduce_all)
@@ -753,7 +754,7 @@ def cond(x, p=None, name=None):
        u, s, vh = svd(input, full_matrices=False)
-       if in_dygraph_mode():
+       if paddle.in_dynamic_mode():
           if porder == "nuc":
               return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim,
                                        'reduce_all', reduce_all)
@@ -820,7 +821,7 @@ def cond(x, p=None, name=None):
        return out
    def empty_tensor(input, shape):
-       if in_dygraph_mode():
+       if paddle.in_dynamic_mode():
           return input.reshape(shape)
        raise ValueError("only support x is nonempty tensor in static mode")
@@ -895,7 +896,7 @@ def dot(x, y, name=None):
     """
     op_type = 'dot'
     # skip var type check in dygraph mode to improve efficiency
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_type)
         return op(x, y)
@@ -1079,7 +1080,7 @@ def t(input, name=None):
             "Input(input) only support N-D (N<=2) tensor, but received "
             "length of Input(input) is %s. Perhaps you can use paddle."
             "tensor.transpose() instead." % len(input.shape))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if len(input.shape) == 1:
             return input
         # 2-D tensor
@@ -1144,7 +1145,7 @@ def cross(x, y, axis=None, name=None):
            # [0. 0. 0.]
            # [0. 0. 0.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is not None:
             return _C_ops.cross(x, y, 'dim', axis)
         else:
@@ -1203,7 +1204,7 @@ def cholesky(x, upper=False, name=None):
            # [1.25450498 0.05600871 0.06400121]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
@@ -1257,7 +1258,7 @@ def matrix_rank(x, tol=None, hermitian=False, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if tol is None:
             tol_tensor = None
             tol_attr = 0.0
@@ -1355,7 +1356,7 @@ def bmm(x, y, name=None):
             "x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
             format(x_shape, y_shape))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.bmm(x, y)
     helper = LayerHelper('bmm', **locals())
@@ -1388,7 +1389,7 @@ def histogram(input, bins=100, min=0, max=0, name=None):
            result = paddle.histogram(inputs, bins=4, min=0, max=3)
            print(result) # [0, 2, 1, 0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)
     helper = LayerHelper('histogram', **locals())
@@ -1435,7 +1436,7 @@ def bincount(x, weights=None, minlength=0, name=None):
     if x.dtype not in [paddle.int32, paddle.int64]:
         raise TypeError("Elements in Input(x) should all be integers")
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.bincount(x, weights, "minlength", minlength)
     helper = LayerHelper('bincount', **locals())
@@ -1488,7 +1489,7 @@ def mv(x, vec, name=None):
            vec = paddle.to_tensor(vec_data).astype("float64")
            out = paddle.mv(x, vec)
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.mv(x, vec)
         return out
@@ -1541,7 +1542,7 @@ def det(x, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.determinant(x)
     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
@@ -1596,7 +1597,7 @@ def slogdet(x, name=None):
            # [-0.98610914, -0.43010661, -0.10872950]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.slogdeterminant(x)
     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
@@ -1669,7 +1670,7 @@ def svd(x, full_matrices=False, name=None):
            # V * VH == I
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.svd(x, 'full_matrices', full_matrices)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
     check_type(full_matrices, 'full_matrices', bool, 'svd')
@@ -1744,7 +1745,7 @@ def matrix_power(x, n, name=None):
            # [-7.66666667 , 8. , -1.83333333 ],
            # [ 1.80555556 , -1.91666667 , 0.44444444 ]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matrix_power(x, "n", n)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
@@ -1801,7 +1802,7 @@ def qr(x, mode="reduced", name=None):
            # one can verify : X = Q * R ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         q, r = _C_ops.qr(x, 'mode', mode)
         if mode == "r":
             return r
@@ -1900,7 +1901,7 @@ def lu(x, pivot=True, get_infos=False, name=None):
            # one can verify : X = P @ L @ U ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot)
         if get_infos:
             return LU, Piv, Info
@@ -1997,7 +1998,7 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
            # one can verify : X = P @ L @ U ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata,
                                    'unpack_pivots', unpack_pivots)
         return P, L, U
@@ -2070,7 +2071,7 @@ def eig(x, name=None):
            #   [ (16.50471283351188+0j)  , (-5.5034820550763515+0j) ,
            #   (-0.21026087843552282+0j)])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         w, v = _C_ops.eig(x)
         return w, v
@@ -2139,7 +2140,7 @@ def eigvals(x, name=None):
             "The last two dimensions of Input(x) should be equal, but received x's shape = {}".
             format(x_shape))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.eigvals(x)
     helper = LayerHelper('eigvals', **locals())
@@ -2210,7 +2211,7 @@ def multi_dot(x, name=None):
            # [10, 7]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.multi_dot(x)
     check_type(x, 'x', (list, tuple), 'multi_dot')
@@ -2262,7 +2263,7 @@ def eigh(x, UPLO='L', name=None):
            #[ 0.3826834323650898j , -0.9238795325112867j ]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.eigh(x, 'UPLO', UPLO)
     def __check_input(x, UPLO):
@@ -2361,7 +2362,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
            # or out * x * out = x ;
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if not hermitian:
             # combine svd and matmul op
             u, s, vt = _C_ops.svd(x, 'full_matrices', False)
@@ -2611,7 +2612,7 @@ def solve(x, y, name=None):
            print(out)
            # [2., 3.])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.solve(x, y)
     inputs = {"X": [x], "Y": [y]}
@@ -2675,7 +2676,7 @@ def triangular_solve(x,
            print(out)
            # [7, -2, -5]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose',
                                        transpose, 'unitriangular',
                                        unitriangular)
@@ -2732,7 +2733,7 @@ def cholesky_solve(x, y, upper=False, name=None):
            print(out)
            # [-2.5, -7, 9.5]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cholesky_solve(x, y, 'upper', upper)
     helper = LayerHelper("cholesky_solve", **locals())
@@ -2776,7 +2777,7 @@ def eigvalsh(x, UPLO='L', name=None):
            print(out_value)
            #[0.17157288, 5.82842712]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         is_test = x.stop_gradient
         values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
         return values
@@ -2904,7 +2905,7 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
        elif x.dtype == paddle.float64:
            rcond = 1e-15 * max(x.shape[-2], x.shape[-1])
-   if in_dygraph_mode():
+   if paddle.in_dynamic_mode():
        solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond,
                                                       "driver", driver)
        if x.shape[-2] > x.shape[-1]:
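The two `norm` hunks above cover the Frobenius-norm and p-norm branches of the same public entry point. A quick sanity check of both code paths through the public API, assuming any Paddle 2.x install:

    import paddle

    x = paddle.to_tensor([[3.0, 4.0], [0.0, 0.0]])
    print(paddle.linalg.norm(x, p='fro'))       # sqrt(3^2 + 4^2) = 5.0 (frobenius_norm path)
    print(paddle.linalg.norm(x, p=2, axis=1))   # row-wise 2-norms [5., 0.] (p_norm path)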
python/paddle/tensor/logic.py

@@ -15,8 +15,7 @@
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..fluid.layers.layer_function_generator import templatedoc
-from .. import fluid
-from ..fluid.framework import in_dygraph_mode, Variable
+from ..static import Variable
 from ..framework import VarBase as Tensor
 # TODO: define logic functions of a tensor
@@ -25,8 +24,7 @@ from ..fluid.layers import logical_and  # noqa: F401
 from ..fluid.layers import logical_not  # noqa: F401
 from ..fluid.layers import logical_or  # noqa: F401
 from ..fluid.layers import logical_xor  # noqa: F401
-from paddle.common_ops_import import core
+import paddle
 from paddle import _C_ops
 from paddle.tensor.creation import full
@@ -61,7 +59,7 @@ def equal_all(x, y, name=None):
           result2 = paddle.equal_all(x, z)
           print(result2) # result2 = [False ]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.equal_all(x, y)
     helper = LayerHelper("equal_all", **locals())
@@ -124,7 +122,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
           # [True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.allclose(x, y, 'rtol',
                                str(rtol), 'atol',
                                str(atol), 'equal_nan', equal_nan)
@@ -182,7 +180,7 @@ def equal(x, y, name=None):
     if not isinstance(y, Variable):
         y = full(shape=[1], dtype=x.dtype, fill_value=y)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.equal(x, y)
     check_variable_and_dtype(
@@ -224,7 +222,7 @@ def greater_equal(x, y, name=None):
           result1 = paddle.greater_equal(x, y)
           print(result1) # result1 = [True False True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.greater_equal(x, y)
     check_variable_and_dtype(x, "x",
@@ -270,7 +268,7 @@ def greater_than(x, y, name=None):
           result1 = paddle.greater_than(x, y)
           print(result1) # result1 = [False False True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.greater_than(x, y)
     check_variable_and_dtype(x, "x",
@@ -317,7 +315,7 @@ def less_equal(x, y, name=None):
           result1 = paddle.less_equal(x, y)
           print(result1) # result1 = [True True False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.less_equal(x, y)
     check_variable_and_dtype(
@@ -360,7 +358,7 @@ def less_than(x, y, name=None):
           result1 = paddle.less_than(x, y)
           print(result1) # result1 = [False True False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.less_than(x, y)
     check_variable_and_dtype(
@@ -403,7 +401,7 @@ def not_equal(x, y, name=None):
           result1 = paddle.not_equal(x, y)
           print(result1) # result1 = [False True True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.not_equal(x, y)
     check_variable_and_dtype(
@@ -449,7 +447,7 @@ def is_tensor(x):
 def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
@@ -637,7 +635,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
           # [True, True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isclose(x, y, 'rtol',
                               str(rtol), 'atol',
                               str(atol), 'equal_nan', equal_nan)
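`allclose` and `isclose` above pass `rtol`/`atol` to the C++ op as string attributes; semantically the check is elementwise `|x - y| <= atol + rtol * |y|`, with `allclose` reducing the result with all(). A usage sketch, assuming the APIs as they exist at this revision:

    import paddle

    x = paddle.to_tensor([1.0, 1e-8])
    y = paddle.to_tensor([1.000001, 0.0])
    print(paddle.allclose(x, y, rtol=1e-5, atol=1e-7))  # True: both elements pass the bound
    print(paddle.isclose(x, y, rtol=1e-5, atol=1e-7))   # elementwise: [True, True]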
python/paddle/tensor/manipulation.py

@@ -15,11 +15,11 @@
 from __future__ import print_function
 from collections import Counter
-from ..fluid.layers import core
+from ..static import Variable, device_guard
+from ..framework import core
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import Variable, OpProtoHolder, in_dygraph_mode, convert_np_dtype_to_dtype_, device_guard, dygraph_only
+from ..framework import OpProtoHolder, convert_np_dtype_to_dtype_, dygraph_only
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
-from ..fluid.layers.tensor import fill_constant
 from ..fluid.layers import utils
 import numpy as np
 # TODO: define functions to manipulate a tensor
@@ -378,7 +378,7 @@ def broadcast_tensors(input, name=None):
     """
     num_inputs = len(input)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.broadcast_tensors(input, num_inputs)
     check_type(input, 'input', (list, tuple), 'broadcast_tensors')
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
     """
     if isinstance(axis, int):
         axis = [axis]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.flip(x, "axis", axis)
     helper = LayerHelper("flip", **locals())
@@ -671,7 +671,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if not (isinstance(x, Variable)):
         raise ValueError("The input x should be a Tensor")
-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_variable_and_dtype(
             x, 'x',
             ['float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8'],
@@ -693,7 +693,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if start_axis > stop_axis:
         raise ValueError("The stop_axis should be larger than stat_axis")
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         dy_out, _ = _C_ops.flatten_contiguous_range(x, 'start_axis', start_axis,
                                                     'stop_axis', stop_axis)
         return dy_out
@@ -792,7 +792,7 @@ def roll(x, shifts, axis=None, name=None):
     else:
         axis = []
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
     helper = LayerHelper("roll", **locals())
@@ -1108,7 +1108,7 @@ def unique_consecutive(x,
         else:
             axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, inverse, counts = _C_ops.unique_consecutive(
             x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
             'return_counts', return_counts, 'axis', axis)
@@ -1213,7 +1213,7 @@ def unique(x,
         else:
             axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, inverse, indices, counts = _C_ops.unique(
             x, 'dtype', attr_dtype, 'return_index', return_index,
             'return_inverse', return_inverse, 'return_counts', return_counts,
@@ -1397,7 +1397,7 @@ def gather(x, index, axis=None, name=None):
     if axis is None:
         axis = 0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
         return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
@@ -1471,7 +1471,7 @@ def unbind(input, axis=0):
     input_shape = input.shape
     axis_ = axis if axis >= 0 else len(input_shape) + axis
     num = input_shape[axis_]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.unbind(input, num, 'axis', axis)
     helper = LayerHelper("unbind", **locals())
@@ -1565,7 +1565,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
            #  [2., 2.],
            #  [1., 1.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)
     check_variable_and_dtype(
@@ -1744,7 +1744,7 @@ def tile(x, repeat_times, name=None):
            np_out = out.numpy()
            # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.tile(x, 'repeat_times', repeat_times)
     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
@@ -1827,7 +1827,7 @@ def expand_as(x, y, name=None):
            np_out = out.numpy()
            # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_as_v2(x, 'target_shape', y.shape)
     check_variable_and_dtype(
@@ -1881,7 +1881,7 @@ def broadcast_to(x, shape, name=None):
            print(out)
            # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_v2(x, 'shape', shape)
     if isinstance(shape, Variable):
@@ -1968,7 +1968,7 @@ def expand(x, shape, name=None):
            print(out)
            # [[1, 2, 3], [1, 2, 3]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.expand_v2(x, 'shape', shape)
     if isinstance(shape, Variable):
@@ -2407,7 +2407,7 @@ def tensordot(x, y, axes=2, name=None):
     check_type(axes, 'axes', (int, tuple, list, Variable), op_type)
     def _var_to_list(var):
-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             return tolist(var)
         raise TypeError(
             "The 'axes' with type 'Tensor' in " + op_type +
@@ -2523,7 +2523,7 @@ def as_complex(x, name=None):
            # [[ 0. +1.j  2. +3.j  4. +5.j]
            #  [ 6. +7.j  8. +9.j 10.+11.j]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.as_complex(x)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
@@ -2572,7 +2572,7 @@ def as_real(x, name=None):
            #   [ 8.  9.]
            #   [10. 11.]]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return paddle._C_ops.as_real(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
@@ -2626,7 +2626,7 @@ def repeat_interleave(x, repeats, axis=None, name=None):
         x = paddle.flatten(x)
         axis = 0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(repeats, int):
             return _C_ops.repeat_interleave(x, None, 'Repeats', repeats, 'dim',
                                             axis)
@@ -2733,7 +2733,7 @@ def moveaxis(x, source, destination, name=None):
     for i in range(len(src_dims)):
         perm[dst_dims[i]] = src_dims[i]
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out, _ = _C_ops.transpose2(x, 'axis', perm)
         return out
@@ -2814,7 +2814,7 @@ def take_along_axis(arr, indices, axis):
     if not broadcast_shape:
         # if indices matrix have larger size than arr, arr should broadcast into indices shape.
         broadcast_shape = indices.shape
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         indices = paddle.broadcast_to(indices, broadcast_shape)
         broadcast_shape_list = list(broadcast_shape)
         broadcast_shape_list[axis] = list(arr.shape)[axis]
@@ -2879,7 +2879,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
             "`indices` and `arr` must have the same number of dimensions!")
     axis = non_negative_axis(arr, axis)
     broadcast_shape = infer_broadcast_shape(arr, indices, axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         values = paddle.to_tensor(values) if not isinstance(
             values, paddle.Tensor) else values
         if broadcast_shape:
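The manipulation wrappers keep their documented behavior; for instance `tile`, whose docstring example appears in the hunk above, still broadcasts a 1-D input before repeating it:

    import paddle

    x = paddle.to_tensor([1, 2, 3])
    out = paddle.tile(x, repeat_times=[2, 1])
    print(out)  # [[1, 2, 3], [1, 2, 3]]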
python/paddle/tensor/math.py

@@ -26,8 +26,9 @@ from paddle.common_ops_import import dygraph_utils
 from paddle.tensor import cast
 from paddle.tensor.attribute import _complex_to_real_dtype
 import paddle
-from ..fluid import layers
-from ..fluid.framework import core, _varbase_creator, in_dygraph_mode, Variable, convert_np_dtype_to_dtype_
+from paddle.static import Variable
+from ..framework import core
+from ..framework import _varbase_creator, convert_np_dtype_to_dtype_
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
 from ..fluid.layers.layer_function_generator import _generate_doc_string_, generate_activation_fn, generate_layer_fn
@@ -70,7 +71,8 @@ from ..fluid.layers import acosh  # noqa: F401
 from ..fluid.layers import atanh  # noqa: F401
 from ..fluid.layers import multiplex  # noqa: F401
-from ..fluid import layers
+from ..fluid.layers import reduce_prod
+from ..fluid.layers import elementwise_sub
 from paddle import _C_ops
 __all__ = []
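The elementwise hunks below all share one shape: set `op_type`/`axis`/`act`, take the `_elementwise_op_in_dygraph` fast path in dynamic mode, and otherwise fall back to `_elementwise_op(LayerHelper(...))`. From the user's side these remain the familiar binary ops; a usage sketch, assuming any Paddle 2.x install:

    import paddle

    x = paddle.to_tensor([2.0, 3.0, 4.0])
    y = paddle.to_tensor([1.0, 5.0, 2.0])
    print(paddle.add(x, y))        # [3., 8., 6.]
    print(paddle.subtract(x, y))   # [1., -2., 2.]
    print(paddle.maximum(x, y))    # [2., 5., 4.]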
...
@@ -147,7 +149,7 @@ def pow(x, y, name=None):
...
@@ -147,7 +149,7 @@ def pow(x, y, name=None):
"""
"""
# in dynamic graph mode
# in dynamic graph mode
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
if
isinstance
(
y
,
(
int
,
float
)):
if
isinstance
(
y
,
(
int
,
float
)):
return
_C_ops
.
pow
(
x
,
'factor'
,
y
)
return
_C_ops
.
pow
(
x
,
'factor'
,
y
)
elif
isinstance
(
y
,
(
paddle
.
Tensor
,
Variable
)):
elif
isinstance
(
y
,
(
paddle
.
Tensor
,
Variable
)):
...
@@ -240,7 +242,7 @@ def add(x, y, name=None):
...
@@ -240,7 +242,7 @@ def add(x, y, name=None):
"""
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
elementwise_add
(
x
,
y
)
return
_C_ops
.
elementwise_add
(
x
,
y
)
return
_elementwise_op
(
LayerHelper
(
'elementwise_add'
,
**
locals
()))
return
_elementwise_op
(
LayerHelper
(
'elementwise_add'
,
**
locals
()))
...
@@ -319,7 +321,7 @@ def subtract(x, y, name=None):
...
@@ -319,7 +321,7 @@ def subtract(x, y, name=None):
op_type
=
'elementwise_sub'
op_type
=
'elementwise_sub'
axis
=
-
1
axis
=
-
1
act
=
None
act
=
None
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_elementwise_op_in_dygraph
(
return
_elementwise_op_in_dygraph
(
x
,
y
,
axis
=
axis
,
act
=
act
,
op_name
=
op_type
)
x
,
y
,
axis
=
axis
,
act
=
act
,
op_name
=
op_type
)
return
_elementwise_op
(
LayerHelper
(
op_type
,
**
locals
()))
return
_elementwise_op
(
LayerHelper
(
op_type
,
**
locals
()))
...
@@ -376,7 +378,7 @@ def divide(x, y, name=None):
...
@@ -376,7 +378,7 @@ def divide(x, y, name=None):
op_type
=
'elementwise_div'
op_type
=
'elementwise_div'
axis
=
-
1
axis
=
-
1
act
=
None
act
=
None
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_elementwise_op_in_dygraph
(
return
_elementwise_op_in_dygraph
(
x
,
y
,
axis
=
axis
,
act
=
act
,
op_name
=
op_type
)
x
,
y
,
axis
=
axis
,
act
=
act
,
op_name
=
op_type
)
@@ -415,7 +417,7 @@ def floor_divide(x, y, name=None):
     """
     op_type = 'elementwise_floordiv'
     axis = -1
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, op_name=op_type)

@@ -455,7 +457,7 @@ def remainder(x, y, name=None):
     """
     op_type = 'elementwise_mod'
     axis = -1
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, op_name=op_type)

@@ -505,7 +507,7 @@ def multiply(x, y, name=None):
     act = None
     axis = -1
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)

@@ -570,7 +572,7 @@ def maximum(x, y, name=None):
     op_type = 'elementwise_max'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))

@@ -629,7 +631,7 @@ def minimum(x, y, name=None):
     op_type = 'elementwise_min'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))

@@ -690,7 +692,7 @@ def fmax(x, y, name=None):
     op_type = 'elementwise_fmax'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))

@@ -751,7 +753,7 @@ def fmin(x, y, name=None):
     op_type = 'elementwise_fmin'
     axis = -1
     act = None
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
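
Every hunk above makes the same mechanical swap: the dygraph check that used to be imported from fluid becomes the public paddle.in_dynamic_mode() entry point. As a quick sanity check (my own example, not code from this commit), the flag these branches test is the one toggled by the public static-graph switches:

import paddle

# Dynamic (eager) mode is Paddle's default, so the _C_ops fast path in the
# hunks above is what normally runs.
print(paddle.in_dynamic_mode())   # True

# Entering static-graph mode flips the flag; the same functions then fall
# through to their LayerHelper-based graph-building branch.
paddle.enable_static()
print(paddle.in_dynamic_mode())   # False
paddle.disable_static()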
@@ -860,7 +862,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
             return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,

@@ -1024,7 +1026,7 @@ def add_n(inputs, name=None):
             #  [[8., 10., 12.],
             #   [14., 16., 18.]]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(inputs, Variable):
             inputs = [inputs]
         return _C_ops.sum(inputs, 'use_mkldnn', False)

@@ -1080,7 +1082,7 @@ def trunc(input, name=None):
             #  [[0., 0.],
             #   [0., 0.]]))
     '''
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.trunc(input)
     else:
         inputs = {"X": input}

@@ -1164,7 +1166,7 @@ def mm(input, mat2, name=None):
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matmul_v2(input, mat2)

     def __check_input(x, y):

@@ -1269,7 +1271,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
         return out

@@ -1328,7 +1330,7 @@ def renorm(x, p, axis, max_norm):
         if not axis >= -1 * len(input_shape):
             raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis, -1 * len(input_shape)))
         axis = axis + len(input_shape)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         out = core.ops.renorm(x, 'p', p, 'axis', axis, 'max_norm', max_norm)
         return out

@@ -1384,7 +1386,7 @@ def inner(x, y, name=None):
         nx = x.reshape((-1, xshape[-1]))
         ny = y.reshape((-1, yshape[-1]))

-        if in_dygraph_mode():
+        if paddle.in_dynamic_mode():
             return _C_ops.matmul_v2(nx, ny.T).reshape(dstshape)

         def __check_input(x, y):

@@ -1447,7 +1449,7 @@ def outer(x, y, name=None):
     nx = x.reshape((-1, 1))
     ny = y.reshape((1, -1))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.matmul_v2(nx, ny)

     def __check_input(x, y):

@@ -1516,7 +1518,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if axis is None or len(axis) == 0:
         axis = [0]

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)

     check_variable_and_dtype(x, 'x',

@@ -1560,7 +1562,7 @@ def inverse(x, name=None):
             print(inv) # [[0.5, 0], [0, 0.5]]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.inverse(x)

     def _check_input(x):

@@ -1676,7 +1678,7 @@ def max(x, axis=None, keepdim=False, name=None):
     """
     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
                                  'reduce_all', reduce_all)

@@ -1776,7 +1778,7 @@ def min(x, axis=None, keepdim=False, name=None):
     """
     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
                                  'reduce_all', reduce_all)

@@ -1889,7 +1891,7 @@ def amax(x, axis=None, keepdim=False, name=None):
     """
     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)

     helper = LayerHelper('amax', **locals())

@@ -2002,7 +2004,7 @@ def amin(x, axis=None, keepdim=False, name=None):
     """
     reduce_all, axis = _get_reduce_all_value(axis)
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)

     helper = LayerHelper('amin', **locals())

@@ -2046,7 +2048,7 @@ def log1p(x, name=None):
             # [[0.], [0.6931472]]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log1p(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")

@@ -2095,7 +2097,7 @@ def log2(x, name=None):
             res = paddle.log2(x_i)
             print(res) # [1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log2(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")

@@ -2145,7 +2147,7 @@ def log10(x, name=None):
             res = paddle.log10(x_i)
             print(res) # [1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.log10(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")

@@ -2206,7 +2208,7 @@ def clip(x, min=None, max=None, name=None):
     min_ = float(np.finfo(np.float32).min)
     max_ = float(np.finfo(np.float32).max)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if isinstance(min, Variable):
             min = min.numpy().item(0)
         if isinstance(max, Variable):

@@ -2339,7 +2341,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
             "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)

     __check_input(input, offset, axis1, axis2)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     inputs = {'Input': [x]}

@@ -2422,7 +2424,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
             #        [0.17020577, 0.27325270]])

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     def __check_input(input, offset, dim1, dim2):

@@ -2499,7 +2501,7 @@ ${comment}
             #        [12, 15, 18, 16, 20, 24],
             #        [21, 24, 27, 28, 32, 36]])
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.kron(x, y)

     helper = LayerHelper('kron', **locals())
@@ -2557,9 +2559,9 @@ def cumsum(x, axis=None, dtype=None, name=None):
     else:
         flatten = False
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
-        x = layers.cast(x, dtype)
+        x = cast(x, dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is None:
             return _C_ops.cumsum(x, 'flatten', flatten)
         else:

@@ -2622,9 +2624,9 @@ def cumprod(x, dim=None, dtype=None, name=None):
     """
     if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype):
-        x = layers.cast(x, dtype)
+        x = cast(x, dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.cumprod(x, 'dim', dim)

     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
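
These two hunks also drop the layers. prefix so that cast is called directly from the tensor namespace. A small usage check of the dtype path being edited (my example, not from the diff):

import paddle

# When dtype differs from x.dtype, cumsum first casts the input (the
# cast(x, dtype) line above) and then accumulates in the requested type.
x = paddle.to_tensor([1, 2, 3], dtype='int32')
out = paddle.cumsum(x, dtype='float64')
print(out.dtype)  # paddle.float64
print(out)        # [1., 3., 6.]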
@@ -2656,7 +2658,7 @@ def isfinite(x, name=None):
             out = paddle.tensor.isfinite(x)
             print(out)  # [False  True  True False  True False False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isfinite_v2(x)
     helper = LayerHelper("isfinite_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')

@@ -2684,7 +2686,7 @@ def isinf(x, name=None):
             out = paddle.tensor.isinf(x)
             print(out)  # [ True False False  True False False False]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isinf_v2(x)
     helper = LayerHelper("isinf_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')

@@ -2712,7 +2714,7 @@ def isnan(x, name=None):
             out = paddle.tensor.isnan(x)
             print(out)  # [False False False False False  True  True]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.isnan_v2(x)
     helper = LayerHelper("isnan_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')

@@ -2783,9 +2785,9 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     if dtype is not None:
         check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
         if x.dtype != convert_np_dtype_to_dtype_(dtype):
-            x = layers.cast(x, dtype)
+            x = cast(x, dtype)

-    return layers.reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)
+    return reduce_prod(input=x, dim=axis, keep_dim=keepdim, name=name)


 def sign(x, name=None):

@@ -2809,7 +2811,7 @@ def sign(x, name=None):
           out = paddle.sign(x=x)
           print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.sign(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')

@@ -2846,7 +2848,7 @@ def tanh(x, name=None):
             print(out)
             # [-0.37994896 -0.19737532  0.09966799  0.29131261]
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.tanh(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')

@@ -2888,7 +2890,7 @@ def increment(x, value=1.0, name=None):
             # [1.]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.increment(x, 'step', value)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],

@@ -2969,7 +2971,7 @@ def all(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
                                  'reduce_all', reduce_all_flag)

@@ -3061,7 +3063,7 @@ def any(x, axis=None, keepdim=False, name=None):
     else:
         reduce_all_flag = False

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         axis = axis if axis != None and axis != [] else [0]
         return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
                                  'reduce_all', reduce_all_flag)

@@ -3142,7 +3144,7 @@ def conj(x, name=None):
           #        [(4-4j), (5-5j), (6-6j)]])

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.conj(x)

     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')

@@ -3181,7 +3183,7 @@ def digamma(x, name=None):
             #        [ nan       ,  5.32286835]])
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.digamma(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')

@@ -3212,7 +3214,7 @@ def neg(x, name=None):
             # [0.4 0.2 -0.1 -0.3]
     """

-    return layers.scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
+    return scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)


 def atan2(x, y, name=None):
     r"""

@@ -3257,7 +3259,7 @@ def atan2(x, y, name=None):
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.atan2(x, y)
     else:
         check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')

@@ -3313,7 +3315,7 @@ def logit(x, eps=None, name=None):
     if eps == None:
         eps = 0.0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.logit(x, 'eps', eps)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')

@@ -3356,7 +3358,7 @@ def lerp(x, y, weight, name=None):
             # out: [5.5., 6., 6.5, 7.]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         check_type(weight, 'weight', (float, paddle.Tensor, Variable), 'lerp')
         if isinstance(weight, float):
             weight = paddle.to_tensor(weight, dtype=x.dtype)

@@ -3419,7 +3421,7 @@ def erfinv(x, name=None):
     """
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.erfinv(x)

     helper = LayerHelper('erfinv', **locals())

@@ -3478,7 +3480,7 @@ def rad2deg(x, name=None):
             #         [57.29578018])
     """
     rad2deg_scale = 180 / np.pi
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, 'scale', rad2deg_scale)

@@ -3531,7 +3533,7 @@ def deg2rad(x, name=None):
             #         [3.14159274])
     """
     deg2rad_scale = np.pi / 180.0
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, 'scale', deg2rad_scale)

@@ -3615,7 +3617,7 @@ def gcd(x, y, name=None):
             paddle.where(y_not_equal_0, paddle.mod(x, y_safe), paddle.zeros(y.shape, y.dtype)))
         return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         while _gcd_cond_fn(x, y):
             x, y = _gcd_body_fn(x, y)

@@ -3749,7 +3751,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
     dtype = x.dtype
     axes = [axis]
     infer_flags = list(1 for i in range(len(axes)))
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         has_pend = False
         input_list = []
         if prepend is not None and append is not None:

@@ -3788,7 +3790,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
             op = getattr(_C_ops, "logical_xor")
             out = op(input_back, input_front)
         else:
-            out = layers.elementwise_sub(input_back, input_front, axis=axis)
+            out = elementwise_sub(input_back, input_front, axis=axis)
         return out
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')

@@ -3840,7 +3842,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
                 type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out})
         else:
-            out = layers.elementwise_sub(input_back, input_front, axis=axis)
+            out = elementwise_sub(input_back, input_front, axis=axis)

         return out

@@ -3883,7 +3885,7 @@ def angle(x, name=None):
             # [-1.1071488 -0.7853982  0.        0.7853982]]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.angle(x)

     check_variable_and_dtype(x, 'x',
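
Taken together, the math.py hunks leave every function with the same two-branch shape. Below is a minimal sketch of that shape, written against tanh since its dynamic branch appears above; the static branch here is a reconstruction of the usual LayerHelper pattern, not text from this diff:

import paddle
from paddle import _C_ops
from paddle.fluid.layer_helper import LayerHelper

def tanh_like(x, name=None):
    if paddle.in_dynamic_mode():
        # eager execution: call the C++ kernel directly
        return _C_ops.tanh(x)
    # static graph: append a tanh operator to the current program instead
    helper = LayerHelper('tanh', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
    return out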
python/paddle/tensor/random.py
@@ -14,13 +14,14 @@

 # TODO: define random functions

-from ..fluid import core
-from ..fluid.framework import in_dygraph_mode, Variable, convert_np_dtype_to_dtype_, dygraph_only
+from ..framework import core
+from ..framework import convert_np_dtype_to_dtype_, dygraph_only
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape
 from ..fluid.layers import utils
 import paddle
 from paddle import _C_ops
+from paddle.static import Variable

 __all__ = []
@@ -65,7 +66,7 @@ def bernoulli(x, name=None):

     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.bernoulli(x)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")

@@ -110,7 +111,7 @@ def poisson(x, name=None):

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.poisson(x)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")

@@ -173,7 +174,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     assert core.is_compiled_with_rocm() == False, (
         "multinomial op is not supported on ROCM yet.")

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
                                   replacement)

@@ -231,7 +232,7 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         return _C_ops.gaussian_random('shape', shape, 'mean',
                                       float(mean), 'std',

@@ -422,7 +423,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         # [1.00780561 3.78457445 5.81058198]  # random

     """
-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         check_type(mean, 'mean', (int, float, Variable), 'normal')
         check_type(std, 'std', (int, float, Variable), 'normal')
         if isinstance(mean, Variable):

@@ -454,7 +455,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         return gaussian(shape=shape, mean=mean, std=std, name=name)

     out = out * std + mean
-    if not in_dygraph_mode():
+    if not paddle.in_dynamic_mode():
         out.stop_grediant = True
     return out

@@ -540,7 +541,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         return _C_ops.uniform_random('shape', shape, 'min',
                                      float(min), 'max',

@@ -679,7 +680,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         return _C_ops.randint('shape', shape, 'low', low, 'high', high, 'seed',
                               0, 'dtype', dtype)

@@ -846,7 +847,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
             "randint_like's low must less then high, but received low = {0}, "
             "high = {1}".format(low, high))

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _C_ops.randint('shape', shape, 'low', low, 'high', high, 'seed',
                              0, 'dtype', core.VarDesc.VarType.INT64)

@@ -911,7 +912,7 @@ def randperm(n, dtype="int64", name=None):
     if not isinstance(dtype, core.VarDesc.VarType):
         dtype = convert_np_dtype_to_dtype_(dtype)

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)

     if n < 1:

@@ -1014,7 +1015,7 @@ def exponential_(x, lam=1.0, name=None):
             #  [0.72520673, 0.45208144, 0.30234432]]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.exponential_(x, "lambda", lam)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
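
A behavioral note on the samplers above (my own example, not part of the commit): the 'seed', 0 attribute in these calls defers to the global generator, so fixing that generator with paddle.seed reproduces the draws in dynamic mode:

import paddle

paddle.seed(2022)
a = paddle.randint(low=0, high=10, shape=[3])
paddle.seed(2022)
b = paddle.randint(low=0, high=10, shape=[3])
print(bool((a == b).all()))  # True: same seed, same draws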
python/paddle/tensor/search.py
@@ -13,14 +13,16 @@
 # limitations under the License.

 from __future__ import print_function
 import numpy as np
+import paddle
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
-from ..fluid import core, layers
-from paddle.common_ops_import import in_dygraph_mode
+from ..fluid import layers
+from ..framework import core
 from paddle.common_ops_import import convert_np_dtype_to_dtype_
 from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
 from paddle import _C_ops
+from .logic import logical_not

 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf  #DEFINE_ALIAS
@@ -88,7 +90,7 @@ def argsort(x, axis=-1, descending=False, name=None):
...
@@ -88,7 +90,7 @@ def argsort(x, axis=-1, descending=False, name=None):
# [1 1 0 2]
# [1 1 0 2]
# [0 2 1 1]]]
# [0 2 1 1]]]
"""
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
_
,
ids
=
_C_ops
.
argsort
(
x
,
'axis'
,
axis
,
'descending'
,
descending
)
_
,
ids
=
_C_ops
.
argsort
(
x
,
'axis'
,
axis
,
'descending'
,
descending
)
return
ids
return
ids
check_variable_and_dtype
(
check_variable_and_dtype
(
...
@@ -165,7 +167,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
...
@@ -165,7 +167,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
flatten
=
True
flatten
=
True
axis
=
0
axis
=
0
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
out
=
_C_ops
.
arg_max
(
x
,
'axis'
,
axis
,
'dtype'
,
var_dtype
,
'keepdims'
,
out
=
_C_ops
.
arg_max
(
x
,
'axis'
,
axis
,
'dtype'
,
var_dtype
,
'keepdims'
,
keepdim
,
'flatten'
,
flatten
)
keepdim
,
'flatten'
,
flatten
)
return
out
return
out
...
@@ -242,7 +244,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
...
@@ -242,7 +244,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
flatten
=
True
flatten
=
True
axis
=
0
axis
=
0
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
out
=
_C_ops
.
arg_min
(
x
,
'axis'
,
axis
,
'dtype'
,
var_dtype
,
'keepdims'
,
out
=
_C_ops
.
arg_min
(
x
,
'axis'
,
axis
,
'dtype'
,
var_dtype
,
'keepdims'
,
keepdim
,
'flatten'
,
flatten
)
keepdim
,
'flatten'
,
flatten
)
return
out
return
out
...
@@ -302,7 +304,7 @@ def index_select(x, index, axis=0, name=None):
...
@@ -302,7 +304,7 @@ def index_select(x, index, axis=0, name=None):
# [ 9. 10. 10.]]
# [ 9. 10. 10.]]
"""
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
index_select
(
x
,
index
,
'dim'
,
axis
)
return
_C_ops
.
index_select
(
x
,
index
,
'dim'
,
axis
)
helper
=
LayerHelper
(
"index_select"
,
**
locals
())
helper
=
LayerHelper
(
"index_select"
,
**
locals
())
...
@@ -378,7 +380,7 @@ def nonzero(x, as_tuple=False):
...
@@ -378,7 +380,7 @@ def nonzero(x, as_tuple=False):
shape
=
x
.
shape
shape
=
x
.
shape
rank
=
len
(
shape
)
rank
=
len
(
shape
)
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
outs
=
_C_ops
.
where_index
(
x
)
outs
=
_C_ops
.
where_index
(
x
)
else
:
else
:
outs
=
layers
.
where
(
x
)
outs
=
layers
.
where
(
x
)
...
@@ -390,7 +392,7 @@ def nonzero(x, as_tuple=False):
...
@@ -390,7 +392,7 @@ def nonzero(x, as_tuple=False):
else
:
else
:
for
i
in
range
(
rank
):
for
i
in
range
(
rank
):
list_out
.
append
(
list_out
.
append
(
layers
.
slice
(
paddle
.
slice
(
outs
,
axes
=
[
1
],
starts
=
[
i
],
ends
=
[
i
+
1
]))
outs
,
axes
=
[
1
],
starts
=
[
i
],
ends
=
[
i
+
1
]))
return
tuple
(
list_out
)
return
tuple
(
list_out
)
...
@@ -452,7 +454,7 @@ def sort(x, axis=-1, descending=False, name=None):
...
@@ -452,7 +454,7 @@ def sort(x, axis=-1, descending=False, name=None):
# [4. 7. 4. 6.]
# [4. 7. 4. 6.]
# [5. 7. 7. 9.]]]
# [5. 7. 7. 9.]]]
"""
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
out
,
_
=
_C_ops
.
argsort
(
x
,
'axis'
,
axis
,
'descending'
,
descending
)
out
,
_
=
_C_ops
.
argsort
(
x
,
'axis'
,
axis
,
'descending'
,
descending
)
return
out
return
out
helper
=
LayerHelper
(
"sort"
,
**
locals
())
helper
=
LayerHelper
(
"sort"
,
**
locals
())
...
@@ -501,7 +503,7 @@ def mode(x, axis=-1, keepdim=False, name=None):
...
@@ -501,7 +503,7 @@ def mode(x, axis=-1, keepdim=False, name=None):
# [1, 0]]))
# [1, 0]]))
"""
"""
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
mode
(
x
,
"axis"
,
axis
,
"keepdim"
,
keepdim
)
return
_C_ops
.
mode
(
x
,
"axis"
,
axis
,
"keepdim"
,
keepdim
)
helper
=
LayerHelper
(
"mode"
,
**
locals
())
helper
=
LayerHelper
(
"mode"
,
**
locals
())
...
@@ -575,7 +577,7 @@ def where(condition, x=None, y=None, name=None):
...
@@ -575,7 +577,7 @@ def where(condition, x=None, y=None, name=None):
if
x
is
None
or
y
is
None
:
if
x
is
None
or
y
is
None
:
raise
ValueError
(
"either both or neither of x and y should be given"
)
raise
ValueError
(
"either both or neither of x and y should be given"
)
if
not
in_dygraph
_mode
():
if
not
paddle
.
in_dynamic
_mode
():
check_variable_and_dtype
(
condition
,
'condition'
,
[
'bool'
],
'where'
)
check_variable_and_dtype
(
condition
,
'condition'
,
[
'bool'
],
'where'
)
check_variable_and_dtype
(
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'where'
)
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'where'
)
@@ -592,28 +594,27 @@ def where(condition, x=None, y=None, name=None):
         broadcast_y = y
     else:
         if core.is_compiled_with_xpu():
-            cond_int = layers.cast(condition, x.dtype)
-            cond_not_int = layers.cast(layers.logical_not(condition), x.dtype)
-            out1 = layers.elementwise_mul(x, cond_int)
-            out2 = layers.elementwise_mul(y, cond_not_int)
-            out = layers.elementwise_add(out1, out2)
+            cond_int = paddle.cast(condition, x.dtype)
+            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
+            out1 = paddle.multiply(x, cond_int)
+            out2 = paddle.multiply(y, cond_not_int)
+            out = paddle.add(out1, out2)
             return out
-        zeros_like_x = layers.zeros_like(x)
-        zeros_like_y = layers.zeros_like(y)
-        zeros_like_condition = layers.zeros_like(condition)
-        zeros_like_condition = layers.cast(zeros_like_condition, x.dtype)
-        cast_cond = layers.cast(condition, x.dtype)
+        zeros_like_x = paddle.zeros_like(x)
+        zeros_like_y = paddle.zeros_like(y)
+        zeros_like_condition = paddle.zeros_like(condition)
+        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
+        cast_cond = paddle.cast(condition, x.dtype)

-        broadcast_zeros = layers.elementwise_add(zeros_like_x, zeros_like_y)
-        broadcast_zeros = layers.elementwise_add(broadcast_zeros,
-                                                 zeros_like_condition)
-        broadcast_x = layers.elementwise_add(x, broadcast_zeros)
-        broadcast_y = layers.elementwise_add(y, broadcast_zeros)
-        broadcast_condition = layers.elementwise_add(cast_cond, broadcast_zeros)
-        broadcast_condition = layers.cast(broadcast_condition, 'bool')
+        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
+        broadcast_zeros = paddle.add(broadcast_zeros,
+                                     zeros_like_condition)
+        broadcast_x = paddle.add(x, broadcast_zeros)
+        broadcast_y = paddle.add(y, broadcast_zeros)
+        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
+        broadcast_condition = paddle.cast(broadcast_condition, 'bool')

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
     else:
         helper = LayerHelper("where", **locals())
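
The fallback above keeps broadcasting condition, x and y by adding explicitly built zero tensors; only the namespace moves from layers.* to the public paddle.* ops. Restated as a standalone example (mine, with made-up shapes):

import paddle

cond = paddle.to_tensor([[True], [False]])           # shape [2, 1]
x = paddle.to_tensor([[1.0, 2.0]])                   # shape [1, 2]
y = paddle.to_tensor([[10.0, 20.0], [30.0, 40.0]])   # shape [2, 2]

# Adding zeros_like tensors of all operands broadcasts each input to the
# common shape [2, 2] without changing any values, which is exactly what
# the rewritten code does before calling the where kernel.
zeros = paddle.zeros_like(x) + paddle.zeros_like(y) + \
    paddle.cast(paddle.zeros_like(cond), x.dtype)
bcast_cond = paddle.cast(paddle.cast(cond, x.dtype) + zeros, 'bool')
print(paddle.where(bcast_cond, x + zeros, y + zeros))
# [[1., 2.], [30., 40.]]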
@@ -704,7 +705,7 @@ def index_sample(x, index):
             #  [1200 1100]]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.index_sample(x, index)

     helper = LayerHelper("index_sample", **locals())

@@ -752,7 +753,7 @@ def masked_select(x, mask, name=None):
             #[1.0 5.0 6.0 9.0]
     """

-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.masked_select(x, mask)

     helper = LayerHelper("masked_select", **locals())

@@ -822,7 +823,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
        # [[1 1 0 0]]

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         k = k.numpy().item(0) if isinstance(k, Variable) else k
         if axis is None:
             out, indices = _C_ops.top_k_v2(x, 'k',
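
One detail worth noting in the topk branch above: a Tensor-valued k is unwrapped with k.numpy().item(0) before the kernel call, so both plain ints and one-element Tensors work in dynamic mode (my example):

import paddle

x = paddle.to_tensor([1.0, 4.0, 5.0, 7.0])
values, indices = paddle.topk(x, k=2)                 # k as a Python int
values_t, _ = paddle.topk(x, k=paddle.to_tensor(2))   # k as a Tensor
print(values)    # [7., 5.]
print(values_t)  # [7., 5.]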
@@ -906,7 +907,7 @@ def searchsorted(sorted_sequence,

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                    out_int32, "right", right)

@@ -969,7 +970,7 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
             #  [[0, 2],
             #   [1, 2]]))
     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         if axis is not None:
             return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim", keepdim)
         else:
python/paddle/tensor/stat.py
@@ -15,10 +15,9 @@

 # TODO: define statistical functions of a tensor

 import numpy as np
-from ..fluid.framework import Variable
+from ..static import Variable
 from ..fluid.layer_helper import LayerHelper
-from ..fluid.framework import core, in_dygraph_mode
-from ..fluid import layers
+from ..framework import core
 from .search import where
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 import paddle
@@ -88,7 +87,7 @@ def mean(x, axis=None, keepdim=False, name=None):
...
@@ -88,7 +87,7 @@ def mean(x, axis=None, keepdim=False, name=None):
if
axis
is
None
or
len
(
axis
)
==
0
:
if
axis
is
None
or
len
(
axis
)
==
0
:
axis
=
[
0
]
axis
=
[
0
]
if
in_dygraph
_mode
():
if
paddle
.
in_dynamic
_mode
():
return
_C_ops
.
reduce_mean
(
x
,
'dim'
,
axis
,
'keep_dim'
,
keepdim
,
return
_C_ops
.
reduce_mean
(
x
,
'dim'
,
axis
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
)
'reduce_all'
,
reduce_all
)
...
@@ -150,7 +149,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
...
@@ -150,7 +149,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
out2 = paddle.var(x, axis=1)
out2 = paddle.var(x, axis=1)
# [1. 4.33333333]
# [1. 4.33333333]
"""
"""
if
not
in_dygraph
_mode
():
if
not
paddle
.
in_dynamic
_mode
():
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'var'
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'var'
)
u
=
mean
(
x
,
axis
,
True
,
name
)
u
=
mean
(
x
,
axis
,
True
,
name
)
...
@@ -209,7 +208,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
...
@@ -209,7 +208,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
out2 = paddle.std(x, axis=1)
out2 = paddle.std(x, axis=1)
# [1. 2.081666]
# [1. 2.081666]
"""
"""
if
not
in_dygraph
_mode
():
if
not
paddle
.
in_dynamic
_mode
():
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'std'
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'std'
)
out
=
var
(
**
locals
())
out
=
var
(
**
locals
())
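
The var and std hunks only move the dtype check behind the new mode test; the statistics themselves are untouched. The docstring numbers shown above can be reproduced as follows (the input tensor is inferred to match the printed output; unbiased estimates divide by n - 1):

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [1.0, 4.0, 5.0]])
print(paddle.var(x, axis=1))  # [1., 4.33333333]
print(paddle.std(x, axis=1))  # [1., 2.081666]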
@@ -237,7 +236,7 @@ def numel(x, name=None):

     """
-    if in_dygraph_mode():
+    if paddle.in_dynamic_mode():
         return _C_ops.size(x)

     if not isinstance(x, Variable):
python/paddle/tensor/to_string.py
@@ -14,7 +14,7 @@

 import paddle
 import numpy as np
-from paddle.fluid.layers import core
+from ..framework import core
 from paddle.fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype

 __all__ = []