BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Unverified commit 861fef52
Authored by wanghuancoder on Dec 27, 2022; committed by GitHub on Dec 27, 2022.
delete legacy dygraph code in python/paddle/tensor (#49286)
* delete _in_legacy_dygraph
Parent: ea741aff

Showing 15 changed files with 4,091 additions and 5,104 deletions (+4091, -5104).
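Every file below applies the same mechanical pattern: the `_in_legacy_dygraph()` / `_legacy_C_ops` dispatch branch is deleted, and the surviving eager-mode and static-graph paths are folded into a plain if/else. A minimal runnable sketch of that before/after shape, using stand-in names only (these stubs are simplifications, not the real Paddle internals):

    # Stand-ins; the real checks live in paddle.fluid.framework and the
    # real kernels in paddle._C_ops / paddle._legacy_C_ops.
    def in_dygraph_mode():
        return True

    def _in_legacy_dygraph():
        return False

    def eager_kernel(x):
        return x * 2

    def legacy_kernel(x):
        return x * 2

    def static_graph_path(x):
        raise NotImplementedError("LayerHelper/append_op path")

    # Before this commit: three-way dispatch.
    def my_op_before(x):
        if in_dygraph_mode():
            return eager_kernel(x)
        if _in_legacy_dygraph():  # the branch this commit deletes
            return legacy_kernel(x)
        return static_graph_path(x)

    # After this commit: eager vs. static only.
    def my_op_after(x):
        if in_dygraph_mode():
            return eager_kernel(x)
        else:
            return static_graph_path(x)

    print(my_op_before(3), my_op_after(3))  # 6 6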
python/paddle/fluid/framework.py                      +1     -2
python/paddle/fluid/tests/unittests/test_unique.py    +13    -6
python/paddle/tensor/array.py                         +68    -66
python/paddle/tensor/attribute.py                     +43    -51
python/paddle/tensor/creation.py                      +368   -438
python/paddle/tensor/einsum.py                        +29    -32
python/paddle/tensor/layer_function_generator.py      +49    -45
python/paddle/tensor/linalg.py                        +685   -921
python/paddle/tensor/logic.py                         +243   -268
python/paddle/tensor/manipulation.py                  +1223  -1476
python/paddle/tensor/math.py                          +761   -1053
python/paddle/tensor/ops.py                           +194   -200
python/paddle/tensor/random.py                        +153   -204
python/paddle/tensor/search.py                        +201   -281
python/paddle/tensor/stat.py                          +60    -61
python/paddle/fluid/framework.py

@@ -255,8 +255,7 @@ def _test_eager_guard(place=None):
     try:
         yield
     finally:
-        if not already_fallback:
-            _enable_legacy_dygraph()
+        pass


 global_ipu_index = -1
python/paddle/fluid/tests/unittests/test_unique.py

@@ -28,7 +28,9 @@ class TestUniqueOp(OpTest):
         self.init_config()

     def test_check_output(self):
+        paddle.enable_static()
         self.check_output()
+        paddle.disable_static()

     def init_config(self):
         self.inputs = {
@@ -72,6 +74,8 @@ class TestRandom(TestUniqueOp):

 class TestUniqueRaiseError(unittest.TestCase):
     def test_errors(self):
+        paddle.enable_static()
+
         def test_type():
             paddle.unique([10])
@@ -82,6 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
             paddle.unique(data)

         self.assertRaises(TypeError, test_dtype)
+        paddle.disable_static()


 @unittest.skipIf(
@@ -100,8 +105,10 @@ class TestOneGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()


 @unittest.skipIf(
@@ -125,8 +132,10 @@ class TestRandomGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()


 class TestSortedUniqueOp(TestUniqueOp):
@@ -209,16 +218,13 @@ class TestUniqueOpAxis1(TestUniqueOp):

 class TestUniqueAPI(unittest.TestCase):
     def test_dygraph_api_out(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out = paddle.unique(x)
         expected_out = np.unique(x_data)
         self.assertTrue((out.numpy() == expected_out).all(), True)
-        paddle.enable_static()

     def test_dygraph_api_attr(self):
-        paddle.disable_static()
         x_data = np.random.random((3, 5, 5)).astype("float32")
         x = paddle.to_tensor(x_data)
         out, index, inverse, counts = paddle.unique(
@@ -239,10 +245,8 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((index.numpy() == np_index).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_dygraph_attr_dtype(self):
-        paddle.disable_static()
         x_data = x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out, indices, inverse, counts = paddle.unique(
@@ -259,9 +263,9 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((indices.numpy() == np_indices).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_static_graph(self):
+        paddle.enable_static()
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
@@ -281,6 +285,7 @@ class TestUniqueAPI(unittest.TestCase):
             np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
             np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
             np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
+        paddle.disable_static()


 class TestUniqueError(unittest.TestCase):
@@ -295,6 +300,7 @@ class TestUniqueError(unittest.TestCase):
         self.assertRaises(TypeError, test_x_dtype)

     def test_attr(self):
+        paddle.enable_static()
         x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')

         def test_return_index():
@@ -319,6 +325,7 @@ class TestUniqueError(unittest.TestCase):
             result = paddle.unique(x, dtype='float64')

         self.assertRaises(TypeError, test_axis)
+        paddle.disable_static()


 if __name__ == "__main__":
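With legacy dygraph gone, eager mode is the process-wide default, which is why the updated tests now bracket their static-graph checks with paddle.enable_static() / paddle.disable_static(). A runnable sketch of that convention, mirroring test_static_graph above (assumes a working Paddle install):

    import numpy as np
    import paddle

    paddle.enable_static()  # static graph only for the program_guard block
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        x = paddle.static.data(name='x', shape=[6], dtype='int64')
        out, inverse, counts = paddle.unique(
            x, return_inverse=True, return_counts=True
        )
        exe = paddle.static.Executor()
        exe.run(startup_prog)
        x_np = np.array([2, 3, 3, 1, 5, 3], dtype='int64')
        result = exe.run(
            main_prog, feed={'x': x_np}, fetch_list=[out, inverse, counts]
        )
        np.testing.assert_allclose(result[0], np.unique(x_np), rtol=1e-05)
    paddle.disable_static()  # restore eager mode for subsequent tests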
python/paddle/tensor/array.py

@@ -15,7 +15,7 @@
 # Define functions about array.

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..framework import LayerHelper, _non_static_mode, core
+from ..framework import LayerHelper, core, in_dygraph_mode
 from ..static import Variable

 __all__ = []
@@ -45,12 +45,12 @@ def array_length(array):
             arr_len = paddle.tensor.array_length(arr)
             print(arr_len)  # 1
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_write must be a list in dygraph mode"
         return len(array)
-
-    if (
-        not isinstance(array, Variable)
-        or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
+    else:
+        if (
+            not isinstance(array, Variable)
+            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
@@ -63,7 +63,9 @@ def array_length(array):
         tmp = helper.create_variable_for_type_inference(dtype='int64')
         tmp.stop_gradient = True
         helper.append_op(
-            type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]}
+            type='lod_array_length',
+            inputs={'X': [array]},
+            outputs={'Out': [tmp]},
         )
         return tmp
@@ -107,7 +109,7 @@ def array_read(array, i):
             item = paddle.tensor.array_read(arr, i)
             print(item)  # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             array, list
         ), "The 'array' in array_read must be list in dygraph mode"
@@ -119,7 +121,7 @@ def array_read(array, i):
         ], "The shape of index 'i' should be [1] in dygraph mode"
         i = i.numpy().item(0)
         return array[i]
-
-    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
-    helper = LayerHelper('array_read', **locals())
-    if (
+    else:
+        check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
+        helper = LayerHelper('array_read', **locals())
+        if (
@@ -167,7 +169,7 @@ def array_write(x, i, array=None):
             item = paddle.tensor.array_read(arr, i)
             print(item)  # [[5., 5., 5.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             x, Variable
         ), "The input data 'x' in array_write must be Variable in dygraph mode"
@@ -191,7 +193,7 @@ def array_write(x, i, array=None):
         else:
             array.append(x)
         return array
-
-    check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
-    check_type(x, 'x', (Variable), 'array_write')
-    helper = LayerHelper('array_write', **locals())
+    else:
+        check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
+        check_type(x, 'x', (Variable), 'array_write')
+        helper = LayerHelper('array_write', **locals())
@@ -265,9 +267,9 @@ def create_array(dtype, initialized_list=None):
                 )
             )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return array
-
-    helper = LayerHelper("array", **locals())
-    tensor_array = helper.create_variable(
-        name="{0}.out".format(helper.name),
+    else:
+        helper = LayerHelper("array", **locals())
+        tensor_array = helper.create_variable(
+            name="{0}.out".format(helper.name),
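User-facing behavior of the array ops is unchanged by the rename from _non_static_mode() to in_dygraph_mode(): in eager mode they still operate on plain Python lists, as the asserts above require. A small usage sketch mirroring the docstring examples in this file (assumes a working Paddle install and that these helpers are exposed under paddle.tensor as the docstrings show):

    import paddle

    paddle.disable_static()  # eager mode (the default)
    arr = paddle.tensor.create_array(dtype='float32')  # a plain list in eager mode
    x = paddle.full(shape=[1, 3], fill_value=5, dtype='float32')
    i = paddle.zeros(shape=[1], dtype='int64')  # index must have shape [1]
    arr = paddle.tensor.array_write(x, i, array=arr)
    print(paddle.tensor.array_length(arr))   # 1
    print(paddle.tensor.array_read(arr, i))  # [[5., 5., 5.]]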
python/paddle/tensor/attribute.py

@@ -17,10 +17,10 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper, core
 from ..static import Variable
 from .creation import _complex_to_real_dtype, assign
@@ -107,11 +107,7 @@ def shape(input):
         out = _C_ops.shape(input)
         out.stop_gradient = True
         return out
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.shape(input)
-        out.stop_gradient = True
-        return out
-
-    check_variable_and_dtype(
-        input,
-        'input',
+    else:
+        check_variable_and_dtype(
+            input,
+            'input',
@@ -289,9 +285,7 @@ def real(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.real(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.real(x)
-
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
-    helper = LayerHelper('real', **locals())
-    out = helper.create_variable_for_type_inference(
+    else:
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
+        helper = LayerHelper('real', **locals())
+        out = helper.create_variable_for_type_inference(
@@ -336,9 +330,7 @@ def imag(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.imag(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.imag(x)
-
-    check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
-    helper = LayerHelper('imag', **locals())
-    out = helper.create_variable_for_type_inference(
+    else:
+        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
+        helper = LayerHelper('imag', **locals())
+        out = helper.create_variable_for_type_inference(
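In eager mode shape/real/imag now dispatch straight to their _C_ops kernels. A quick check through the public API (assumes a working Paddle install):

    import paddle

    x = paddle.to_tensor([1 + 2j, 3 - 4j], dtype='complex64')
    print(paddle.real(x))   # [1., 3.]
    print(paddle.imag(x))   # [2., -4.]
    print(paddle.shape(x))  # [2]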
python/paddle/tensor/creation.py

@@ -33,7 +33,6 @@ from ..fluid.data_feeder import (
 from ..fluid.framework import (
     Variable,
     _in_eager_without_dygraph_check,
-    _in_legacy_dygraph,
     device_guard,
 )
 from ..fluid.initializer import Constant, Initializer
@@ -43,7 +42,6 @@ from ..framework import (
     LayerHelper,
     _current_expected_place,
     _get_paddle_place,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
@@ -324,11 +322,7 @@ def linspace(start, stop, num, dtype=None, name=None):
             dtype,
             _current_expected_place(),
         )
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.linspace(
-            tensor_start, tensor_stop, tensor_num, 'dtype', dtype
-        )
-
-    helper = LayerHelper("linspace", **locals())
-    start_dtype = convert_dtype(tensor_start.dtype)
+    else:
+        helper = LayerHelper("linspace", **locals())
+        start_dtype = convert_dtype(tensor_start.dtype)
@@ -376,7 +370,11 @@ def linspace(start, stop, num, dtype=None, name=None):
         helper.append_op(
             type='linspace',
-            inputs={'Start': tensor_start, 'Stop': tensor_stop, 'Num': tensor_num},
+            inputs={
+                'Start': tensor_start,
+                'Stop': tensor_stop,
+                'Num': tensor_num,
+            },
             attrs={'dtype': dtype},
             outputs={'Out': [out]},
         )
@@ -446,11 +444,11 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
     if not isinstance(base, Variable):
         with device_guard("cpu"):
             tensor_base = fill_constant([1], dtype, base)
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.logspace(
             tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype
         )
-
-    helper = LayerHelper("logspace", **locals())
-    start_dtype = convert_dtype(tensor_start.dtype)
+    else:
+        helper = LayerHelper("logspace", **locals())
+        start_dtype = convert_dtype(tensor_start.dtype)
@@ -746,7 +744,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
     if place is None:
         place = _current_expected_place()
-    if _non_static_mode():
+    if paddle.fluid.framework._non_static_mode():
         return _to_tensor_non_static(data, dtype, place, stop_gradient)

     # call assign for static graph
@@ -785,32 +783,41 @@ def full_like(x, fill_value, dtype=None, name=None):
             # [[2. 2. 2.]
             #  [2. 2. 2.]]
     """
     if dtype is None:
         dtype = x.dtype
     else:
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)

     if in_dygraph_mode():
         return _C_ops.full_like(x, fill_value, dtype, x.place)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.fill_any_like(
-            x, 'value', fill_value, 'dtype', dtype
-        )
-
-    helper = LayerHelper("full_like", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
-        'full_like',
-    )
-    check_dtype(
-        dtype,
-        'dtype',
-        ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
-        'full_like/zeros_like/ones_like',
-    )
-    out = helper.create_variable_for_type_inference(dtype=dtype)
+    else:
+        helper = LayerHelper("full_like", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+            ],
+            'full_like',
+        )
+        check_dtype(
+            dtype,
+            'dtype',
+            [
+                'bool',
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+            ],
+            'full_like/zeros_like/ones_like',
+        )
+        out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -1011,7 +1018,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     """

     def _check_attr(attr, message):
-        if isinstance(attr, ((Variable, core.VarBase, core.eager.Tensor))):
+        if isinstance(attr, ((Variable, core.eager.Tensor))):
             assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
         elif not isinstance(attr, int) or attr < 0:
             raise TypeError("{} should be a non-negative int.".format(message))
@@ -1027,16 +1034,10 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     else:
         num_columns = num_rows

-    if _non_static_mode():
-        if in_dygraph_mode():
-            out = _C_ops.eye(
-                num_rows, num_columns, dtype, _current_expected_place()
-            )
-        elif _in_legacy_dygraph():
-            out = _legacy_C_ops.eye(
-                'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
-            )
-
+    if in_dygraph_mode():
+        out = _C_ops.eye(
+            num_rows, num_columns, dtype, _current_expected_place()
+        )
     else:
         helper = LayerHelper("eye", **locals())
         check_dtype(
@@ -1211,14 +1212,12 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
     if in_dygraph_mode():
         return _C_ops.arange(start, end, step, dtype, _current_expected_place())
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.range(start, end, step)
-        out.stop_gradient = True
-        return out
-
-    check_dtype(
-        dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'range/arange'
-    )
-    helper = LayerHelper('range', **locals())
-    out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
+    else:
+        check_dtype(
+            dtype,
+            'dtype',
+            ['float32', 'float64', 'int32', 'int64'],
+            'range/arange',
+        )
+        helper = LayerHelper('range', **locals())
+        out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
@@ -1328,11 +1327,7 @@ def tril(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tril(x, diagonal, True)
-
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", True)
-
-    return _tril_triu_op(LayerHelper('tril', **locals()))
+    else:
+        return _tril_triu_op(LayerHelper('tril', **locals()))
@@ -1394,11 +1389,7 @@ def triu(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.triu(x, diagonal, False)
-
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", False)
-
-    return _tril_triu_op(LayerHelper('triu', **locals()))
+    else:
+        return _tril_triu_op(LayerHelper('triu', **locals()))
@@ -1437,18 +1428,16 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if _in_legacy_dygraph():
-        num = len(args)
-        out = _legacy_C_ops.meshgrid(list(args), num)
-        return out
     if in_dygraph_mode():
         return _C_ops.meshgrid(list(args))
-
-    name = kwargs.get("name", None)
-    helper = LayerHelper('meshgrid', **locals())
-
-    if not isinstance(args, (list, tuple)):
-        raise TypeError("The type of input args in meshgrid should be list.")
+    else:
+        name = kwargs.get("name", None)
+        helper = LayerHelper('meshgrid', **locals())
+
+        if not isinstance(args, (list, tuple)):
+            raise TypeError(
+                "The type of input args in meshgrid should be list."
+            )

         for id, input_ in enumerate(args):
             check_dtype(
@@ -1555,27 +1544,14 @@ def diagflat(x, offset=0, name=None):
             #  [0, 0, 3, 0, 0],
             #  [0, 0, 0, 4, 0]])
     """
-    padding_value = 0
     if in_dygraph_mode():
         if len(x.shape) <= 1:
-            return _C_ops.diag(x, offset, padding_value)
+            return _C_ops.diag(x, offset, 0)
         else:
             y = _C_ops.flatten(x, 0, -1)
-            return _C_ops.diag(y, offset, padding_value)
-
-    if _in_legacy_dygraph():
-        if len(x.shape) == 1:
-            return _legacy_C_ops.diag_v2(
-                x, "offset", offset, "padding_value", padding_value
-            )
-        else:
-            y, _ = _legacy_C_ops.flatten_contiguous_range(
-                x, "start_axis", 0, "stop_axis", -1
-            )
-            return _legacy_C_ops.diag_v2(
-                y, "offset", offset, "padding_value", padding_value
-            )
-
-    check_type(x, 'x', (Variable), 'diagflat')
-    check_dtype(x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat'
+            return _C_ops.diag(y, offset, 0)
+    else:
+        padding_value = 0
+        check_type(x, 'x', (Variable), 'diagflat')
+        check_dtype(
+            x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat'
@@ -1690,11 +1666,6 @@ def diag(x, offset=0, padding_value=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.diag(x, offset, padding_value)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.diag_v2(
-            x, "offset", offset, "padding_value", padding_value
-        )
-
-    check_type(x, 'x', (Variable), 'diag_v2')
-    check_dtype(
+    else:
+        check_type(x, 'x', (Variable), 'diag_v2')
+        check_dtype(
@@ -1782,15 +1753,7 @@ def empty(shape, dtype=None, name=None):
         )
         out.stop_gradient = True
         return out
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        out = _legacy_C_ops.empty(
-            'shape', shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-        )
-        out.stop_gradient = True
-        return out
-
-    helper = LayerHelper("empty", **locals())
-    inputs = {}
+    else:
+        helper = LayerHelper("empty", **locals())
+        inputs = {}
@@ -1863,14 +1826,7 @@ def empty_like(x, dtype=None, name=None):
         )
         out.stop_gradient = True
         return out
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.empty(
-            'shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-        )
-        out.stop_gradient = True
-        return out
-
-    helper = LayerHelper("empty_like", **locals())
-    check_variable_and_dtype(
-        x,
+    else:
+        helper = LayerHelper("empty_like", **locals())
+        check_variable_and_dtype(
+            x,
@@ -1958,10 +1914,6 @@ def assign(x, output=None):
             output = _C_ops.assign(input)
         else:
             _C_ops.assign_out_(input, output)
-    elif _in_legacy_dygraph():
-        if output is None:
-            output = core.VarBase()
-        _legacy_C_ops.assign(input, output)
     else:
         check_dtype(
             input.dtype,
@@ -2060,18 +2012,6 @@ def assign(x, output=None):
                 values,
                 _current_expected_place(),
             )
-        elif _in_legacy_dygraph():
-            if output is None:
-                output = core.VarBase()
-            _legacy_C_ops.assign_value(
-                output,
-                'shape',
-                list(input.shape),
-                'dtype',
-                dtype,
-                value_name,
-                values,
-            )
         else:
             if output is None:
                 output = helper.create_variable_for_type_inference(
@@ -2087,9 +2027,6 @@ def assign(x, output=None):
                 },
             )

-    if is_inplace and _in_legacy_dygraph():
-        output._bump_inplace_version()
-
     return output
@@ -2227,12 +2164,13 @@ def complex(real, imag, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.complex(real, imag)
-
-    if paddle.in_dynamic_mode():
-        return paddle._legacy_C_ops.complex(real, imag)
-
-    check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
-    check_variable_and_dtype(imag, 'imag', ['float32', 'float64'], 'complex')
-
-    op_type = "complex"
-    helper = LayerHelper(op_type, **locals())
+    else:
+        check_variable_and_dtype(
+            real, 'real', ['float32', 'float64'], 'complex'
+        )
+        check_variable_and_dtype(
+            imag, 'imag', ['float32', 'float64'], 'complex'
+        )
+
+        op_type = "complex"
+        helper = LayerHelper(op_type, **locals())
@@ -2242,7 +2180,9 @@ def complex(real, imag, name=None):
         )
         outputs = {"Out": out}
         attrs = {}
-        helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
+        helper.append_op(
+            type=op_type, inputs=inputs, attrs=attrs, outputs=outputs
+        )
         return out
@@ -2291,6 +2231,17 @@ def tril_indices(row, col, offset=0, dtype='int64'):
             # [[ 1, 2, 2, 3, 3, 3],
             #  [ 0, 0, 1, 0, 1, 2]]
     """
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+
+    if in_dygraph_mode():
+        if col is None:
+            col = row
+        out = _C_ops.tril_indices(
+            row, col, offset, dtype, _current_expected_place()
+        )
+        return out
+    else:
         if not isinstance(row, int) or row < 0:
             raise TypeError("row should be a non-negative int")
@@ -2303,22 +2254,6 @@ def tril_indices(row, col, offset=0, dtype='int64'):
         if not isinstance(offset, int):
             raise TypeError("offset should be a int")

-    if not isinstance(dtype, core.VarDesc.VarType):
-        dtype = convert_np_dtype_to_dtype_(dtype)
-
-    if in_dygraph_mode():
-        out = _C_ops.tril_indices(
-            row, col, offset, dtype, _current_expected_place()
-        )
-        return out
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.tril_indices(
-            'rows', row, 'cols', col, 'offset', offset, "dtype", dtype
-        )
-        return out
-
-    else:
         helper = LayerHelper("tril_indices", **locals())
         out = helper.create_variable_for_type_inference(dtype=dtype)
@@ -2375,6 +2310,17 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
            # [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3],
            #  [0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 2, 3]]
     """
+    if not isinstance(dtype, core.VarDesc.VarType):
+        dtype = convert_np_dtype_to_dtype_(dtype)
+
+    if in_dygraph_mode():
+        if col is None:
+            col = row
+        out = _C_ops.triu_indices(
+            row, col, offset, dtype, _current_expected_place()
+        )
+        return out
+    else:
         if not isinstance(row, int) or row < 0:
             raise TypeError("row should be a non-negative int")
@@ -2387,22 +2333,6 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
         if not isinstance(offset, int):
             raise TypeError("offset should be a int")

-    if not isinstance(dtype, core.VarDesc.VarType):
-        dtype = convert_np_dtype_to_dtype_(dtype)
-
-    if in_dygraph_mode():
-        out = _C_ops.triu_indices(
-            row, col, offset, dtype, _current_expected_place()
-        )
-        return out
-
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.triu_indices(
-            'row', row, 'col', col, 'offset', offset, "dtype", dtype
-        )
-        return out
-
-    else:
         helper = LayerHelper("triu_indices", **locals())
         out = helper.create_variable_for_type_inference(dtype=dtype)
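In tril_indices/triu_indices the dtype normalization is now hoisted above the mode check, so both the eager _C_ops path and the static LayerHelper path see a normalized dtype. The public behavior is unchanged; a quick check (assumes a working Paddle install):

    import paddle

    idx = paddle.tril_indices(4, 4, 0)
    print(idx)
    # [[0, 1, 1, 2, 2, 2, 3, 3, 3, 3],
    #  [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]]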
python/paddle/tensor/einsum.py

@@ -20,10 +20,10 @@ import string
 import numpy as np

 import opt_einsum

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from .linalg import matmul, transpose
 from .manipulation import reshape, squeeze, unsqueeze
@@ -829,18 +829,15 @@ def gen_einsum_op(equation, *operands):
     """
     EinsumOp Python Interface:
     """
-    assert len(operands) <= 2, "Only support two operands in EinsumOp."
     if in_dygraph_mode():
         return _C_ops.einsum(operands, equation)[0]
-
-    if _in_legacy_dygraph():
-        # dygraph
-        return _legacy_C_ops.einsum(
-            operands, len(operands), len(operands), 'equation', equation
-        )[0]
-
-    for inp in operands:
-        check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
-    check_type(equation, 'equation', str, 'einsum')
-    helper = LayerHelper('einsum', **locals())
-    out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
+    else:
+        assert len(operands) <= 2, "Only support two operands in EinsumOp."
+        for inp in operands:
+            check_variable_and_dtype(
+                inp, 'dtype', ['float32', 'float64'], 'einsum'
+            )
+        check_type(equation, 'equation', str, 'einsum')
+        helper = LayerHelper('einsum', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=operands[0].dtype
+        )
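gen_einsum_op now keeps the two-operand assert only on the static-graph path; in eager mode it dispatches straight to _C_ops.einsum. Exercising it through the public API (assumes opt_einsum and a working Paddle install):

    import paddle

    a = paddle.rand([2, 3])
    b = paddle.rand([3, 4])
    out = paddle.einsum('ij,jk->ik', a, b)  # eager: _C_ops.einsum under the hood
    print(out.shape)                        # [2, 4]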
python/paddle/tensor/layer_function_generator.py

@@ -24,7 +24,6 @@ from ..fluid.proto import framework_pb2
 from ..framework import (
     LayerHelper,
     OpProtoHolder,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
@@ -274,15 +273,16 @@ def generate_activation_fn(op_type):
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, op_type):
-            op = getattr(_C_ops, op_type)
-            return op(x)
-        # TODO(dev): Because some ops' yaml has not been migrated.
-        # Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, op_type)
-            return op(x)
+        if in_dygraph_mode():
+            if hasattr(_C_ops, op_type):
+                op = getattr(_C_ops, op_type)
+                return op(x)
+            else:
+                # TODO(dev): Because some ops' yaml has not been migrated.
+                # Replace it with _C_ops while all yaml work is done.
+                op = getattr(_legacy_C_ops, op_type)
+                return op(x)
         else:
             if op_type not in ["abs", "exp", "square"]:
                 check_variable_and_dtype(
                     x, 'x', ['float16', 'float32', 'float64'], op_type
@@ -307,7 +307,9 @@ def generate_activation_fn(op_type):
             helper = LayerHelper(op_type, **locals())

             output = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
+            helper.append_op(
+                type=op_type, inputs={"X": x}, outputs={"Out": output}
+            )
             return output

     func.__name__ = op_type
@@ -332,12 +334,14 @@ def generate_inplace_fn(inplace_op_type):
     origin_op_type = inplace_op_type[:-1]

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, inplace_op_type):
-            op = getattr(_C_ops, inplace_op_type)
-            return op(x)
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, inplace_op_type)
-            return op(x)
+        if in_dygraph_mode():
+            if hasattr(_C_ops, inplace_op_type):
+                op = getattr(_C_ops, inplace_op_type)
+                return op(x)
+            else:
+                op = getattr(_legacy_C_ops, inplace_op_type)
+                return op(x)
         else:
             warnings.warn(
                 "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
                     inplace_op_type, origin_op_type
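The hasattr-based dispatch kept here means that even in eager mode, ops whose yaml has not been migrated still fall back to _legacy_C_ops. A stand-in demo of that lookup pattern (these stub classes are not the real Paddle op registries):

    class _Ops:
        @staticmethod
        def exp(x):
            return 2.718281828459045 ** x

    class _LegacyOps:
        @staticmethod
        def softsign(x):
            return x / (1 + abs(x))

    _C_ops, _legacy_C_ops = _Ops(), _LegacyOps()

    def dispatch(op_type, x):
        # mirrors: if hasattr(_C_ops, op_type) use it, else fall back
        op = getattr(_C_ops, op_type, None) or getattr(_legacy_C_ops, op_type)
        return op(x)

    print(dispatch('exp', 1.0))        # migrated op resolves on _C_ops
    print(dispatch('softsign', -2.0))  # unmigrated op falls back to _legacy_C_ops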
python/paddle/tensor/linalg.py

@@ -15,7 +15,7 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import VarDesc

 from ..fluid.data_feeder import (
@@ -23,8 +23,7 @@ from ..fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
 )
-from ..fluid.framework import _in_legacy_dygraph
-from ..framework import LayerHelper, _non_static_mode, in_dygraph_mode
+from ..framework import LayerHelper, in_dygraph_mode
 from ..static import Variable
 from .creation import full
 from .logic import logical_not
@@ -90,10 +89,6 @@ def transpose(x, perm, name=None):
     if in_dygraph_mode():
         return _C_ops.transpose(x, perm)
     else:
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-            return out
-
         check_variable_and_dtype(
             x,
             'x',
@@ -235,12 +230,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matmul(x, y, transpose_x, transpose_y)
-
-    if _in_legacy_dygraph():
-        op_type = 'matmul_v2'
-        op = getattr(_legacy_C_ops, op_type)
-        return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
-
-    attrs = {
+    else:
+        attrs = {
             'trans_x': transpose_x,
             'trans_y': transpose_y,
@@ -252,7 +242,13 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
             check_variable_and_dtype(
                 val,
                 name,
-                ['float16', 'float32', 'float64', 'complex64', 'complex128'],
+                [
+                    'float16',
+                    'float32',
+                    'float64',
+                    'complex64',
+                    'complex128',
+                ],
                 'matmul',
             )
@@ -373,14 +369,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             if dim is None:
                 return _C_ops.frobenius_norm(input, [], keepdim, True)
             return _C_ops.frobenius_norm(input, dim, keepdim, False)
-        if _in_legacy_dygraph():
-            if dim is None:
-                return _legacy_C_ops.frobenius_norm(
-                    input, 'keep_dim', keepdim, 'reduce_all', True
-                )
-            return _legacy_C_ops.frobenius_norm(
-                input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False
-            )
-        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
-        if dim is None:
-            attrs['reduce_all'] = True
+        else:
+            attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
+            if dim is None:
+                attrs['reduce_all'] = True
@@ -416,22 +405,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             if axis is None:
                 axis = -1
             return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector)
-
-        if _in_legacy_dygraph():
-            if axis is None:
-                axis = -1
-            return _legacy_C_ops.p_norm(
-                input,
-                'porder',
-                porder,
-                'axis',
-                axis,
-                'keepdim',
-                keepdim,
-                'asvector',
-                asvector,
-            )
-
-        if porder is not None:
-            check_type(porder, 'porder', (float, int), 'p_norm')
-        if axis is not None:
+        else:
+            if porder is not None:
+                check_type(porder, 'porder', (float, int), 'p_norm')
+            if axis is not None:
@@ -469,17 +443,21 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 return _C_ops.max(out, axis, keepdim)
             else:
                 return _C_ops.min(out, axis, keepdim)
-
-        helper = LayerHelper('inf_norm', **locals())
-        out = helper.create_variable_for_type_inference(
-            dtype=helper.input_dtype()
-        )
-        helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
-        reduce_out = helper.create_variable_for_type_inference(
-            dtype=helper.input_dtype()
-        )
-        reduce_all = True if axis is None or axis == [] or asvector else False
-        axis = axis if axis is not None and axis != [] else [0]
+        else:
+            helper = LayerHelper('inf_norm', **locals())
+            out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            helper.append_op(
+                type='abs', inputs={'X': input}, outputs={'Out': out}
+            )
+            reduce_out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            reduce_all = (
+                True if axis is None or axis == [] or asvector else False
+            )
+            axis = axis if axis is not None and axis != [] else [0]

             reduce_type = (
@@ -489,7 +467,11 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 type=reduce_type,
                 inputs={'X': out},
                 outputs={'Out': reduce_out},
-                attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': keepdim,
+                    'reduce_all': reduce_all,
+                },
             )

             return reduce_out
@@ -846,40 +828,6 @@ def cond(x, p=None, name=None):
             if porder == 1 or porder == np.inf:
                 return _C_ops.max(sum_out, [-1], False)
             if porder == -1 or porder == -np.inf:
                 return _C_ops.min(sum_out, [-1], False)
-        elif _in_legacy_dygraph():
-            reduce_all = True if axis is None or axis == [] else False
-            axis = axis if axis is not None and axis != [] else [0]
-            abs_out = _legacy_C_ops.abs(input)
-            sum_out = _legacy_C_ops.reduce_sum(
-                abs_out,
-                'dim',
-                axis,
-                'keepdim',
-                False,
-                'reduce_all',
-                reduce_all,
-            )
-            if porder == 1 or porder == np.inf:
-                return _legacy_C_ops.reduce_max(
-                    sum_out,
-                    'dim',
-                    [-1],
-                    'keepdim',
-                    False,
-                    'reduce_all',
-                    reduce_all,
-                )
-            if porder == -1 or porder == -np.inf:
-                return _legacy_C_ops.reduce_min(
-                    sum_out,
-                    'dim',
-                    [-1],
-                    'keepdim',
-                    False,
-                    'reduce_all',
-                    reduce_all,
-                )
         else:
             reduce_all = True if axis is None or axis == [] else False
             axis = axis if axis is not None and axis != [] else [0]
@@ -940,29 +888,7 @@ def cond(x, p=None, name=None):
             sum_out_1 = _C_ops.sum(pow_out, axis, None, False)
             sum_out_2 = _C_ops.sum(sum_out_1, axis, None, False)
             return _C_ops.pow(sum_out_2, float(1.0 / porder))
-        elif paddle.in_dynamic_mode():
-            reduce_all = True if axis is None or axis == [] else False
-            pow_out = _legacy_C_ops.pow(input, 'factor', porder)
-            sum_out_1 = _legacy_C_ops.reduce_sum(
-                pow_out,
-                'dim',
-                axis,
-                'keepdim',
-                False,
-                'reduce_all',
-                reduce_all,
-            )
-            sum_out_2 = _legacy_C_ops.reduce_sum(
-                sum_out_1,
-                'dim',
-                axis,
-                'keepdim',
-                False,
-                'reduce_all',
-                reduce_all,
-            )
-            return _legacy_C_ops.pow(sum_out_2, 'factor', float(1.0 / porder))
         else:
             reduce_all = True if axis is None or axis == [] else False
             block = LayerHelper('norm', **locals())
             pow_out = block.create_variable_for_type_inference(
@@ -987,13 +913,21 @@ def cond(x, p=None, name=None):
                 type='reduce_sum',
                 inputs={'X': pow_out},
                 outputs={'Out': sum_out_1},
-                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': False,
+                    'reduce_all': reduce_all,
+                },
             )
             block.append_op(
                 type='reduce_sum',
                 inputs={'X': sum_out_1},
                 outputs={'Out': sum_out_2},
-                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': False,
+                    'reduce_all': reduce_all,
+                },
             )
             block.append_op(
                 type='pow',
@@ -1009,48 +943,19 @@ def cond(x, p=None, name=None):
         Calculate the matrix norm, which is related to singular values, of a matrix
        or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.
         """
-        if not in_dygraph_mode():
-            reduce_all = True if axis is None or axis == [] else False
         u, s, vh = svd(input, full_matrices=False)

-        if _non_static_mode():
-            if porder == "nuc":
-                if in_dygraph_mode():
-                    return _C_ops.sum(s, axis, None, False)
-                else:
-                    return _legacy_C_ops.reduce_sum(
-                        s,
-                        'dim',
-                        axis,
-                        'keepdim',
-                        False,
-                        'reduce_all',
-                        reduce_all,
-                    )
-            if in_dygraph_mode():
-                max_out = _C_ops.max(s, axis, False)
-                min_out = _C_ops.min(s, axis, False)
-                if porder == 2:
-                    return _C_ops.divide(max_out, min_out)
-                if porder == -2:
-                    return _C_ops.divide(min_out, max_out)
-            else:
-                max_out = _legacy_C_ops.reduce_max(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                min_out = _legacy_C_ops.reduce_min(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                if porder == 2:
-                    return _legacy_C_ops.elementwise_div(
-                        max_out, min_out, 'aixs', axis, 'use_mkldnn', False
-                    )
-                if porder == -2:
-                    return _legacy_C_ops.elementwise_div(
-                        min_out, max_out, 'aixs', axis, 'use_mkldnn', False
-                    )
-
-        reduce_all = True if axis is None or axis == [] else False
-        block = LayerHelper('norm', **locals())
-        out = block.create_variable_for_type_inference(
-            dtype=block.input_dtype()
+        if in_dygraph_mode():
+            if porder == "nuc":
+                return _C_ops.sum(s, axis, None, False)
+            max_out = _C_ops.max(s, axis, False)
+            min_out = _C_ops.min(s, axis, False)
+            if porder == 2:
+                return _C_ops.divide(max_out, min_out)
+            if porder == -2:
+                return _C_ops.divide(min_out, max_out)
+        else:
+            reduce_all = True if axis is None or axis == [] else False
+            block = LayerHelper('norm', **locals())
+            out = block.create_variable_for_type_inference(
+                dtype=block.input_dtype()
@@ -1077,13 +982,21 @@ def cond(x, p=None, name=None):
                 type='reduce_max',
                 inputs={'X': s},
                 outputs={'Out': max_out},
-                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': False,
+                    'reduce_all': reduce_all,
+                },
             )
             block.append_op(
                 type='reduce_min',
                 inputs={'X': s},
                 outputs={'Out': min_out},
-                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': False,
+                    'reduce_all': reduce_all,
+                },
             )
             if porder == 2:
                 block.append_op(
@@ -1103,7 +1016,7 @@ def cond(x, p=None, name=None):
         return out

     def empty_tensor(input, shape):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             return input.reshape(shape)
         raise ValueError("only support x is nonempty tensor in static mode")
@@ -1186,9 +1099,7 @@ def dot(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.dot(x, y)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.dot(x, y)
-
-    op_type = 'dot'
-    assert x is not None, 'x cannot be None in {}'.format(op_type)
+    else:
+        op_type = 'dot'
+        assert x is not None, 'x cannot be None in {}'.format(op_type)
@@ -1389,15 +1300,7 @@ def t(input, name=None):
         perm = [1, 0]
         out = _C_ops.transpose(input, perm)
         return out
-
-    if _in_legacy_dygraph():
-        if len(input.shape) == 1:
-            return input
-        # 2-D tensor
-        perm = [1, 0]
-        out, _ = _legacy_C_ops.transpose2(input, 'axis', perm)
-        return out
-
-    check_variable_and_dtype(
-        input,
-        'input',
+    else:
+        check_variable_and_dtype(
+            input,
+            'input',
@@ -1461,12 +1364,6 @@ def cross(x, y, axis=9, name=None):
     if in_dygraph_mode():
         axis = K_DEFAULT_DIM if axis is None else axis
         return _C_ops.cross(x, y, axis)
     else:
-        if _in_legacy_dygraph():
-            if axis is not None:
-                return _legacy_C_ops.cross(x, y, 'dim', axis)
-            else:
-                return _legacy_C_ops.cross(x, y)
-        else:
         helper = LayerHelper("cross", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
@@ -1520,10 +1417,7 @@ def cholesky(x, upper=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cholesky(x, upper)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cholesky(x, "upper", upper)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
-    check_type(upper, 'upper', bool, 'cholesky')
-    helper = LayerHelper('cholesky', **locals())
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
+        check_type(upper, 'upper', bool, 'cholesky')
+        helper = LayerHelper('cholesky', **locals())
@@ -1594,34 +1488,7 @@ def matrix_rank(x, tol=None, hermitian=False, name=None):
             tol_attr = float(tol)
             use_default_tol = False
         return _C_ops.matrix_rank(x, tol_attr, hermitian, use_default_tol)
-
-    if _in_legacy_dygraph():
-        if tol is None:
-            tol_tensor = None
-            tol_attr = 0.0
-            use_default_tol = True
-        elif isinstance(tol, Variable):
-            if tol.dtype != x.dtype:
-                tol_tensor = cast(tol, x.dtype)
-            else:
-                tol_tensor = tol
-            tol_attr = 0.0
-            use_default_tol = False
-        else:
-            tol_tensor = None
-            tol_attr = float(tol)
-            use_default_tol = False
-        return _legacy_C_ops.matrix_rank(
-            x,
-            tol_tensor,
-            "tol",
-            tol_attr,
-            'hermitian',
-            hermitian,
-            'use_default_tol',
-            use_default_tol,
-        )
-
-    inputs = {}
-    attrs = {}
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
+    else:
+        inputs = {}
+        attrs = {}
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
@@ -1711,13 +1578,12 @@ def bmm(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.bmm(x, y)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.bmm(x, y)
-
-    helper = LayerHelper('bmm', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
-    return out
+    else:
+        helper = LayerHelper('bmm', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}
+        )
+        return out
@@ -1748,12 +1614,7 @@ def histogram(input, bins=100, min=0, max=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.histogram(input, bins, min, max)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.histogram(
-            input, "bins", bins, "min", min, "max", max
-        )
-
-    helper = LayerHelper('histogram', **locals())
-    check_variable_and_dtype(
-        input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram'
+    else:
+        helper = LayerHelper('histogram', **locals())
+        check_variable_and_dtype(
+            input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram'
@@ -1800,9 +1661,7 @@ def bincount(x, weights=None, minlength=0, name=None):
     if in_dygraph_mode():
         return _C_ops.bincount(x, weights, minlength)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.bincount(x, weights, "minlength", minlength)
-
-    helper = LayerHelper('bincount', **locals())
-    check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
+    else:
+        helper = LayerHelper('bincount', **locals())
+        check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
@@ -1859,10 +1718,6 @@ def mv(x, vec, name=None):
     if in_dygraph_mode():
         return _C_ops.mv(x, vec)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.mv(x, vec)
-            return out
-        else:

         def __check_input(x, vec):
             var_names = {'x': x, 'vec': vec}
@@ -1927,10 +1782,7 @@ def det(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.det(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.determinant(x)
-
-    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
-    input_shape = list(x.shape)
+    else:
+        check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
+        input_shape = list(x.shape)
@@ -1989,10 +1841,7 @@ def slogdet(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.slogdet(x)
-
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.slogdeterminant(x)
-
-    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
-    input_shape = list(x.shape)
+    else:
+        check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
+        input_shape = list(x.shape)
@@ -2011,7 +1860,9 @@ def slogdet(x, name=None):
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
-            type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]}
+            type='slogdeterminant',
+            inputs={'Input': [x]},
+            outputs={'Out': [out]},
         )
         return out
@@ -2071,8 +1922,7 @@ def svd(x, full_matrices=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.svd(x, full_matrices)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.svd(x, 'full_matrices', full_matrices)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
-    check_type(full_matrices, 'full_matrices', bool, 'svd')
-    helper = LayerHelper('svd', **locals())
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
+        check_type(full_matrices, 'full_matrices', bool, 'svd')
+        helper = LayerHelper('svd', **locals())
@@ -2146,11 +1996,10 @@ def matrix_power(x, n, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matrix_power(x, n)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.matrix_power(x, "n", n)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
-    check_type(n, 'n', int, 'matrix_power')
-    helper = LayerHelper('matrix_power', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            x, 'dtype', ['float32', 'float64'], 'matrix_power'
+        )
+        check_type(n, 'n', int, 'matrix_power')
+        helper = LayerHelper('matrix_power', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2211,12 +2060,7 @@ def qr(x, mode="reduced", name=None):
             return r
         else:
             return q, r
-    if _in_legacy_dygraph():
-        q, r = _legacy_C_ops.qr(x, 'mode', mode)
-        if mode == "r":
-            return r
-        else:
-            return q, r
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
-    check_type(mode, 'mode', str, 'qr')
-    helper = LayerHelper('qr', **locals())
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
+        check_type(mode, 'mode', str, 'qr')
+        helper = LayerHelper('qr', **locals())
@@ -2315,8 +2159,6 @@ def lu(x, pivot=True, get_infos=False, name=None):
     if in_dygraph_mode():
         lu, p, info = _C_ops.lu(x, pivot)
-    elif paddle.in_dynamic_mode():
-        lu, p, info = _legacy_C_ops.lu(x, 'pivot', pivot)
     else:
         check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
         helper = LayerHelper('lu', **locals())
@@ -2413,14 +2255,10 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
     if in_dygraph_mode():
         P, L, U = _C_ops.lu_unpack(x, y, unpack_ludata, unpack_pivots)
         return P, L, U
-
-    if paddle.in_dynamic_mode():
-        P, L, U = _legacy_C_ops.lu_unpack(
-            x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots
-        )
-        return P, L, U
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
-    helper = LayerHelper('lu_unpack', **locals())
-    p = helper.create_variable_for_type_inference(dtype=x.dtype)
-    l = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        check_variable_and_dtype(
+            x, 'dtype', ['float32', 'float64'], 'lu_unpack'
+        )
+        helper = LayerHelper('lu_unpack', **locals())
+        p = helper.create_variable_for_type_inference(dtype=x.dtype)
+        l = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2486,10 +2324,7 @@ def eig(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.eig(x)
-    elif paddle.in_dynamic_mode():
-        w, v = _legacy_C_ops.eig(x)
-        return w, v
-
-    check_variable_and_dtype(
-        x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig'
-    )
+    else:
+        check_variable_and_dtype(
+            x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig'
+        )
@@ -2562,9 +2397,7 @@ def eigvals(x, name=None):
     if in_dygraph_mode():
         return _C_ops.eigvals(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.eigvals(x)
-
-    helper = LayerHelper('eigvals', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
+    else:
+        helper = LayerHelper('eigvals', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
@@ -2627,11 +2460,9 @@ def multi_dot(x, name=None):
             # [10, 7]
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multi_dot(x)
     if in_dygraph_mode():
         return _C_ops.multi_dot(x)
-
-    check_type(x, 'x', (list, tuple), 'multi_dot')
-    for id, item in enumerate(x):
-        check_variable_and_dtype(
+    else:
+        check_type(x, 'x', (list, tuple), 'multi_dot')
+        for id, item in enumerate(x):
+            check_variable_and_dtype(
@@ -2648,7 +2479,9 @@ def multi_dot(x, name=None):
         helper = LayerHelper('multi_dot', **locals())
         dtype = helper.input_dtype(input_param_name='x')
         out = helper.create_variable_for_type_inference(dtype)
-        helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out})
+        helper.append_op(
+            type='multi_dot', inputs={"X": x}, outputs={"Out": out}
+        )
         return out
@@ -2687,9 +2520,7 @@ def eigh(x, UPLO='L', name=None):
     """
     if in_dygraph_mode():
         return _C_ops.eigh(x, UPLO)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.eigh(x, 'UPLO', UPLO)
+    else:

-    def __check_input(x, UPLO):
-        x_shape = list(x.shape)
+        def __check_input(x, UPLO):
+            x_shape = list(x.shape)
@@ -2713,7 +2544,10 @@ def eigh(x, UPLO='L', name=None):
         helper = LayerHelper('eigh', **locals())
         check_variable_and_dtype(
-            x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh'
+            x,
+            'dtype',
+            ['float32', 'float64', 'complex64', 'complex128'],
+            'eigh',
         )

         out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
@@ -2838,68 +2672,6 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             u_conj = _C_ops.conj(u)
             out_2 = _C_ops.matmul(out_1, u_conj, False, True)
             return out_2
-    if _in_legacy_dygraph():
-        if not hermitian:
-            # combine svd and matmul op
-            u, s, vt = _legacy_C_ops.svd(x, 'full_matrices', False)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=x.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=x.dtype)
-
-            condition = s > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            dims = list(range(len(vt.shape)))
-            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
-            v, _ = _legacy_C_ops.transpose2(vt, 'axis', perm)
-
-            out_1 = v * st
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
-        else:
-            # combine eigh and matmul op
-            s, u = _legacy_C_ops.eigh(x, 'UPLO', 'L')
-            s_abs = paddle.abs(s)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s_abs, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=s.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=s.dtype)
-
-            condition = s_abs > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            out_1 = u * st
-            u_conj = _legacy_C_ops.conj(u)
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u_conj, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u_conj, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
     else:
         if not hermitian:
             helper = LayerHelper('pinv', **locals())
@@ -3098,10 +2870,7 @@ def solve(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.solve(x, y)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.solve(x, y)
-
-    inputs = {"X": [x], "Y": [y]}
-    helper = LayerHelper("solve", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
+    else:
+        inputs = {"X": [x], "Y": [y]}
+        helper = LayerHelper("solve", **locals())
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
@@ -3170,23 +2939,15 @@ def triangular_solve(
     """
     if in_dygraph_mode():
         return _C_ops.triangular_solve(x, y, upper, transpose, unitriangular)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.triangular_solve(
-            x,
-            y,
-            'upper',
-            upper,
-            'transpose',
-            transpose,
-            'unitriangular',
-            unitriangular,
-        )
-
-    inputs = {"X": [x], "Y": [y]}
-    helper = LayerHelper("triangular_solve", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
-    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')
+    else:
+        inputs = {"X": [x], "Y": [y]}
+        helper = LayerHelper("triangular_solve", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'triangular_solve'
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float32', 'float64'], 'triangular_solve'
+        )

         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
@@ -3237,13 +2998,14 @@ def cholesky_solve(x, y, upper=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cholesky_solve(x, y, upper)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cholesky_solve(x, y, 'upper', upper)
-
-    helper = LayerHelper("cholesky_solve", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve')
-    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve')
+    else:
+        helper = LayerHelper("cholesky_solve", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'cholesky_solve'
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float32', 'float64'], 'cholesky_solve'
+        )

         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
@@ -3284,11 +3046,7 @@ def eigvalsh(x, UPLO='L', name=None):
     if in_dygraph_mode():
         values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
         return values
-
-    elif paddle.in_dynamic_mode():
-        is_test = x.stop_gradient
-        values, _ = _legacy_C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
-        return values
+    else:

-    def __check_input(x, UPLO):
-        x_shape = list(x.shape)
+        def __check_input(x, UPLO):
+            x_shape = list(x.shape)
@@ -3423,16 +3181,10 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
     elif x.dtype == paddle.float64:
         rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

-    if _non_static_mode():
-        if in_dygraph_mode():
-            solution, residuals, rank, singular_values = _C_ops.lstsq(
-                x, y, rcond, driver
-            )
-        else:
-            solution, residuals, rank, singular_values = _legacy_C_ops.lstsq(
-                x, y, 'rcond', rcond, 'driver', driver
-            )
-
+    if in_dygraph_mode():
+        solution, residuals, rank, singular_values = _C_ops.lstsq(
+            x, y, rcond, driver
+        )
         if driver == "gels":
             rank = paddle.empty(shape=[0], dtype=paddle.int32)
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)
@@ -3440,19 +3192,27 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)

         return solution, residuals, rank, singular_values
-
-    helper = LayerHelper('lstsq', **locals())
-    check_variable_and_dtype(
-        x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq'
-    )
-    check_variable_and_dtype(
-        y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq'
-    )
-
-    solution = helper.create_variable_for_type_inference(dtype=x.dtype)
-    residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
-    rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
-    singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)
+    else:
+        helper = LayerHelper('lstsq', **locals())
+        check_variable_and_dtype(
+            x,
+            'dtype',
+            ['float32', 'float64', 'complex64', 'complex128'],
+            'lstsq',
+        )
+        check_variable_and_dtype(
+            y,
+            'dtype',
+            ['float32', 'float64', 'complex64', 'complex128'],
+            'lstsq',
+        )
+
+        solution = helper.create_variable_for_type_inference(dtype=x.dtype)
+        residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
+        rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
+        singular_values = helper.create_variable_for_type_inference(
+            dtype=x.dtype
+        )

         helper.append_op(
             type='lstsq',
@@ -3468,9 +3228,13 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
         if driver == "gels":
             rank = paddle.static.data(name='rank', shape=[0])
-            singular_values = paddle.static.data(name='singular_values', shape=[0])
+            singular_values = paddle.static.data(
+                name='singular_values', shape=[0]
+            )
         elif driver == "gelsy":
-            singular_values = paddle.static.data(name='singular_values', shape=[0])
+            singular_values = paddle.static.data(
+                name='singular_values', shape=[0]
+            )

         return solution, residuals, rank, singular_values
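All of these linalg entry points now share the two-branch shape, so in eager mode each call goes straight to its _C_ops kernel. A quick smoke test through the public API (assumes a working Paddle install):

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    print(paddle.linalg.det(x))             # -2.0
    print(paddle.linalg.matrix_power(x, 2)) # [[7., 10.], [15., 22.]]
    print(paddle.linalg.norm(x, p='fro'))   # sqrt(30) ~ 5.477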
python/paddle/tensor/logic.py

@@ -26,10 +26,9 @@ if _in_eager_mode_:
 else:
     from ..framework import VarBase as Tensor

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.tensor.creation import full

-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import LayerHelper, in_dygraph_mode

 __all__ = []
@@ -42,12 +41,7 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
-
-    check_variable_and_dtype(
-        x,
-        "x",
+    else:
+        check_variable_and_dtype(
+            x,
+            "x",
@@ -58,7 +52,15 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         check_variable_and_dtype(
             y,
             "y",
-            ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
+            [
+                "bool",
+                "int8",
+                "int16",
+                "int32",
+                "int64",
+                "float32",
+                "float64",
+            ],
             op_name,
         )
         if out is not None:
@@ -80,7 +82,9 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
-            helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
+            )

         return out
@@ -288,9 +292,7 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.is_empty(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.is_empty(x)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
-    )
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'is_empty'
+        )
@@ -336,14 +338,13 @@ def equal_all(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.equal_all(x, y)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.equal_all(x, y)
-
-    helper = LayerHelper("equal_all", **locals())
-    out = helper.create_variable_for_type_inference(dtype='bool')
-    helper.append_op(
-        type='equal_all', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [out]}
-    )
-    return out
+    else:
+        helper = LayerHelper("equal_all", **locals())
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        helper.append_op(
+            type='equal_all',
+            inputs={'X': [x], 'Y': [y]},
+            outputs={'Out': [out]},
+        )
+        return out
@@ -393,10 +394,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.allclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.allclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
-    check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
-    check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
-    check_type(rtol, 'rtol', float, 'allclose')
+    else:
+        check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
+        check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
+        check_type(rtol, 'rtol', float, 'allclose')
@@ -456,9 +454,6 @@ def equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.equal(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -512,9 +507,6 @@ def greater_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.greater_equal(x, y)
    else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_equal(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -568,9 +560,6 @@ def greater_than(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.greater_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_than(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -625,9 +614,6 @@ def less_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.less_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_equal(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -682,9 +668,6 @@ def less_than(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.less_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_than(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -739,9 +722,6 @@ def not_equal(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.not_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.not_equal(x, y)
-        else:
-            check_variable_and_dtype(
-                x,
+        check_variable_and_dtype(
+            x,
@@ -802,15 +782,12 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
-
-    check_variable_and_dtype(
-        x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name
-    )
+    else:
+        check_variable_and_dtype(
+            x,
+            "x",
+            ["bool", "uint8", "int8", "int16", "int32", "int64"],
+            op_name,
+        )

         if y is not None:
             check_variable_and_dtype(
@@ -834,7 +811,9 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
-            helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
+            )

         return out
@@ -998,11 +977,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.isclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
-    check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
-    check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
-    check_type(rtol, 'rtol', float, 'isclose')
+    else:
+        check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
+        check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
+        check_type(rtol, 'rtol', float, 'isclose')
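_logical_op and _bitwise_op back the public logical/bitwise comparisons; in eager mode they resolve the op on _C_ops and call it directly. A short check through the public API (assumes a working Paddle install):

    import paddle

    x = paddle.to_tensor([True, False, True])
    y = paddle.to_tensor([True, True, False])
    print(paddle.logical_and(x, y))  # [True, False, False]
    print(paddle.logical_not(x))     # [False, True, False]
    a = paddle.to_tensor([1.0])
    b = paddle.to_tensor([1.0 + 1e-9])
    print(paddle.isclose(a, b))      # [True]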
python/paddle/tensor/manipulation.py
浏览文件 @
861fef52
...
...
@@ -19,17 +19,16 @@ from collections import Counter
import
numpy
as
np
import
paddle
from
paddle
import
_C_ops
,
_legacy_C_ops
from
paddle
import
_C_ops
from
paddle.utils.inplace_utils
import
inplace_apis_in_dygraph_only
from
..common_ops_import
import
_varbase_creator
,
fill_constant
from
..common_ops_import
import
fill_constant
from
..fluid.data_feeder
import
(
check_dtype
,
check_type
,
check_variable_and_dtype
,
convert_dtype
,
)
from
..fluid.framework
import
_in_legacy_dygraph
,
_non_static_mode
from
..fluid.layers
import
utils
from
..framework
import
(
LayerHelper
,
...
...
@@ -124,7 +123,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             paddle.tensor.array.array_write(x1, i + 1, array)
             output, output_index = paddle.tensor.manipulation.tensor_array_to_tensor(input=array)
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             input, list
         ), "The 'input' in tensor_array_to_tensor must be list"
...
@@ -136,7 +135,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             np.array(list(map(lambda x: int(x.shape[axis]), input)))
         )
         return res, sizes
     else:
         check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
         if isinstance(input, list):
             for i, input_x in enumerate(input):
...
@@ -147,7 +146,9 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
                 'tensor_array_to_tensor',
             )
         helper = LayerHelper('tensor_array_to_tensor', **locals())
-        out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
         out_index = helper.create_variable_for_type_inference(dtype="int32")
         helper.append_op(
             type='tensor_array_to_tensor',
...
@@ -186,13 +187,7 @@ def cast(x, dtype):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
         return _C_ops.cast(x, dtype)
-    if _non_static_mode():
-        if not isinstance(dtype, core.VarDesc.VarType):
-            dtype = convert_np_dtype_to_dtype_(dtype)
-        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return out
     else:
         check_variable_and_dtype(
             x,
             'x',
...
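The deleted branch in `cast` also illustrates the old calling convention: `_legacy_C_ops` kernels took attributes as flattened name/value pairs ('in_dtype', x.dtype, 'out_dtype', dtype), whereas the retained `_C_ops` entry points take them positionally. The public API is unchanged either way; a quick check, assuming Paddle 2.x:

import paddle

x = paddle.to_tensor([1, 2, 3], dtype='int32')
y = paddle.cast(x, 'float64')  # routes to _C_ops.cast(x, dtype) in dygraph
print(y.dtype)                 # paddle.float64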
@@ -362,73 +357,6 @@ def slice(input, axes, starts, ends):
         return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
     else:
-        if _in_legacy_dygraph():
-            attrs = ()
-            starts_tensor = None
-            ends_tensor = None
-            if isinstance(axes, (list, tuple)):
-                axes = list(axes)
-                if len(axes) == 0:
-                    raise ValueError(
-                        "Input axes should not be an empty list/tuple."
-                    )
-                for i in range(len(axes)):
-                    if axes[i] < 0:
-                        axes[i] = max(0, axes[i] + len(input.shape))
-                    else:
-                        axes[i] = min(len(input.shape) - 1, axes[i])
-            else:
-                raise ValueError(
-                    "Input axes must be a python list or tuple, but reveived {}".format(
-                        type(axes)
-                    )
-                )
-            infer_flags = list(1 for i in range(len(axes)))
-            tmp_tensor_type = Variable
-            if isinstance(starts, (list, tuple)):
-                starts = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in starts
-                ]
-                attrs += ('starts', starts)
-            elif isinstance(starts, tmp_tensor_type):
-                starts_tensor = starts
-                starts.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-            if isinstance(ends, (list, tuple)):
-                ends = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in ends
-                ]
-                attrs += ('ends', ends)
-            elif isinstance(ends, tmp_tensor_type):
-                ends_tensor = ends
-                ends_tensor.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-            return _legacy_C_ops.slice(
-                input,
-                starts_tensor,
-                ends_tensor,
-                None,
-                None,
-                'axes',
-                axes,
-                'infer_flags',
-                infer_flags,
-                *attrs,
-            )
     if not isinstance(starts, (list, tuple, Variable)):
         raise ValueError(
             "Input starts must be an Variable, python list or tuple."
...
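For reference, the `slice` API whose legacy branch is removed here selects the `starts:ends` range along each listed axis and leaves other axes whole. A small usage sketch, assuming Paddle 2.x:

import paddle

x = paddle.arange(24, dtype='float32').reshape([2, 3, 4])
# Rows 0:2 of axis 1 and columns 1:3 of axis 2; axis 0 is untouched.
out = paddle.slice(x, axes=[1, 2], starts=[0, 1], ends=[2, 3])
print(out.shape)  # [2, 2, 2]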
@@ -452,7 +380,9 @@ def slice(input, axes, starts, ends):
     elif isinstance(starts, (list, tuple)):
         attrs['starts'] = []
         if utils._contain_var(starts):
-            inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
+            inputs['StartsTensorList'] = utils._convert_to_tensor_list(
+                starts
+            )
             for i, dim in enumerate(starts):
                 if isinstance(dim, Variable):
                     attrs['starts'].append(-1)
...
@@ -545,10 +475,6 @@ def transpose(x, perm, name=None):
     if in_dygraph_mode():
         return _C_ops.transpose(x, perm)
     else:
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-            return out
         check_variable_and_dtype(
             x,
             'x',
...
@@ -625,14 +551,7 @@ def unstack(x, axis=0, num=None):
         if num == 0:
             return []
         return _C_ops.unstack(x, axis, num)
-    if _non_static_mode():
-        if num is None:
-            num = x.shape[axis]
-        if num == 0:
-            return []
-        return _legacy_C_ops.unstack(x, num, 'axis', int(axis), 'num', num)
     else:
         helper = LayerHelper('unstack', **locals())
         if num is None:
             if axis is None or x.shape[axis] <= 0:
...
@@ -959,12 +878,7 @@ def fill_(x, value):
             "The type of 'value' must be int or float, but received %s."
             % (type(value))
         )
     if in_dygraph_mode():
         return _C_ops.fill_(x, value)
-    else:
-        return _legacy_C_ops.fill_any_(
-            x, "value_float", float(value), "value_int", int(value)
-        )


 @dygraph_only
...
@@ -992,12 +906,7 @@ def zero_(x):
         print(tensor.tolist()) #[0, 0, 0, 0, 0]
     """
     if in_dygraph_mode():
         return _C_ops.fill_(x, 0.0)
-    else:
-        return _legacy_C_ops.fill_any_(
-            x, "value_float", 0.0, "value_int", int(0)
-        )


 @dygraph_only
...
@@ -1025,39 +934,11 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
         x.fill_diagonal_(1.0)
         print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
     """
-    helper = LayerHelper("fill_diagonal_", **locals())
-    check_type(x, 'X', (Variable), 'fill_diagonal_')
-    dtype = helper.input_dtype('x')
-    check_dtype(
-        dtype,
-        'X',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'fill_diagonal_',
-    )
-    check_type(value, 'value', (bool, int, float), 'fill_diagonal_')
-    check_type(wrap, 'wrap', (bool), 'fill_diagonal_')
-    inshape = x.shape
-    inshapeset = set(inshape)
-    assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_ API'
-    if len(inshape) > 2:
-        assert (
-            len(inshapeset) == 1
-        ), 'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
-    if in_dygraph_mode():
-        if len(inshape) == 2:
+    if len(x.shape) == 2:
         return _C_ops.fill_diagonal_(x, value, offset, wrap)
     return _C_ops.fill_diagonal_(x, value, offset, True)
-    if len(inshape) == 2:
-        return _legacy_C_ops.fill_diagonal_(
-            x, 'value', value, 'offset', offset, 'wrap', wrap
-        )
-    return _legacy_C_ops.fill_diagonal_(
-        x, 'value', value, 'offset', offset, 'wrap', True
-    )


 def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
     inshape = x.shape
...
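The simplified `fill_diagonal_` keeps its public behavior: it writes `value` onto the diagonal in place, with `offset` and `wrap` as optional keywords. A short check on a non-square input, assuming Paddle 2.x:

import paddle

x = paddle.ones([4, 3])
x.fill_diagonal_(0.0)  # in-place; with wrap=False the extra rows are untouched
print(x.numpy())
# [[0. 1. 1.]
#  [1. 0. 1.]
#  [1. 1. 0.]
#  [1. 1. 1.]]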
@@ -1087,18 +968,8 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
     y = y.reshape([1, -1])
     if inplace:
-        if in_dygraph_mode():
             return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
-        else:
-            return _legacy_C_ops.fill_diagonal_tensor_(
-                x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-            )
-    if in_dygraph_mode():
         return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
-    else:
-        return _legacy_C_ops.fill_diagonal_tensor(
-            x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-        )


 def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
...
@@ -1248,17 +1119,7 @@ def concat(x, axis=0, name=None):
         if not isinstance(input, Variable):
             input = [t for t in input if t.shape.count(0) == 0]
         return _C_ops.concat(input, axis)
-    if _in_legacy_dygraph():
-        if isinstance(axis, Variable):
-            axis = axis.numpy()
-            axis = axis.item(0)
-        if not isinstance(input, Variable):
-            input = [t for t in input if t.shape.count(0) == 0]
-        out = _varbase_creator()
-        _legacy_C_ops.concat(input, out, 'axis', axis)
-        return out
     else:
         check_type(input, 'input', (list, tuple, Variable), 'concat')
         if not isinstance(input, Variable):
             for id, x in enumerate(input):
...
@@ -1295,7 +1156,9 @@ def concat(x, axis=0, name=None):
             )
         helper = LayerHelper('concat', **locals())
-        out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
         if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
             # NOTE(liym27): Don't remove this if branch!
...
@@ -1304,7 +1167,8 @@ def concat(x, axis=0, name=None):
             assert len(input) == 1, (
                 "If the elements of 'input' in concat are Variable(LoDTensorArray), "
-                "number of the elements must be 1, but received %s." % len(input)
+                "number of the elements must be 1, but received %s."
+                % len(input)
             )
             out_index = helper.create_variable_for_type_inference(dtype="int32")
             helper.append_op(
...
@@ -1323,7 +1187,10 @@ def concat(x, axis=0, name=None):
             attrs['axis'] = axis
         helper.append_op(
-            type='concat', inputs=inputs, outputs={'Out': [out]}, attrs=attrs
+            type='concat',
+            inputs=inputs,
+            outputs={'Out': [out]},
+            attrs=attrs,
         )
         return out
...
@@ -1358,11 +1225,9 @@ def broadcast_tensors(input, name=None):
     """
     num_inputs = len(input)
-    if paddle.framework.in_dygraph_mode():
+    if in_dygraph_mode():
         return _C_ops.broadcast_tensors(input)
-    if paddle.framework._non_static_mode():
-        return _legacy_C_ops.broadcast_tensors(input, num_inputs)
     else:
         check_type(input, 'input', (list, tuple), 'broadcast_tensors')
         if num_inputs < 1:
             raise TypeError(
...
@@ -1428,7 +1293,10 @@ def broadcast_tensors(input, name=None):
         inputs = {'X': input}
         helper.append_op(
-            type='broadcast_tensors', inputs=inputs, outputs={'Out': out}, attrs={}
+            type='broadcast_tensors',
+            inputs=inputs,
+            outputs={'Out': out},
+            attrs={},
         )
         return out
...
@@ -1465,10 +1333,7 @@ def flip(x, axis, name=None):
     if in_dygraph_mode():
         return _C_ops.flip(x, axis)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.flip(x, "axis", axis)
     else:
         helper = LayerHelper("flip", **locals())
         check_type(x, 'X', (Variable), 'flip')
         dtype = helper.input_dtype('x')
...
@@ -1482,10 +1347,15 @@ def flip(x, axis, name=None):
         if name is None:
             out = helper.create_variable_for_type_inference(dtype)
         else:
-            out = helper.create_variable(name=name, dtype=dtype, persistable=False)
+            out = helper.create_variable(
+                name=name, dtype=dtype, persistable=False
+            )
         helper.append_op(
-            type="flip", inputs={"X": x}, outputs={"Out": out}, attrs={"axis": axis}
+            type="flip",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={"axis": axis},
         )
         return out
...
@@ -1705,13 +1575,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if in_dygraph_mode():
         return _C_ops.flatten(x, start_axis, stop_axis)
-    if _in_legacy_dygraph():
-        dy_out, _ = _legacy_C_ops.flatten_contiguous_range(
-            x, 'start_axis', start_axis, 'stop_axis', stop_axis
-        )
-        return dy_out
     else:
         helper = LayerHelper('flatten', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         x_shape = helper.create_variable_for_type_inference(x.dtype)
...
@@ -1760,12 +1624,6 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None):
     if in_dygraph_mode():
         return _C_ops.flatten_(x, start_axis, stop_axis)
-    if _in_legacy_dygraph():
-        dy_out, _ = _legacy_C_ops.flatten_contiguous_range_(
-            x, 'start_axis', start_axis, 'stop_axis', stop_axis
-        )
-        return dy_out


 def roll(x, shifts, axis=None, name=None):
     """
...
@@ -1830,10 +1688,7 @@ def roll(x, shifts, axis=None, name=None):
     if in_dygraph_mode():
         return _C_ops.roll(x, shifts, axis)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.roll(x, 'axis', axis, 'shifts', shifts)
     else:
         helper = LayerHelper("roll", **locals())
         check_type(axis, 'axis', (list, tuple), 'roll')
...
@@ -1947,10 +1802,7 @@ def stack(x, axis=0, name=None):
     if in_dygraph_mode():
         return _C_ops.stack(x, axis)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.stack(x, 'axis', axis)
     else:
         if not isinstance(x, list) and not isinstance(x, tuple):
             # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
             # In that case, Variable is array of tensors indeed.
...
@@ -2055,7 +1907,7 @@ def split(x, num_or_sections, axis=0, name=None):
     """
     input = x
     dim = axis
-    if _non_static_mode():
+    if in_dygraph_mode():
         num = None
         attrs = ()
...
@@ -2085,16 +1937,11 @@ def split(x, num_or_sections, axis=0, name=None):
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                 "received %s." % (type(num_or_sections))
             )
-        if in_dygraph_mode():
             if isinstance(num_or_sections, int):
                 return _C_ops.split_with_num(input, num_or_sections, dim)
            else:
                 return _C_ops.split(input, num_or_sections, dim)
-        elif _in_legacy_dygraph():
-            out = [_varbase_creator() for n in range(num)]
-            _legacy_C_ops.split(input, out, *attrs)
-            return out
     else:
         check_variable_and_dtype(
             input,
             'input',
...
@@ -2110,7 +1957,9 @@ def split(x, num_or_sections, axis=0, name=None):
             ],
             'split',
         )
-        check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
+        check_type(
+            num_or_sections, 'num_or_sections', (list, int, tuple), 'split'
+        )
         check_type(dim, 'dim', (int, Variable), 'split')
         if isinstance(dim, Variable):
             check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')
...
@@ -2119,7 +1968,9 @@ def split(x, num_or_sections, axis=0, name=None):
         input_shape = input.shape
         inputs = {'X': input}
-        attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}
+        attrs = {
+            'num': num_or_sections if isinstance(num_or_sections, int) else 0
+        }

         def _get_SectionsTensorList(one_list):
             tensor_list = []
...
@@ -2137,7 +1988,9 @@ def split(x, num_or_sections, axis=0, name=None):
                         % idx
                     )
                     unk_dim_idx = idx
-                temp_out = helper.create_variable_for_type_inference('int32')
+                temp_out = helper.create_variable_for_type_inference(
+                    'int32'
+                )
                 fill_constant(
                     [1], 'int32', dim_size, force_cpu=True, out=temp_out
                 )
...
@@ -2180,7 +2033,9 @@ def split(x, num_or_sections, axis=0, name=None):
         )
         outs = [
-            helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+            helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
             for i in range(num)
         ]
         helper.append_op(
...
@@ -2317,10 +2172,7 @@ def squeeze(x, axis=None, name=None):
     axes = axis
     if in_dygraph_mode():
         return _C_ops.squeeze(input, axes)
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.squeeze2(input, 'axes', axes)
-        return out
     else:
         helper = LayerHelper("squeeze", **locals())
         check_variable_and_dtype(
             input,
...
@@ -2379,9 +2231,6 @@ def squeeze_(x, axis=None, name=None):
     axes = axis
     if in_dygraph_mode():
         return _C_ops.squeeze_(input, axes)
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.squeeze2_(input, 'axes', axes)
-        return out


 def unique_consecutive(
...
@@ -2473,26 +2322,7 @@ def unique_consecutive(
         if len(outs) == 1:
             return outs[0]
         return tuple(outs)
-    elif paddle.in_dynamic_mode():
-        out, inverse, counts = _legacy_C_ops.unique_consecutive(
-            x,
-            'dtype',
-            attr_dtype,
-            'return_inverse',
-            return_inverse,
-            'return_counts',
-            return_counts,
-            'axis',
-            axis,
-        )
-        outs = [out]
-        if return_inverse:
-            outs.append(inverse)
-        if return_counts:
-            outs.append(counts)
-        if len(outs) == 1:
-            return outs[0]
-        return tuple(outs)
     else:
         check_variable_and_dtype(
             x,
             "input",
...
@@ -2527,7 +2357,10 @@ def unique_consecutive(
         if return_counts:
             outs.append(counts)
         helper.append_op(
-            type="unique_consecutive", inputs={"X": x}, attrs=attrs, outputs=outputs
+            type="unique_consecutive",
+            inputs={"X": x},
+            attrs=attrs,
+            outputs=outputs,
         )
         if len(outs) == 1:
             return outs[0]
...
@@ -2604,27 +2437,10 @@ def unique(
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if _non_static_mode():
-        if in_dygraph_mode():
+    if in_dygraph_mode():
         out, indices, inverse, counts = _C_ops.unique(
             x, return_index, return_inverse, return_counts, axis, attr_dtype
         )
-        if _in_legacy_dygraph():
-            out, inverse, indices, counts = _legacy_C_ops.unique(
-                x,
-                'dtype',
-                attr_dtype,
-                'return_index',
-                return_index,
-                'return_inverse',
-                return_inverse,
-                'return_counts',
-                return_counts,
-                'axis',
-                axis,
-                "is_sorted",
-                True,
-            )
         outs = [out]
         if return_index:
             outs.append(indices)
...
@@ -2637,7 +2453,7 @@ def unique(
             return outs[0]
         return tuple(outs)
     else:
         check_variable_and_dtype(
             x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
         )
...
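`unique` keeps returning its optional outputs in the order (out, index, inverse, counts), exactly as the dygraph branch above unpacks them. A usage sketch, assuming Paddle 2.x:

import paddle

x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
out, index, inverse, counts = paddle.unique(
    x, return_index=True, return_inverse=True, return_counts=True
)
print(out.numpy())     # [1 2 3 5]  (sorted unique values)
print(counts.numpy())  # [1 1 3 1]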
@@ -2741,7 +2557,7 @@ def unsqueeze(x, axis, name=None):
     """
     input = x
     axes = axis
-    if _non_static_mode():
+    if in_dygraph_mode():
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
...
@@ -2751,11 +2567,8 @@ def unsqueeze(x, axis, name=None):
                 item.numpy().item(0) if isinstance(item, Variable) else item
                 for item in axes
             ]
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes)
-            return out
         return _C_ops.unsqueeze(input, axes)
     else:
         check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
         check_variable_and_dtype(
             input,
...
@@ -2818,10 +2631,7 @@ def unsqueeze_(x, axis, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in axes
         ]
     if in_dygraph_mode():
         return _C_ops.unsqueeze_(input, axes)
-    out, _ = _legacy_C_ops.unsqueeze2_(input, 'axes', axes)
-    return out


 def gather(x, index, axis=None, name=None):
...
@@ -2874,16 +2684,19 @@ def gather(x, index, axis=None, name=None):
     if in_dygraph_mode():
         return _C_ops.gather(x, index, axis)
-    if _in_legacy_dygraph():
-        axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
-        return _legacy_C_ops.gather(
-            x, index, None, "axis", axis, "overwrite", False
-        )
     else:
         check_variable_and_dtype(
             x,
             'x',
-            ['float16', 'float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            [
+                'float16',
+                'float32',
+                'float64',
+                'int16',
+                'int32',
+                'int64',
+                'uint8',
+            ],
             'gather',
         )
         check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
...
@@ -2945,19 +2758,17 @@ def unbind(input, axis=0):
     """
     if in_dygraph_mode():
         return _C_ops.unbind(input, axis)
     else:
         if not isinstance(axis, (int)):
             raise TypeError(
-                "The type of 'axis' must be int, but received %s." % (type(axis))
+                "The type of 'axis' must be int, but received %s."
+                % (type(axis))
             )
         if isinstance(axis, np.generic):
             axis = np.asscalar(axis)
         input_shape = input.shape
         axis_ = axis if axis >= 0 else len(input_shape) + axis
         num = input_shape[axis_]
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.unbind(input, num, 'axis', axis)
         helper = LayerHelper("unbind", **locals())
         check_type(input, 'input', (Variable), 'unbind')
         dtype = helper.input_dtype()
...
@@ -2965,7 +2776,9 @@ def unbind(input, axis=0):
             dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind'
         )
         outs = [
-            helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+            helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
             for i in range(num)
         ]
         helper.append_op(
...
@@ -3053,11 +2866,6 @@ def scatter(x, index, updates, overwrite=True, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.scatter(x, index, updates, overwrite)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.scatter(
-                x, index, updates, 'overwrite', overwrite
-            )
-        else:
             check_variable_and_dtype(
                 x,
...
@@ -3083,9 +2891,7 @@ def scatter_(x, index, updates, overwrite=True, name=None):
     Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_paddle_tensor_scatter`.
     """
-    if in_dygraph_mode():
-        return _C_ops.scatter_(x, index, updates, overwrite)
-    return _legacy_C_ops.scatter_(x, index, updates, 'overwrite', overwrite)
+    return _C_ops.scatter_(x, index, updates, overwrite)


 def scatter_nd_add(x, index, updates, name=None):
...
@@ -3159,10 +2965,6 @@ def scatter_nd_add(x, index, updates, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.scatter_nd_add(x, index, updates)
     else:
-        if _in_legacy_dygraph():
-            op = getattr(_legacy_C_ops, 'scatter_nd_add')
-            return op(x, index, updates)
-        else:
             if x.dtype != updates.dtype:
                 raise ValueError("x and updates must have same data type.")
...
@@ -3307,11 +3109,10 @@ def tile(x, repeat_times, name=None):
             repeat_times = repeat_times.numpy().tolist()
         return _C_ops.tile(x, repeat_times)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tile(x, 'repeat_times', repeat_times)
-    check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
+    else:
+        check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
         if isinstance(repeat_times, Variable):
             assert (
                 len(repeat_times.shape) == 1
...
@@ -3404,12 +3205,12 @@ def expand_as(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expand_as(x, None, y.shape)
-    if _non_static_mode():
-        return _legacy_C_ops.expand_as_v2(x, 'target_shape', y.shape)
     else:
         check_variable_and_dtype(
-            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as'
+            x,
+            'x',
+            ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'expand_as',
         )
         check_type(y, 'y', Variable, 'expand_as')
...
@@ -3463,9 +3264,7 @@ def broadcast_to(x, shape, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expand(x, shape)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expand_v2(x, 'shape', shape)
     else:
         if isinstance(shape, Variable):
             assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.'
         else:
...
@@ -3481,7 +3280,10 @@ def broadcast_to(x, shape, name=None):
             ), 'Elements in shape must be 1-D Tensors or integers.'
         check_variable_and_dtype(
-            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to'
+            x,
+            'x',
+            ['bool', 'float32', 'float64', 'int32', 'int64'],
+            'broadcast_to',
         )
         check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
         if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
...
@@ -3557,10 +3359,7 @@ def expand(x, shape, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expand(x, shape)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.expand_v2(x, 'shape', shape)
     else:
         if isinstance(shape, Variable):
             assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.'
         else:
...
@@ -3710,25 +3509,6 @@ def reshape(x, shape, name=None):
         return out
     else:
-        if _in_legacy_dygraph():
-            tmp_tensor_type = Variable
-            if isinstance(shape, (list, tuple)):
-                shape = [
-                    item.numpy().item(0) if isinstance(item, Variable) else item
-                    for item in shape
-                ]
-                out, _ = _legacy_C_ops.reshape2(x, None, 'shape', shape)
-            elif isinstance(shape, tmp_tensor_type):
-                shape.stop_gradient = True
-                out, _ = _legacy_C_ops.reshape2(x, shape)
-            else:
-                raise ValueError(
-                    "shape must be an instance of `list`, `tuple` or `Variable`,"
-                    " got '{}.'".format(type(shape))
-                )
-            return out
         check_variable_and_dtype(
             x,
             'x',
...
@@ -3745,7 +3525,9 @@ def reshape(x, shape, name=None):
             'reshape',
         )
         check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
-        check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')
+        check_type(
+            actual_shape, 'actual_shape', (Variable, type(None)), 'reshape'
+        )
         helper = LayerHelper("reshape2", **locals())
...
@@ -3844,24 +3626,6 @@ def reshape_(x, shape, name=None):
         )
         return out
     else:
-        if isinstance(shape, (list, tuple)):
-            shape = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
-                for item in shape
-            ]
-            out, _ = _legacy_C_ops.reshape2_(x, None, 'shape', shape)
-            return out
-        elif isinstance(shape, Variable):
-            shape.stop_gradient = True
-            # NOTE(pangyoki): Cannot support the case where the shape Tensor
-            # is negative. In the infer_shape stage, the input's dim will
-            # be changed to a negative number.
-            # Thus, convert Shape Tensor to list firstly and then call
-            # reshape inplace op.
-            shape_list = shape.numpy().tolist()
-            out, _ = _legacy_C_ops.reshape2_(x, None, 'shape', shape_list)
-            return out


 def gather_nd(x, index, name=None):
...
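`reshape_` is the in-place twin of `reshape`, and both accept -1 (infer one dimension) and 0 (copy the corresponding input dimension) inside `shape`. A quick check with the out-of-place form, assuming Paddle 2.x:

import paddle

x = paddle.rand([2, 4, 6])
print(paddle.reshape(x, [-1, 12]).shape)  # [4, 12]
print(paddle.reshape(x, [0, -1]).shape)   # [2, 24]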
@@ -3939,15 +3703,15 @@ def gather_nd(x, index, name=None):
     if in_dygraph_mode():
         return _C_ops.gather_nd(x, index)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.gather_nd(x, index)
         check_variable_and_dtype(
             x,
             'x',
             ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'],
             'gather_np',
         )
-        check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np')
+        check_variable_and_dtype(
+            index, 'index', ['int32', 'int64'], 'gather_np'
+        )
         helper = LayerHelper('gather_nd', **locals())
         dtype = helper.input_dtype()
         output = helper.create_variable_for_type_inference(dtype)
...
@@ -4043,7 +3807,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.strided_slice(x, axes, starts, ends, strides)
     else:
         helper = LayerHelper('strided_slice', **locals())
         check_variable_and_dtype(
...
@@ -4066,7 +3830,9 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
             for i, var in enumerate(list_input):
                 var_name = input_name + '[' + str(i) + ']'
                 if isinstance(var, Variable):
-                    check_dtype(var.dtype, var_name, ['int32'], 'strided_slice')
+                    check_dtype(
+                        var.dtype, var_name, ['int32'], 'strided_slice'
+                    )

         check_list_elements_dtype(axes, 'axes')
         check_list_elements_dtype(starts, 'starts')
...
@@ -4081,25 +3847,18 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
                     new_list_tensor.append(dim)
                 else:
                     assert isinstance(dim, int)
-                    temp_out = helper.create_variable_for_type_inference('int32')
-                    fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
+                    temp_out = helper.create_variable_for_type_inference(
+                        'int32'
+                    )
+                    fill_constant(
+                        [1], 'int32', dim, force_cpu=True, out=temp_out
+                    )
                     new_list_tensor.append(temp_out)
             return new_list_tensor

         inputs = {'Input': x}
         attrs = {'axes': axes}
         infer_flags = list(1 for i in range(len(axes)))
-        if _in_legacy_dygraph():
-            inputs = {'Input': x}
-            attrs = {
-                'axes': axes,
-                'starts': starts,
-                'ends': ends,
-                'strides': strides,
-                'infer_flags': infer_flags,
-            }
-        else:
             # starts
             if isinstance(starts, Variable):
                 starts.stop_gradient = True
...
@@ -4155,7 +3914,10 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
             dtype=helper.input_dtype('x')
         )
         helper.append_op(
-            type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
+            type='strided_slice',
+            inputs=inputs,
+            attrs=attrs,
+            outputs={'Out': out},
         )
         return out
...
@@ -4281,7 +4043,7 @@ def tensordot(x, y, axes=2, name=None):
     check_type(axes, 'axes', (int, tuple, list, Variable), op_type)

     def _var_to_list(var):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             return tolist(var)
         raise TypeError(
             "The 'axes' with type 'Tensor' in "
...
@@ -4409,9 +4171,7 @@ def as_complex(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.as_complex(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.as_complex(x)
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
         op_type = "as_complex"
         helper = LayerHelper(op_type, **locals())
...
@@ -4421,7 +4181,9 @@ def as_complex(x, name=None):
         )
         outputs = {"Out": out}
         attrs = {}
-        helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
+        helper.append_op(
+            type=op_type, inputs=inputs, attrs=attrs, outputs=outputs
+        )
         return out
...
@@ -4462,9 +4224,7 @@ def as_real(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.as_real(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.as_real(x)
     else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
         op_type = "as_real"
         helper = LayerHelper(op_type, **locals())
...
@@ -4633,11 +4393,7 @@ def moveaxis(x, source, destination, name=None):
     if in_dygraph_mode():
         out = _C_ops.transpose(x, perm)
         return out
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-        return out
     else:
         check_variable_and_dtype(
             x,
             'x',
...
@@ -4727,15 +4483,14 @@ def take_along_axis(arr, indices, axis):
     if not broadcast_shape:
         # if indices matrix have larger size than arr, arr should broadcast into indices shape.
         broadcast_shape = indices.shape
-    if _non_static_mode():
+    if in_dygraph_mode():
         indices = paddle.broadcast_to(indices, broadcast_shape)
         broadcast_shape_list = list(broadcast_shape)
         broadcast_shape_list[axis] = list(arr.shape)[axis]
         broadcast_shape = tuple(broadcast_shape_list)
         arr = paddle.broadcast_to(arr, broadcast_shape)
-        if not _in_legacy_dygraph():
-            return _C_ops.take_along_axis(arr, indices, axis)
-        return _legacy_C_ops.take_along_axis(arr, indices, 'Axis', axis)
+        return _C_ops.take_along_axis(arr, indices, axis)
     else:
         check_variable_and_dtype(
             arr,
             'x',
...
@@ -4797,7 +4552,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
     )
     axis = non_negative_axis(arr, axis)
     broadcast_shape = infer_broadcast_shape(arr, indices, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
         values = (
             paddle.to_tensor(values)
             if not isinstance(values, paddle.Tensor)
...
@@ -4806,12 +4561,8 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
         if broadcast_shape:
             indices = paddle.broadcast_to(indices, broadcast_shape)
         values = paddle.broadcast_to(values, indices.shape)
-        if in_dygraph_mode():
             return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
-        return _legacy_C_ops.put_along_axis(
-            arr, indices, values, "Axis", axis, "Reduce", reduce
-        )
     else:
         check_variable_and_dtype(
             arr,
             'x',
...
@@ -4856,11 +4607,7 @@ def put_along_axis_(arr, indices, values, axis, reduce='assign'):
     if broadcast_shape:
         indices = paddle.broadcast_to(indices, broadcast_shape)
     values = paddle.broadcast_to(values, indices.shape)
-    if in_dygraph_mode():
         return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)
-    return _legacy_C_ops.put_along_axis_(
-        arr, indices, values, "Axis", axis, "Reduce", reduce
-    )


 def index_add(x, index, axis, value, name=None):
...
python/paddle/tensor/math.py
Browse file @ 861fef52
...
@@ -34,9 +34,6 @@ from ..fluid.data_feeder import (
 from ..fluid.layers import utils
 from ..framework import (
     LayerHelper,
-    _in_legacy_dygraph,
-    _non_static_mode,
-    _varbase_creator,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
@@ -158,9 +155,7 @@ def log(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log(x)
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
         inputs = {'X': [x]}
         helper = LayerHelper('log', **locals())
...
@@ -220,19 +215,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     if in_dygraph_mode():
         out = _C_ops.scale(x, scale, float(bias), bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out, act)
-    elif _in_legacy_dygraph():
-        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        out = _legacy_C_ops.scale(
-            x,
-            'scale',
-            float(_scale),
-            'bias',
-            float(bias),
-            'bias_after_scale',
-            bias_after_scale,
-        )
-        return dygraph_utils._append_activation_in_dygraph(out, act)
     else:
         check_variable_and_dtype(
             x,
             "x",
...
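For reference, `scale` computes out = scale * x + bias when `bias_after_scale` is True (the default) and out = scale * (x + bias) otherwise. A numeric check of both readings, assuming Paddle 2.x:

import paddle

x = paddle.to_tensor([1.0, 2.0])
print(paddle.scale(x, scale=3.0, bias=1.0).numpy())
# [4. 7.]  -> 3*x + 1
print(paddle.scale(x, scale=3.0, bias=1.0, bias_after_scale=False).numpy())
# [6. 9.]  -> 3*(x + 1)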
@@ -295,10 +278,12 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'stanh'
+        )
         helper = LayerHelper('stanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -363,9 +348,7 @@ def multiplex(inputs, index, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.multiplex(inputs, index)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.multiplex(index, inputs)
     else:
         helper = LayerHelper('multiplex', **locals())
         check_type(inputs, 'inputs', (list), 'multiplex')
...
@@ -380,7 +363,9 @@ def multiplex(inputs, index, name=None):
             ['float32', 'float64', 'int32', 'int64'],
             'multiplex',
         )
-        check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
+        check_variable_and_dtype(
+            index, "index", ['int32', 'int64'], 'multiplex'
+        )
         out = helper.create_variable_for_type_inference(inputs[0].dtype)
         helper.append_op(
...
@@ -399,17 +384,6 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.scale_(x, scale, float(bias), bias_after_scale)
-    if _in_legacy_dygraph():
-        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        return _legacy_C_ops.scale_(
-            x,
-            'scale',
-            float(_scale),
-            'bias',
-            float(bias),
-            'bias_after_scale',
-            bias_after_scale,
-        )


 def pow(x, y, name=None):
...
@@ -469,17 +443,7 @@ def pow(x, y, name=None):
         raise TypeError(
             'y must be scalar or tensor type, but received: %s ' % (y.dtype)
         )
-    if _in_legacy_dygraph():
-        if isinstance(y, (int, float)):
-            return _legacy_C_ops.pow(x, 'factor', y)
-        elif isinstance(y, (paddle.Tensor, Variable)):
-            return _elementwise_op_in_dygraph(
-                x, y, axis=-1, act=None, op_name='elementwise_pow'
-            )
-        else:
-            raise TypeError(
-                'y must be scalar or tensor type, but received: %s ' % (y.dtype)
-            )
     # in static graph mode
     if isinstance(y, (int, float)):
         helper = LayerHelper('pow', **locals())
...
@@ -531,11 +495,6 @@ def _elementwise_op_in_dygraph(
         OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
     )
     out = op(x, y)
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
     return dygraph_utils._append_activation_in_dygraph(
         out, act, use_mkldnn=use_mkldnn
     )
...
@@ -642,9 +601,6 @@ def add(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.add(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.elementwise_add(x, y)
-        else:
             return _elementwise_op(LayerHelper('elementwise_add', **locals()))
...
@@ -734,11 +690,6 @@ def subtract(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.subtract(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -806,11 +757,6 @@ def divide(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.divide(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -853,9 +799,7 @@ def floor_divide(x, y, name=None):
     axis = -1
     if in_dygraph_mode():
         return _C_ops.floor_divide(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -897,9 +841,7 @@ def remainder(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.remainder(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -970,11 +912,6 @@ def multiply(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.multiply(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             if x.dtype != y.dtype:
                 raise TypeError(
...
@@ -1017,11 +954,6 @@ def _add_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_add'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1034,11 +966,6 @@ def _subtract_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_sub'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1051,11 +978,6 @@ def _multiply_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_mul'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1066,11 +988,6 @@ def _divide_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_div'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
             return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1135,10 +1052,7 @@ def maximum(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.maximum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1203,10 +1117,7 @@ def minimum(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.minimum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1273,10 +1184,7 @@ def fmax(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.fmax(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1343,10 +1251,7 @@ def fmin(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.fmin(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
     else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
@@ -1417,35 +1322,8 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.sum(x, axis, dtype, keepdim)
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        if dtype_flag:
-            return _legacy_C_ops.reduce_sum(
-                x,
-                'dim',
-                axis,
-                'keep_dim',
-                keepdim,
-                'reduce_all',
-                reduce_all,
-                'in_dtype',
-                x.dtype,
-                'out_dtype',
-                dtype,
-            )
-        else:
-            return _legacy_C_ops.reduce_sum(
-                x,
-                'dim',
-                axis,
-                'keep_dim',
-                keepdim,
-                'reduce_all',
-                reduce_all,
-            )
+    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
     attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
     if dtype_flag:
...
@@ -1468,7 +1346,9 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
         'sum',
     )
-    check_type(axis, 'axis', (int, list, tuple, type(None), Variable), 'sum')
+    check_type(
+        axis, 'axis', (int, list, tuple, type(None), Variable), 'sum'
+    )
     helper = LayerHelper('sum', **locals())
     if dtype_flag:
...
@@ -1476,7 +1356,10 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     else:
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
-        type='reduce_sum', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+        type='reduce_sum',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs=attrs,
     )
     return out
...
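`sum` keeps its reduce semantics across the rewrite: `axis=None` reduces everything, a listed axis reduces that dimension, and `keepdim` preserves rank. A usage sketch, assuming Paddle 2.x:

import paddle

x = paddle.to_tensor([[1, 2], [3, 4]], dtype='float32')
print(float(paddle.sum(x)))                        # 10.0
print(paddle.sum(x, axis=0).numpy())               # [4. 6.]
print(paddle.sum(x, axis=1, keepdim=True).shape)   # [2, 1]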
@@ -1784,11 +1667,7 @@ def add_n(inputs, name=None):
         if isinstance(inputs, Variable):
             inputs = [inputs]
         return _C_ops.add_n(inputs)
-    if _in_legacy_dygraph():
-        if isinstance(inputs, Variable):
-            inputs = [inputs]
-        return _legacy_C_ops.sum(inputs, 'use_mkldnn', False)
     else:
         helper = LayerHelper('add_n', **locals())
         check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
         if isinstance(inputs, list) or isinstance(inputs, tuple):
...
@@ -1851,9 +1730,6 @@ def trunc(input, name=None):
     '''
     if in_dygraph_mode():
         return _C_ops.trunc(input)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.trunc(input)
-        else:
             inputs = {"X": input}
             attrs = {}
...
@@ -1939,8 +1815,7 @@ def mm(input, mat2, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matmul(input, mat2, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(input, mat2)
     else:

         def __check_input(x, y):
             var_names = {'x': x, 'y': y}
...
@@ -1983,7 +1858,9 @@ def mm(input, mat2, name=None):
         helper = LayerHelper('mm', **locals())
         out = helper.create_variable_for_type_inference(dtype=input.dtype)
         helper.append_op(
-            type='matmul_v2', inputs={'X': input, 'Y': mat2}, outputs={'Out': out}
+            type='matmul_v2',
+            inputs={'X': input, 'Y': mat2},
+            outputs={'Out': out},
         )
         return out
...
@@ -2079,10 +1956,6 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     if in_dygraph_mode():
         return _C_ops.addmm(input, x, y, beta, alpha)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
-            return out
-        else:
             inputs = {'Input': input, "X": x, "Y": y}
             attrs = {'Alpha': alpha, 'Beta': beta}
...
@@ -2154,12 +2027,7 @@ def renorm(x, p, axis, max_norm):
     if in_dygraph_mode():
         out = _C_ops.renorm(x, p, axis, max_norm)
         return out
-    elif _in_legacy_dygraph():
-        out = _legacy_C_ops.renorm(
-            x, 'p', p, 'axis', axis, 'max_norm', max_norm
-        )
-        return out
     else:
         inputs = {'X': x}
         attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
...
@@ -2213,8 +2081,7 @@ def inner(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny.T).reshape(dstshape)
     else:

        def __check_input(x, y):
             var_names = {'x': x, 'y': y}
...
@@ -2240,7 +2107,9 @@ def inner(x, y, name=None):
         helper = LayerHelper('inner', **locals())
         out = helper.create_variable_for_type_inference(dtype=nx.dtype)
         helper.append_op(
-            type='matmul_v2', inputs={'X': nx, 'Y': ny.T}, outputs={'Out': out}
+            type='matmul_v2',
+            inputs={'X': nx, 'Y': ny.T},
+            outputs={'Out': out},
         )
         return out.reshape(dstshape)
...
@@ -2279,8 +2148,7 @@ def outer(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny)
     else:

         def __check_input(x, y):
             var_names = {'x': x, 'y': y}
...
@@ -2345,11 +2213,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logsumexp(
-            x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all
-        )
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')
         helper = LayerHelper('logsumexp', **locals())
...
@@ -2390,8 +2254,7 @@ def inverse(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.inverse(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.inverse(x)
     else:

         def _check_input(x):
             check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
...
@@ -2491,12 +2354,8 @@ def max(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.max(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_max(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         helper = LayerHelper('max', **locals())
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
...
@@ -2593,13 +2452,8 @@ def min(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.min(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_min(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         helper = LayerHelper('min', **locals())
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min'
...
@@ -2707,12 +2561,8 @@ def amax(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amax(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_amax(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         helper = LayerHelper('amax', **locals())
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
...
@@ -2821,11 +2671,8 @@ def amin(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amin(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_amin(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         helper = LayerHelper('amin', **locals())
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
...
@@ -2867,9 +2714,7 @@ def log1p(x, name=None):
     if in_dygraph_mode():
         return _C_ops.log1p(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log1p(x)
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
         inputs = {'X': [x]}
         helper = LayerHelper('log1p', **locals())
...
@@ -2919,10 +2764,10 @@ def log2(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log2(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log2(x)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "log2"
+        )
         inputs = {'X': [x]}
         helper = LayerHelper('log2', **locals())
         dtype = helper.input_dtype(input_param_name='x')
...
@@ -2971,10 +2816,10 @@ def log10(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log10(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log10(x)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "log10"
+        )
         inputs = {'X': [x]}
         helper = LayerHelper('log10', **locals())
         dtype = helper.input_dtype(input_param_name='x')
...
@@ -3038,16 +2883,7 @@ def clip(x, min=None, max=None, name=None):
         min = min_ if min is None else min
         max = max_ if max is None else max
         return _C_ops.clip(x, min, max)
-    if _in_legacy_dygraph():
-        if isinstance(min, Variable):
-            min = min.numpy().item(0)
-        if isinstance(max, Variable):
-            max = max.numpy().item(0)
-        min = min_ if min is None else min
-        max = max_ if max is None else max
-        return _legacy_C_ops.clip(x, "min", min, "max", max)
     else:
         if min is not None:
             check_type(min, 'min', (float, int, Variable), 'clip')
             if isinstance(min, Variable):
...
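`clip` is an elementwise clamp; `min` and `max` may be Python scalars or tensors, as the type checks above allow. A quick check, assuming Paddle 2.x:

import paddle

x = paddle.to_tensor([-1.5, 0.3, 2.7])
print(paddle.clip(x, min=0.0, max=1.0).numpy())  # [0.  0.3 1. ]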
@@ -3117,9 +2953,6 @@ def clip_(x, min=None, max=None, name=None):
     if in_dygraph_mode():
         return _C_ops.clip_(x, min, max)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.clip_(x, "min", min, "max", max)


 def trace(x, offset=0, axis1=0, axis2=1, name=None):
     """
...
@@ -3196,12 +3029,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.trace(x, offset, axis1, axis2)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.trace(
-            x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-        )
     else:
         __check_input(x, offset, axis1, axis2)
         helper = LayerHelper('trace', **locals())
...
@@ -3284,10 +3112,6 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.diagonal(x, offset, axis1, axis2)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.diagonal(
-                x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-            )

         def __check_input(x, offset, axis1, axis2):
             check_dtype(
...
@@ -3363,10 +3187,9 @@ def kron(x, y, name=None):
             # [12, 15, 18, 16, 20, 24],
             # [21, 24, 27, 28, 32, 36]])
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.kron(x, y)
     if in_dygraph_mode():
         return _C_ops.kron(x, y)
-    return _legacy_C_ops.kron(x, y)
+    else:
         helper = LayerHelper('kron', **locals())
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
...
@@ -3376,7 +3199,9 @@ def kron(x, y, name=None):
         )
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
+        helper.append_op(
+            type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}
+        )
         return out
...
@@ -3432,12 +3257,7 @@ def cumsum(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.cumsum(x, axis, flatten, False, False)
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.cumsum(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
     check_type(x, 'x', (Variable), 'cumsum')
     locals_var = locals().copy()
     kwargs = dict()
...
@@ -3507,14 +3327,7 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.logcumsumexp(x, axis, flatten, False, False)
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.logcumsumexp(
-                x, 'axis', axis, 'flatten', flatten
-            )
     check_variable_and_dtype(
         x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp"
     )
...
if
in_dygraph_mode
():
return
_C_ops
.
cumprod
(
x
,
dim
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
cumprod
(
x
,
'dim'
,
dim
)
else
:
check_variable_and_dtype
(
x
,
"x"
,
...
...
@@ -3631,14 +3442,18 @@ def isfinite(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isfinite(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isfinite_v2(x)
     else:
         helper = LayerHelper("isfinite_v2", **locals())
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite'
+            x,
+            'x',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'isfinite',
         )
         out = helper.create_variable_for_type_inference('bool')
-        helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
+        helper.append_op(
+            type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}
+        )
         return out
...
@@ -3665,8 +3480,7 @@ def isinf(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isinf(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isinf_v2(x)
     else:
         helper = LayerHelper("isinf_v2", **locals())
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
...
@@ -3699,9 +3513,7 @@ def isnan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isnan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isnan_v2(x)
     else:
         helper = LayerHelper("isnan_v2", **locals())
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
...
@@ -3775,17 +3587,17 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
     if in_dygraph_mode():
         return _C_ops.prod(x, axis, keepdim, reduce_all)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_prod(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
     else:
         helper = LayerHelper('reduce_prod', **locals())
         check_variable_and_dtype(
-            x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod'
+            x,
+            'x/input',
+            ['float32', 'float64', 'int32', 'int64'],
+            'reduce_prod',
         )
-        out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
         helper.append_op(
             type='reduce_prod',
             inputs={'X': x},
...
@@ -3817,11 +3629,10 @@ def sign(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sign(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sign(x)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sign'
+        )
         helper = LayerHelper("sign", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
@@ -3857,11 +3668,10 @@ def tanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tanh(x)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'tanh'
+        )
         check_type(x, 'x', (Variable), 'tanh')
         helper = LayerHelper('tanh', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
...
@@ -3875,9 +3685,7 @@ def tanh_(x, name=None):
     Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_tanh`.
     """
-    if in_dygraph_mode():
-        return _C_ops.tanh_(x)
-    return _legacy_C_ops.tanh_(x)
+    return _C_ops.tanh_(x)


 def increment(x, value=1.0, name=None):
...
@@ -3905,10 +3713,7 @@ def increment(x, value=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.increment_(x, value)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.increment(x, 'step', value)
     else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
         )
...
@@ -3973,13 +3778,8 @@ def all(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.all(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_all(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         attrs = {
             'dim': axis,
             'keep_dim': keepdim,
...
@@ -3992,7 +3792,10 @@ def all(x, axis=None, keepdim=False, name=None):
         helper = LayerHelper('all', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
-            type='reduce_all', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+            type='reduce_all',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
         )
         return out
...
@@ -4049,13 +3852,8 @@ def any(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.any(x, axis, keepdim)
     else:
         reduce_all, axis = _get_reduce_axis(axis, x)
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.reduce_any(
-                x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-            )
         attrs = {
             'dim': axis,
             'keep_dim': keepdim,
...
@@ -4069,7 +3867,10 @@ def any(x, axis=None, keepdim=False, name=None):
         helper = LayerHelper('any', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
-            type='reduce_any', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+            type='reduce_any',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
        )
         return out
...
@@ -4137,10 +3938,7 @@ def conj(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.conj(x)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.conj(x)
     else:
         check_variable_and_dtype(
             x,
             "x",
...
@@ -4149,7 +3947,9 @@ def conj(x, name=None):
         )
         helper = LayerHelper('conj', **locals())
-        out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
         helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
         return out
...
@@ -4184,9 +3984,6 @@ def digamma(x, name=None):
     if in_dygraph_mode():
         return _C_ops.digamma(x)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.digamma(x)
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
         helper = LayerHelper('digamma', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
...
@@ -4221,9 +4018,7 @@ def lgamma(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.lgamma(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.lgamma(x)
     else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
         helper = LayerHelper('lgamma', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
...
@@ -4303,9 +4098,6 @@ def atan2(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.atan2(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.atan2(x, y)
-        else:
         check_variable_and_dtype(
             x,
...
...
@@ -4367,18 +4159,21 @@ def logit(x, eps=None, name=None):
             # [-1.0277, -4.5365, -0.9544, -1.3269,  1.4468]
     """
     if eps is None:
         eps = 0.0
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logit(x, 'eps', eps)
     if in_dygraph_mode():
         return _C_ops.logit(x, eps)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'logit'
+        )
         helper = LayerHelper("logit", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
-            type='logit', inputs={'X': x}, outputs={'Out': out}, attrs={'eps': eps}
+            type='logit',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'eps': eps},
         )
         return out
...
...
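Note the ordering fix as well: the eager check now comes first instead of the removed legacy check. Usage is unchanged:

    import paddle

    x = paddle.to_tensor([0.0, 0.25, 1.0])
    print(paddle.logit(x))            # [-inf, -1.0986123, inf]
    print(paddle.logit(x, eps=1e-6))  # inputs are clipped to [eps, 1 - eps] first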
@@ -4419,17 +4214,15 @@ def lerp(x, y, weight, name=None):
             weight = paddle.to_tensor(weight, dtype=x.dtype)
         return _C_ops.lerp(x, y, weight)
-    if _in_legacy_dygraph():
-        if isinstance(weight, float):
-            weight = paddle.to_tensor(weight, dtype=x.dtype)
-        return _legacy_C_ops.lerp(x, y, weight)
-
-    if isinstance(weight, float):
-        weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
-    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
-    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
+    else:
+        if isinstance(weight, float):
+            weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(
+            weight, 'weight', ['float32', 'float64'], 'lerp'
+        )
         helper = LayerHelper('lerp', **locals())
         inputs = {'X': x, 'Y': y, 'Weight': weight}
...
...
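A float `weight` is still wrapped into a tensor on both surviving paths (`paddle.to_tensor` in eager mode, `paddle.full` in static mode), so the kernel always sees a Tensor:

    import paddle

    x = paddle.zeros([3])
    y = paddle.full([3], 10.0)
    print(paddle.lerp(x, y, 0.3))  # [3., 3., 3.] == x + 0.3 * (y - x)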
@@ -4456,9 +4249,7 @@ def lerp_(x, y, weight, name=None):
                 out_shape, x.shape
             )
         )
-    if in_dygraph_mode():
-        return _C_ops.lerp_(x, y, weight)
-    return _legacy_C_ops.lerp_(x, y, weight)
+    return _C_ops.lerp_(x, y, weight)


 def erfinv(x, name=None):
...
...
@@ -4488,12 +4279,8 @@ def erfinv(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.erfinv(x)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.erfinv(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
         helper = LayerHelper('erfinv', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
...
...
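A quick round-trip check for the simplified `erfinv`, using the identity erf(erfinv(x)) == x:

    import paddle

    x = paddle.to_tensor([0.0, 0.5, -0.9])
    y = paddle.erfinv(x)
    assert paddle.allclose(paddle.erf(y), x, atol=1e-6)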
@@ -4507,9 +4294,7 @@ def erfinv_(x, name=None):
     Please refer to :ref:`api_tensor_erfinv`.
     """
     check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
-    if in_dygraph_mode():
-        return _C_ops.erfinv_(x)
-    return _legacy_C_ops.erfinv_(x)
+    return _C_ops.erfinv_(x)


 def rad2deg(x, name=None):
...
...
@@ -4558,10 +4343,6 @@ def rad2deg(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, rad2deg_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', rad2deg_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg'
...
...
@@ -4626,10 +4407,6 @@ def deg2rad(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, deg2rad_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', deg2rad_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad'
...
...
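Both conversions keep the int-to-float32 cast before calling the scale kernel, so integer inputs still work in eager mode:

    import math
    import paddle

    print(paddle.rad2deg(paddle.to_tensor(math.pi)))               # 180.0
    print(paddle.deg2rad(paddle.to_tensor([180], dtype='int64')))  # [3.1415927]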
@@ -4729,7 +4506,7 @@ def gcd(x, y, name=None):
         )
         return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         while _gcd_cond_fn(x, y):
             x, y = _gcd_body_fn(x, y)
...
...
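In eager mode the Euclidean algorithm runs as a plain Python `while` loop over `_gcd_cond_fn`/`_gcd_body_fn`; presumably the static-graph path keeps its `while_loop` construction. For example:

    import paddle

    print(paddle.gcd(paddle.to_tensor(12), paddle.to_tensor(20)))  # 4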
@@ -4907,68 +4684,6 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
             return _C_ops.logical_xor(input_back, input_front)
         else:
             return _C_ops.subtract(input_back, input_front)
-    elif _in_legacy_dygraph():
-        has_pend = False
-        input_list = []
-        if prepend is not None and append is not None:
-            input_list = [prepend, x, append]
-            has_pend = True
-        elif prepend is not None:
-            input_list = [prepend, x]
-            has_pend = True
-        elif append is not None:
-            input_list = [x, append]
-            has_pend = True
-        if has_pend:
-            new_input = _varbase_creator()
-            _legacy_C_ops.concat(input_list, new_input, 'axis', axis)
-        else:
-            new_input = x
-
-        attrs_1 = ()
-        attrs_2 = ()
-        dim_len = new_input.shape[axis]
-        starts_1 = [0]
-        attrs_1 += ('starts', starts_1)
-        ends_1 = [dim_len - 1]
-        attrs_1 += ('ends', ends_1)
-        input_front = _legacy_C_ops.slice(
-            new_input, None, None, None, None,
-            'axes', axes, 'infer_flags', infer_flags, *attrs_1
-        )
-        starts_2 = [1]
-        attrs_2 += ('starts', starts_2)
-        ends_2 = [dim_len]
-        attrs_2 += ('ends', ends_2)
-        input_back = _legacy_C_ops.slice(
-            new_input, None, None, None, None,
-            'axes', axes, 'infer_flags', infer_flags, *attrs_2
-        )
-
-        if x.dtype == paddle.bool:
-            return _legacy_C_ops.logical_xor(input_back, input_front)
-        else:
-            return paddle.tensor.math._subtract_with_axis(
-                input_back, input_front, axis=axis
-            )
     else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff'
...
...
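The entire hand-rolled legacy implementation (concat, two slices, then subtract or logical_xor) disappears; eager mode uses the `_C_ops` calls shown above. Semantics are unchanged:

    import paddle

    x = paddle.to_tensor([1, 4, 9, 16])
    print(paddle.diff(x))                                # [3, 5, 7]
    print(paddle.diff(x, prepend=paddle.to_tensor([0]))) # [1, 3, 5, 7]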
@@ -5082,9 +4797,7 @@ def angle(x, name=None):
     if in_dygraph_mode():
         return _C_ops.angle(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.angle(x)
     else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle'
         )
...
...
@@ -5143,10 +4856,11 @@ def heaviside(x, y, name=None):
     op_type = 'elementwise_heaviside'
     axis = -1
     act = None
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type
         )
+    else:
         return _elementwise_op(LayerHelper(op_type, **locals()))
...
...
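`heaviside` switches its guard from `_non_static_mode()` to `in_dygraph_mode()`; `y` supplies the value used where `x == 0`:

    import paddle

    x = paddle.to_tensor([-1.0, 0.0, 2.0])
    y = paddle.to_tensor([0.5])
    print(paddle.heaviside(x, y))  # [0., 0.5, 1.]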
@@ -5191,12 +4905,6 @@ def frac(x, name=None):
     if in_dygraph_mode():
         y = _C_ops.trunc(x)
         return _C_ops.subtract(x, y)
     else:
-        if _in_legacy_dygraph():
-            y = _legacy_C_ops.trunc(x)
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
         inputs = {"X": x}
         attrs = {}
...
...
@@ -5334,7 +5042,7 @@ def take(x, index, mode='raise', name=None):
             )
         )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         if not isinstance(index, (paddle.Tensor, Variable)):
             raise TypeError(
                 "The type of 'index' must be Tensor, but got {}".format(
...
...
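`take` flattens `x` and indexes the result; only the guard changes here. A quick sketch of the `mode` options:

    import paddle

    x = paddle.arange(6).reshape([2, 3])
    idx = paddle.to_tensor([0, 7, -1])
    # mode='raise' would reject 7; 'wrap' wraps out-of-range indices modulo x.size
    print(paddle.take(x, idx, mode='wrap'))  # [0, 1, 5]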
python/paddle/tensor/ops.py
...
...
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .. import _C_ops, _legacy_C_ops
+from .. import _C_ops
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper
 from .layer_function_generator import (
     add_sample_code,
...
...
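Every unary op below follows the same two-branch template after this PR. A sketch of the pattern on a hypothetical `my_op` (illustrative only, not a real kernel name):

    def my_op(x, name=None):
        if in_dygraph_mode():
            return _C_ops.my_op(x)  # eager: call the C++ kernel directly
        else:
            check_variable_and_dtype(
                x, 'x', ['float16', 'float32', 'float64'], 'my_op'
            )
            helper = LayerHelper('my_op', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(type='my_op', inputs={"X": x}, outputs={"Out": out})
            return out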
@@ -218,10 +218,10 @@ def acos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acos')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acos'
+        )
         helper = LayerHelper('acos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -255,10 +255,10 @@ def acosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acosh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'acosh'
+        )
         helper = LayerHelper('acosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -292,10 +292,10 @@ def asin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asin')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asin'
+        )
         helper = LayerHelper('asin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -329,10 +329,10 @@ def asinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asinh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'asinh'
+        )
         helper = LayerHelper('asinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -366,10 +366,10 @@ def atan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atan')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+        )
         helper = LayerHelper('atan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -403,10 +403,10 @@ def atanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atanh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atanh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+        )
         helper = LayerHelper('atanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -441,10 +441,10 @@ def ceil(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.ceil(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.ceil(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+        )
         helper = LayerHelper('ceil', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -480,10 +480,10 @@ def cos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cos')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cos'
+        )
         helper = LayerHelper('cos', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -519,10 +519,10 @@ def cosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cosh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+        )
         helper = LayerHelper('cosh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -557,9 +557,7 @@ def exp(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exp(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.exp(x)
+    else:
         check_variable_and_dtype(
             x,
             'x',
...
...
@@ -608,10 +606,10 @@ def expm1(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expm1(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expm1(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+        )
         helper = LayerHelper('expm1', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -646,10 +644,10 @@ def floor(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.floor(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.floor(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+        )
         helper = LayerHelper('floor', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -684,15 +682,15 @@ def reciprocal(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.reciprocal(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reciprocal(x)
+    else:
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
         )
         helper = LayerHelper('reciprocal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
-        helper.append_op(type='reciprocal', inputs={"X": x}, outputs={"Out": out})
+        helper.append_op(
+            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
+        )
         return out
...
...
@@ -731,10 +729,10 @@ def round(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.round(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.round(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'round'
+        )
         helper = LayerHelper('round', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -770,10 +768,10 @@ def rsqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.rsqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.rsqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'rsqrt')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
+        )
         helper = LayerHelper('rsqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -808,9 +806,7 @@ def sigmoid(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sigmoid(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sigmoid(x)
+    else:
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
         )
...
...
@@ -847,10 +843,10 @@ def sin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sin'
+        )
         helper = LayerHelper('sin', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -884,10 +880,10 @@ def sinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sinh')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sinh'
+        )
         helper = LayerHelper('sinh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -920,10 +916,10 @@ def sqrt(x, name=None):
    """
     if in_dygraph_mode():
         return _C_ops.sqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sqrt')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
+        )
         helper = LayerHelper('sqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
...
...
@@ -956,9 +952,7 @@ def square(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.square(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.square(x)
+    else:
         check_variable_and_dtype(
             x,
             'x',
...
...
@@ -1008,10 +1002,10 @@ def tan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tan')
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'tan'
+        )
         helper = LayerHelper('tan', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
...
...
python/paddle/tensor/random.py
...
...
@@ -16,11 +16,7 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import (
-    _current_expected_place,
-    _in_legacy_dygraph,
-    in_dygraph_mode,
-)
+from paddle.fluid.framework import _current_expected_place, in_dygraph_mode
 from paddle.static import Variable
 from ..fluid.data_feeder import (
...
...
@@ -80,10 +76,7 @@ def bernoulli(x, name=None):
     if in_dygraph_mode():
         return _C_ops.bernoulli(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.bernoulli(x)
+    else:
         check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
         helper = LayerHelper("randint", **locals())
...
...
@@ -129,10 +122,7 @@ def poisson(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.poisson(x)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.poisson(x)
+    else:
         check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")
         helper = LayerHelper("poisson", **locals())
...
...
@@ -197,12 +187,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     if in_dygraph_mode():
         return _C_ops.multinomial(x, num_samples, replacement)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multinomial(
-            x, 'num_samples', num_samples, 'replacement', replacement
-        )
+    else:
         check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
         helper = LayerHelper("multinomial", **locals())
...
...
@@ -356,22 +341,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         return _C_ops.gaussian(
             shape, float(mean), float(std), seed, dtype, place
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.gaussian_random(
-            'shape', shape, 'mean', float(mean), 'std', float(std),
-            'seed', seed, 'dtype', dtype,
-        )
+    else:
         check_shape(shape, op_type_for_check)
         check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
...
...
@@ -390,7 +360,10 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         helper = LayerHelper('gaussian', **locals())
         out = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
-            type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs
+            type='gaussian_random',
+            inputs=inputs,
+            outputs={'Out': out},
+            attrs=attrs,
         )
         out.stop_gradient = True
         return out
...
...
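`gaussian` is the shared sampler behind `normal`; the static branch still marks its output `stop_gradient = True`, since random draws are not differentiable. For example:

    import paddle

    paddle.seed(42)
    out = paddle.normal(mean=0.0, std=2.0, shape=[2, 3])
    print(out.shape)  # [2, 3]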
@@ -550,7 +523,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         # [1.00780561 3.78457445 5.81058198]  # random
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_type(mean, 'mean', (int, float, Variable), 'normal')
         check_type(std, 'std', (int, float, Variable), 'normal')
         if isinstance(mean, Variable):
...
@@ -588,7 +561,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
return
gaussian
(
shape
=
shape
,
mean
=
mean
,
std
=
std
,
name
=
name
)
out
=
out
*
std
+
mean
if
not
paddle
.
in_dynamic
_mode
():
if
not
in_dygraph
_mode
():
out
.
stop_grediant
=
True
return
out
...
...
@@ -680,22 +653,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
             seed,
             _current_expected_place(),
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.uniform_random(
-            'shape', shape, 'min', float(min), 'max', float(max),
-            'seed', seed, 'dtype', dtype,
-        )
+    else:
         check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
         check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
         check_type(min, 'min', (float, int, Variable), 'uniform/rand')
...
...
@@ -710,7 +668,10 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
         helper = LayerHelper("uniform", **locals())
         out = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
-            type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out}
+            type="uniform_random",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={"Out": out},
         )
         out.stop_gradient = True
         return out
...
...
@@ -751,12 +712,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
        #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
        #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
     """
-    if in_dygraph_mode():
-        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
-    else:
-        return _legacy_C_ops.uniform_random_inplace_(
-            x, 'min', min, 'max', max, 'seed', seed
-        )
+    return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
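`uniform_` collapses to a single eager call; the trailing `0, 0, 1.0` arguments presumably mirror the legacy op's diagonal-init attributes left at their defaults. Usage via the tensor method:

    import paddle

    x = paddle.ones([2, 2])
    x.uniform_(min=-1.0, max=1.0)        # refills x in place
    print(bool((x.abs() <= 1.0).all()))  # True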
 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
...
...
@@ -841,12 +797,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
         shape = utils.convert_shape_to_list(shape)
         place = _current_expected_place()
         return _C_ops.randint(low, high, shape, dtype, place)
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.randint(
-            'shape', shape, 'low', low, 'high', high, 'seed', 0, 'dtype', dtype
-        )
+    else:
         check_shape(shape, 'randint')
         check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
         if low >= high:
...
...
@@ -1015,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
             "high = {1}".format(low, high)
         )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _legacy_C_ops.randint(
             'shape',
...
...
@@ -1031,7 +982,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
         )
         out = paddle.cast(out, dtype)
         return out
-
+    else:
         check_shape(shape, 'randint_like')
         check_dtype(
             dtype,
...
...
@@ -1095,11 +1046,11 @@ def randperm(n, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.randperm(n, dtype, _current_expected_place())
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
-
-    if n < 1:
-        raise ValueError("The input n should be greater than 0 in randperm op.")
+    else:
+        if n < 1:
+            raise ValueError(
+                "The input n should be greater than 0 in randperm op."
+            )
         check_dtype(
             dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
         )
...
...
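`randperm` keeps only the eager and static branches; the n >= 1 validation now lives on the static path (in eager mode, invalid n is presumably rejected by the kernel itself). For example:

    import paddle

    perm = paddle.randperm(5)
    print(sorted(perm.numpy().tolist()))  # [0, 1, 2, 3, 4] - a permutation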
@@ -1199,9 +1150,7 @@ def exponential_(x, lam=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exponential_(x, lam)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.exponential_(x, "lambda", lam)
     else:
         check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
         helper = LayerHelper("exponential", **locals())
...
...
python/paddle/tensor/search.py
...
...
@@ -17,14 +17,12 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import VarDesc, Variable
 from ..fluid.data_feeder import check_dtype, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import (
     LayerHelper,
     _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
...
@@ -99,12 +97,7 @@ def argsort(x, axis=-1, descending=False, name=None):
     if in_dygraph_mode():
         _, ids = _C_ops.argsort(x, axis, descending)
         return ids
-
-    if _in_legacy_dygraph():
-        _, ids = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return ids
+    else:
         check_variable_and_dtype(
             x,
             'x',
...
...
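`argsort` (and `sort`, which shares the same kernel) keeps only the indices half of `_C_ops.argsort`. For example:

    import paddle

    x = paddle.to_tensor([3.0, 1.0, 2.0])
    print(paddle.argsort(x))  # [1, 2, 0]
    print(paddle.sort(x))     # [1., 2., 3.]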
@@ -187,20 +180,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_max(
-            x,
-            'axis',
-            axis,
-            'dtype',
-            var_dtype,
-            'keepdims',
-            keepdim,
-            'flatten',
-            flatten,
-        )
-        return out
+    else:
         helper = LayerHelper("argmax", **locals())
         check_variable_and_dtype(
             x,
...
...
@@ -281,20 +261,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_min(
-            x,
-            'axis',
-            axis,
-            'dtype',
-            var_dtype,
-            'keepdims',
-            keepdim,
-            'flatten',
-            flatten,
-        )
-        return out
+    else:
         helper = LayerHelper("argmin", **locals())
         check_variable_and_dtype(
             x,
...
...
@@ -354,10 +321,7 @@ def index_select(x, index, axis=0, name=None):
     if in_dygraph_mode():
         return _C_ops.index_select(x, index, axis)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.index_select(x, index, 'dim', axis)
+    else:
         helper = LayerHelper("index_select", **locals())
         check_variable_and_dtype(
             x,
...
...
@@ -366,7 +330,10 @@ def index_select(x, index, axis=0, name=None):
             'paddle.tensor.search.index_select',
         )
         check_variable_and_dtype(
-            index, 'index', ['int32', 'int64'], 'paddle.tensor.search.index_select'
+            index,
+            'index',
+            ['int32', 'int64'],
+            'paddle.tensor.search.index_select',
         )
         out = helper.create_variable_for_type_inference(x.dtype)
...
...
@@ -438,8 +405,6 @@ def nonzero(x, as_tuple=False):
     if in_dygraph_mode():
         outs = _C_ops.nonzero(x)
-    elif paddle.in_dynamic_mode():
-        outs = _legacy_C_ops.where_index(x)
     else:
         helper = LayerHelper("where_index", **locals())
...
...
@@ -522,12 +487,7 @@ def sort(x, axis=-1, descending=False, name=None):
     if in_dygraph_mode():
         outs, _ = _C_ops.argsort(x, axis, descending)
         return outs
-
-    if _in_legacy_dygraph():
-        outs, _ = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return outs
+    else:
         helper = LayerHelper("sort", **locals())
         out = helper.create_variable_for_type_inference(
             dtype=x.dtype, stop_gradient=False
...
...
@@ -577,9 +537,7 @@ def mode(x, axis=-1, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mode(x, axis, keepdim)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.mode(x, "axis", axis, "keepdim", keepdim)
+    else:
         helper = LayerHelper("mode", **locals())
         inputs = {"X": [x]}
         attrs = {}
...
...
@@ -687,11 +645,6 @@ def where(condition, x=None, y=None, name=None):
     if in_dygraph_mode():
         return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.where(
-                broadcast_condition, broadcast_x, broadcast_y
-            )
-        else:
         helper = LayerHelper("where", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
...
@@ -784,9 +737,6 @@ def index_sample(x, index):
     """
     if in_dygraph_mode():
         return _C_ops.index_sample(x, index)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.index_sample(x, index)
-        else:
         helper = LayerHelper("index_sample", **locals())
         check_variable_and_dtype(
...
...
@@ -843,9 +793,7 @@ def masked_select(x, mask, name=None):
     if in_dygraph_mode():
         return _C_ops.masked_select(x, mask)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.masked_select(x, mask)
+    else:
         helper = LayerHelper("masked_select", **locals())
         check_variable_and_dtype(
             x,
...
...
@@ -858,7 +806,9 @@ def masked_select(x, mask, name=None):
         )
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
-            type='masked_select', inputs={'X': x, 'Mask': mask}, outputs={'Y': out}
+            type='masked_select',
+            inputs={'X': x, 'Mask': mask},
+            outputs={'Y': out},
         )
         return out
...
...
@@ -916,26 +866,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
             axis = -1
         out, indices = _C_ops.topk(x, k, axis, largest, sorted)
         return out, indices
-
-    if _non_static_mode():
-        if axis is None:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x, 'k', int(k), 'largest', largest, 'sorted', sorted
-            )
-        else:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x, 'k', int(k), 'axis', axis, 'largest', largest,
-                'sorted', sorted,
-            )
-        return out, indices
+    else:
         helper = LayerHelper("top_k_v2", **locals())
         inputs = {"X": [x]}
         attrs = {}
...
...
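The eager branch now normalizes `axis=None` to `-1` itself and calls `_C_ops.topk` once, instead of the two `top_k_v2` attribute spellings:

    import paddle

    x = paddle.to_tensor([1.0, 4.0, 5.0, 7.0])
    values, indices = paddle.topk(x, k=2)
    print(values)   # [7., 5.]
    print(indices)  # [3, 2]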
@@ -1065,12 +996,7 @@ def searchsorted(
     """
     if in_dygraph_mode():
         return _C_ops.searchsorted(sorted_sequence, values, out_int32, right)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.searchsorted(
-            sorted_sequence, values, "out_int32", out_int32, "right", right
-        )
+    else:
         check_variable_and_dtype(
             sorted_sequence,
             'SortedSequence',
...
...
@@ -1135,16 +1061,10 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
             # [[0, 2],
             #  [1, 2]]))
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         if axis is not None:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(
-                    x, 'k', k, "axis", axis, "keepdim", keepdim
-                )
             return _C_ops.kthvalue(x, k, axis, keepdim)
         else:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
             return _C_ops.kthvalue(x, k, -1, keepdim)

     helper = LayerHelper("kthvalue", **locals())
...
...
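`kthvalue` keeps the axis normalization (None becomes -1) but drops the nested legacy calls:

    import paddle

    x = paddle.to_tensor([[2.0, 1.0, 3.0], [6.0, 5.0, 4.0]])
    values, indices = paddle.kthvalue(x, k=2)  # 2nd smallest along the last axis
    print(values)   # [2., 5.]
    print(indices)  # [0, 1]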
python/paddle/tensor/stat.py
...
...
@@ -16,7 +16,7 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..framework import LayerHelper, core
...
...
@@ -81,13 +81,8 @@ def mean(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mean(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_mean(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
         check_variable_and_dtype(
             x,
             'x/input',
...
...
@@ -111,7 +106,10 @@ def mean(x, axis=None, keepdim=False, name=None):
         attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
-            type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+            type='reduce_mean',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
         )
         return out
...
...
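As with `all`/`any` in math.py, `mean` now derives `reduce_all` only on the static path. `var` and `std` build on it, so the same guard swap applies to them below:

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    print(paddle.mean(x))         # 2.5
    print(paddle.var(x, axis=0))  # [2., 2.] (unbiased by default)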
@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
             out2 = paddle.var(x, axis=1)
             # [1.         4.33333333]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')

     u = mean(x, axis, True, name)
...
...
@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
             # [1.       2.081666]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')

     out = var(**locals())
...
...
@@ -243,9 +241,7 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.numel(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.size(x)
     else:
         if not isinstance(x, Variable):
             raise TypeError("x must be a Tensor in numel")
         helper = LayerHelper('numel', **locals())
...
...
@@ -331,14 +327,17 @@ def nanmedian(x, axis=None, keepdim=True, name=None):
         if len(axis) != len(set(axis)):
             raise ValueError("Axis has duplicated elements.")

-    if _in_legacy_dygraph():
+    if in_dygraph_mode():
         median_index, out = _legacy_C_ops.nanmedian(
             x, 'axis', axis, 'keepdim', keepdim
         )
         return out
     else:
         check_variable_and_dtype(
-            x, 'X', ['int32', 'int64', 'float16', 'float32', 'float64'], 'nanmedian'
+            x,
+            'X',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'nanmedian',
         )
         helper = LayerHelper('nanmedian', **locals())
...
...
@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False):
         for q_num in q:
             if q_num < 0 or q_num > 1:
                 raise ValueError("q should be in range [0, 1]")
-            if paddle.in_dynamic_mode():
+            if in_dygraph_mode():
                 q_num = paddle.to_tensor(q_num, dtype='float64')
             if ignore_nan:
                 indices.append(q_num * (valid_counts - 1))
...
...