PaddlePaddle / Paddle, commit 861fef52 (unverified)

delete legacy dygraph code in python/paddle/tensor (#49286)

* delete _in_legacy_dygraph

Author:    wanghuancoder
Committer: GitHub
Date:      Dec 27, 2022
Parent:    ea741aff
15 changed files with 4,091 additions and 5,104 deletions (+4091, -5104)
python/paddle/fluid/framework.py                      +1     -2
python/paddle/fluid/tests/unittests/test_unique.py    +13    -6
python/paddle/tensor/array.py                         +68    -66
python/paddle/tensor/attribute.py                     +43    -51
python/paddle/tensor/creation.py                      +368   -438
python/paddle/tensor/einsum.py                        +29    -32
python/paddle/tensor/layer_function_generator.py      +49    -45
python/paddle/tensor/linalg.py                        +685   -921
python/paddle/tensor/logic.py                         +243   -268
python/paddle/tensor/manipulation.py                  +1223  -1476
python/paddle/tensor/math.py                          +761   -1053
python/paddle/tensor/ops.py                           +194   -200
python/paddle/tensor/random.py                        +153   -204
python/paddle/tensor/search.py                        +201   -281
python/paddle/tensor/stat.py                          +60    -61
python/paddle/fluid/framework.py

@@ -255,8 +255,7 @@ def _test_eager_guard(place=None):
    try:
        yield
    finally:
-       if not already_fallback:
-           _enable_legacy_dygraph()
+       pass

global_ipu_index = -1
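This first hunk sets the tone for the whole commit: once eager mode no longer falls back to legacy dygraph, every API below collapses from a three-way dispatch (eager / legacy dygraph / static graph) to a two-way one. A minimal sketch of the recurring before/after pattern, assuming a hypothetical wrapper my_abs (in_dygraph_mode, _C_ops, and LayerHelper are real Paddle symbols; the function itself is illustrative):

import paddle
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper

def my_abs(x):
    # Before #49286 there was a third branch here:
    #     if _in_legacy_dygraph():
    #         return _legacy_C_ops.abs(x)
    # The commit deletes that branch everywhere, leaving eager vs. static.
    if in_dygraph_mode():
        return _C_ops.abs(x)  # eager mode: call the C++ kernel directly
    else:
        helper = LayerHelper('abs', **locals())  # static mode: build graph ops
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='abs', inputs={'X': x}, outputs={'Out': out})
        return out

print(my_abs(paddle.to_tensor([-1.0, 2.0])))  # [1., 2.] in eager mode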
python/paddle/fluid/tests/unittests/test_unique.py

@@ -28,7 +28,9 @@ class TestUniqueOp(OpTest):
        self.init_config()

    def test_check_output(self):
+       paddle.enable_static()
        self.check_output()
+       paddle.disable_static()

    def init_config(self):
        self.inputs = {

@@ -72,6 +74,8 @@ class TestRandom(TestUniqueOp):
class TestUniqueRaiseError(unittest.TestCase):
    def test_errors(self):
+       paddle.enable_static()
+
        def test_type():
            paddle.unique([10])

@@ -82,6 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
            paddle.unique(data)

        self.assertRaises(TypeError, test_dtype)
+       paddle.disable_static()

@unittest.skipIf(

@@ -100,8 +105,10 @@ class TestOneGPU(TestUniqueOp):
    def test_check_output(self):
        if core.is_compiled_with_cuda():
+           paddle.enable_static()
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
+           paddle.disable_static()

@unittest.skipIf(

@@ -125,8 +132,10 @@ class TestRandomGPU(TestUniqueOp):
    def test_check_output(self):
        if core.is_compiled_with_cuda():
+           paddle.enable_static()
            place = core.CUDAPlace(0)
            self.check_output_with_place(place, atol=1e-5)
+           paddle.disable_static()

class TestSortedUniqueOp(TestUniqueOp):

@@ -209,16 +218,13 @@ class TestUniqueOpAxis1(TestUniqueOp):
class TestUniqueAPI(unittest.TestCase):
    def test_dygraph_api_out(self):
-       paddle.disable_static()
        x_data = x_data = np.random.randint(0, 10, (120))
        x = paddle.to_tensor(x_data)
        out = paddle.unique(x)
        expected_out = np.unique(x_data)
        self.assertTrue((out.numpy() == expected_out).all(), True)
-       paddle.enable_static()

    def test_dygraph_api_attr(self):
-       paddle.disable_static()
        x_data = np.random.random((3, 5, 5)).astype("float32")
        x = paddle.to_tensor(x_data)
        out, index, inverse, counts = paddle.unique(

@@ -239,10 +245,8 @@ class TestUniqueAPI(unittest.TestCase):
        self.assertTrue((index.numpy() == np_index).all(), True)
        self.assertTrue((inverse.numpy() == np_inverse).all(), True)
        self.assertTrue((counts.numpy() == np_counts).all(), True)
-       paddle.enable_static()

    def test_dygraph_attr_dtype(self):
-       paddle.disable_static()
        x_data = x_data = np.random.randint(0, 10, (120))
        x = paddle.to_tensor(x_data)
        out, indices, inverse, counts = paddle.unique(

@@ -259,9 +263,9 @@ class TestUniqueAPI(unittest.TestCase):
        self.assertTrue((indices.numpy() == np_indices).all(), True)
        self.assertTrue((inverse.numpy() == np_inverse).all(), True)
        self.assertTrue((counts.numpy() == np_counts).all(), True)
-       paddle.enable_static()

    def test_static_graph(self):
+       paddle.enable_static()
        with paddle.static.program_guard(
            paddle.static.Program(), paddle.static.Program()
        ):

@@ -281,6 +285,7 @@ class TestUniqueAPI(unittest.TestCase):
        np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
        np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
        np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
+       paddle.disable_static()

class TestUniqueError(unittest.TestCase):

@@ -295,6 +300,7 @@ class TestUniqueError(unittest.TestCase):
        self.assertRaises(TypeError, test_x_dtype)

    def test_attr(self):
+       paddle.enable_static()
        x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')

        def test_return_index():

@@ -319,6 +325,7 @@ class TestUniqueError(unittest.TestCase):
            result = paddle.unique(x, dtype='float64')

        self.assertRaises(TypeError, test_axis)
+       paddle.disable_static()

if __name__ == "__main__":
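With legacy dygraph gone, eager execution is the default, so these tests now opt in to static graph explicitly and restore eager mode afterwards. A self-contained sketch of that guard pattern, assuming only the public paddle.static APIs the updated tests use:

import numpy as np
import paddle

paddle.enable_static()  # opt this test into static-graph mode
try:
    main = paddle.static.Program()
    with paddle.static.program_guard(main, paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[120], dtype='int64')
        out = paddle.unique(x)
        exe = paddle.static.Executor()
        (result,) = exe.run(
            main,
            feed={'x': np.random.randint(0, 10, (120,))},
            fetch_list=[out],
        )
    print(np.array_equal(result, np.unique(result)))  # unique output is sorted
finally:
    paddle.disable_static()  # restore eager mode for later tests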
python/paddle/tensor/array.py

@@ -15,7 +15,7 @@
# Define functions about array.

from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..framework import LayerHelper, _non_static_mode, core
+from ..framework import LayerHelper, core, in_dygraph_mode
from ..static import Variable

__all__ = []

@@ -45,27 +45,29 @@ def array_length(array):
            arr_len = paddle.tensor.array_length(arr)
            print(arr_len)  # 1
    """
-   if _non_static_mode():
+   if in_dygraph_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_write must be a list in dygraph mode"
        return len(array)
+   else:
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError(
                "array should be tensor array vairable in array_length Op"
            )

        helper = LayerHelper('array_length', **locals())
        tmp = helper.create_variable_for_type_inference(dtype='int64')
        tmp.stop_gradient = True
        helper.append_op(
            type='lod_array_length',
            inputs={'X': [array]},
            outputs={'Out': [tmp]},
        )
        return tmp

def array_read(array, i):

@@ -107,7 +109,7 @@ def array_read(array, i):
            item = paddle.tensor.array_read(arr, i)
            print(item)  # [[5., 5., 5.]]
    """
-   if _non_static_mode():
+   if in_dygraph_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_read must be list in dygraph mode"

@@ -119,21 +121,21 @@ def array_read(array, i):
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        return array[i]
+   else:
        check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
        helper = LayerHelper('array_read', **locals())
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError("array should be tensor array vairable")
        out = helper.create_variable_for_type_inference(dtype=array.dtype)
        helper.append_op(
            type='read_from_array',
            inputs={'X': [array], 'I': [i]},
            outputs={'Out': [out]},
        )
        return out

def array_write(x, i, array=None):

@@ -167,7 +169,7 @@ def array_write(x, i, array=None):
            item = paddle.tensor.array_read(arr, i)
            print(item)  # [[5., 5., 5.]]
    """
-   if _non_static_mode():
+   if in_dygraph_mode():
        assert isinstance(
            x, Variable
        ), "The input data 'x' in array_write must be Variable in dygraph mode"

@@ -191,30 +193,30 @@ def array_write(x, i, array=None):
        else:
            array.append(x)
        return array
+   else:
        check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
        check_type(x, 'x', (Variable), 'array_write')
        helper = LayerHelper('array_write', **locals())
        if array is not None:
            if (
                not isinstance(array, Variable)
                or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
            ):
                raise TypeError(
                    "array should be tensor array vairable in array_write Op"
                )
        if array is None:
            array = helper.create_variable(
                name="{0}.out".format(helper.name),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=x.dtype,
            )
        helper.append_op(
            type='write_to_array',
            inputs={'X': [x], 'I': [i]},
            outputs={'Out': [array]},
        )
        return array

def create_array(dtype, initialized_list=None):

@@ -265,17 +267,17 @@ def create_array(dtype, initialized_list=None):
                )
            )

-   if _non_static_mode():
+   if in_dygraph_mode():
        return array
+   else:
        helper = LayerHelper("array", **locals())
        tensor_array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=dtype,
        )

        for val in array:
            array_write(x=val, i=array_length(tensor_array), array=tensor_array)

        return tensor_array
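In eager mode the array helpers above now operate directly on a Python list, as their docstring examples show. This snippet, built from those same docstring values, replays the round trip through the public paddle.tensor API:

import paddle

arr = paddle.tensor.create_array(dtype='float32')   # a plain list in eager mode
x = paddle.full(shape=[1, 3], fill_value=5, dtype='float32')
i = paddle.zeros(shape=[1], dtype='int64')           # index must have shape [1]
arr = paddle.tensor.array_write(x, i, array=arr)
print(paddle.tensor.array_length(arr))               # 1
print(paddle.tensor.array_read(arr, i))              # [[5., 5., 5.]]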
python/paddle/tensor/attribute.py

@@ -17,10 +17,10 @@
import numpy as np

import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
from ..framework import LayerHelper, core
from ..static import Variable
from .creation import _complex_to_real_dtype, assign

@@ -107,36 +107,32 @@ def shape(input):
        out = _C_ops.shape(input)
        out.stop_gradient = True
        return out
-   if _in_legacy_dygraph():
-       out = _legacy_C_ops.shape(input)
-       out.stop_gradient = True
-       return out
+   else:
        check_variable_and_dtype(
            input, 'input',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64',
             'complex64', 'complex128'],
            'shape',
        )
        helper = LayerHelper('shape', **locals())
        out = helper.create_variable_for_type_inference(dtype='int32')
        helper.append_op(
            type='shape',
            inputs={'Input': input},
            outputs={'Out': out},
            stop_gradient=True,
        )
        return out

@@ -289,16 +285,14 @@ def real(x, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.real(x)
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.real(x)
+   else:
        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
        helper = LayerHelper('real', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(helper.input_dtype())
        )
        helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
        return out

@@ -336,13 +330,11 @@ def imag(x, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.imag(x)
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.imag(x)
+   else:
        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
        helper = LayerHelper('imag', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(helper.input_dtype())
        )
        helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
        return out
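A quick eager-mode exercise of the three rewritten attribute APIs; since these are public paddle functions, only the in_dygraph_mode() branch runs here:

import paddle

z = paddle.to_tensor([1 + 2j, 3 - 4j], dtype='complex64')
print(paddle.real(z))   # [ 1.,  3.]
print(paddle.imag(z))   # [ 2., -4.]
print(paddle.shape(z))  # [2], an int32 Tensor with stop_gradient=True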
python/paddle/tensor/creation.py

@@ -33,7 +33,6 @@ from ..fluid.data_feeder import (
from ..fluid.framework import (
    Variable,
    _in_eager_without_dygraph_check,
-   _in_legacy_dygraph,
    device_guard,
)
from ..fluid.initializer import Constant, Initializer

@@ -43,7 +42,6 @@ from ..framework import (
    LayerHelper,
    _current_expected_place,
    _get_paddle_place,
-   _non_static_mode,
    convert_np_dtype_to_dtype_,
    core,
    in_dygraph_mode,

@@ -324,65 +322,65 @@ def linspace(start, stop, num, dtype=None, name=None):
            dtype,
            _current_expected_place(),
        )
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.linspace(
-           tensor_start, tensor_stop, tensor_num, 'dtype', dtype
-       )
+   else:
        helper = LayerHelper("linspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype, 'start',
                ['float32', 'float64', 'int32', 'int64'], 'linspace',
            )
        else:
            check_type(start, 'start', (int, float), 'linspace')
        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype, 'stop',
                ['float32', 'float64', 'int32', 'int64'], 'linspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'linspace')
        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'linspace')
        check_dtype(
            dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'linspace'
        )
        if (
            (stop_dtype == "float64" or start_dtype == "float64")
            and out_dtype in ["float32", "int32"]
        ) or (
            (stop_dtype == "int64" or start_dtype == "int64")
            and out_dtype == "int32"
        ):
            raise ValueError(
                "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
                "which may cause data type overflows. Please reset attr(dtype) of linspace.".format(
                    start_dtype, stop_dtype, dtype
                )
            )
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='linspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out

@@ -446,91 +444,91 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
    if not isinstance(base, Variable):
        with device_guard("cpu"):
            tensor_base = fill_constant([1], dtype, base)
-   if _non_static_mode():
+   if in_dygraph_mode():
        return _legacy_C_ops.logspace(
            tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype
        )
+   else:
        helper = LayerHelper("logspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        base_dtype = convert_dtype(tensor_base.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype, 'start',
                ['float32', 'float64', 'int32', 'int64'], 'logspace',
            )
        else:
            check_type(start, 'start', (int, float), 'logspace')
        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype, 'stop',
                ['float32', 'float64', 'int32', 'int64'], 'logspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'logspace')
        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'logspace')
        if isinstance(base, Variable):
            check_dtype(
                base.dtype, 'base',
                ['float32', 'float64', 'int32', 'int64'], 'logspace',
            )
        else:
            check_type(base, 'base', (int, float), 'logspace')
        check_dtype(
            dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'logspace'
        )
        if (
            (
                stop_dtype == "float64"
                or start_dtype == "float64"
                or base_dtype == "float64"
            )
            and out_dtype in ["float32", "int32"]
        ) or (
            (
                stop_dtype == "int64"
                or start_dtype == "int64"
                or base_dtype == "int64"
            )
            and out_dtype == "int32"
        ):
            raise ValueError(
                "The dtype of start/stop/base is {}/{}/{} but the attr(dtype) of logspace is {}, "
                "which may cause data type overflows. Please reset attr(dtype) of logspace.".format(
                    start_dtype, stop_dtype, base_dtype, dtype
                )
            )
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='logspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
                'Base': tensor_base,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out

def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True):

@@ -746,7 +744,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
    if place is None:
        place = _current_expected_place()
-   if _non_static_mode():
+   if paddle.fluid.framework._non_static_mode():
        return _to_tensor_non_static(data, dtype, place, stop_gradient)

    # call assign for static graph

@@ -785,44 +783,53 @@ def full_like(x, fill_value, dtype=None, name=None):
            # [[2. 2. 2.]
            #  [2. 2. 2.]]
    """
    if dtype is None:
        dtype = x.dtype
    else:
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        return _C_ops.full_like(x, fill_value, dtype, x.place)
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.fill_any_like(
-           x, 'value', fill_value, 'dtype', dtype
-       )
+   else:
        helper = LayerHelper("full_like", **locals())
        check_variable_and_dtype(
            x, 'x',
            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
            'full_like',
        )
        check_dtype(
            dtype, 'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
            'full_like/zeros_like/ones_like',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='fill_any_like',
            inputs={'X': [x]},
            attrs={'value': fill_value, "dtype": dtype},
            outputs={'Out': [out]},
        )
        out.stop_gradient = True
        return out

def ones(shape, dtype=None, name=None):

@@ -1011,7 +1018,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
    """

    def _check_attr(attr, message):
-       if isinstance(attr, ((Variable, core.VarBase, core.eager.Tensor))):
+       if isinstance(attr, ((Variable, core.eager.Tensor))):
            assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
        elif not isinstance(attr, int) or attr < 0:
            raise TypeError("{} should be a non-negative int.".format(message))

@@ -1027,16 +1034,10 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
    else:
        num_columns = num_rows

-   if _non_static_mode():
-       if in_dygraph_mode():
-           out = _C_ops.eye(
-               num_rows, num_columns, dtype, _current_expected_place()
-           )
-       elif _in_legacy_dygraph():
-           out = _legacy_C_ops.eye(
-               'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
-           )
+   if in_dygraph_mode():
+       out = _C_ops.eye(
+           num_rows, num_columns, dtype, _current_expected_place()
+       )
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(

@@ -1211,27 +1212,25 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
    if in_dygraph_mode():
        return _C_ops.arange(start, end, step, dtype, _current_expected_place())
-   if _in_legacy_dygraph():
-       out = _legacy_C_ops.range(start, end, step)
-       out.stop_gradient = True
-       return out
+   else:
        check_dtype(
            dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
            'range/arange',
        )
        helper = LayerHelper('range', **locals())
        out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
        helper.append_op(
            type='range',
            inputs={'Start': start, 'End': end, 'Step': step},
            outputs={'Out': out},
        )
        out.stop_gradient = True
        if out_shape is not None:
            out.desc.set_shape(out_shape)
        return out

def _tril_triu_op(helper):
    """Base op of tril_op and triu_op"""

@@ -1328,12 +1327,8 @@ def tril(x, diagonal=0, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.tril(x, diagonal, True)
-   if _in_legacy_dygraph():
-       op = getattr(_legacy_C_ops, 'tril_triu')
-       return op(x, 'diagonal', diagonal, "lower", True)
-
-   return _tril_triu_op(LayerHelper('tril', **locals()))
+   else:
+       return _tril_triu_op(LayerHelper('tril', **locals()))

def triu(x, diagonal=0, name=None):

@@ -1394,12 +1389,8 @@ def triu(x, diagonal=0, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.triu(x, diagonal, False)
-   if _in_legacy_dygraph():
-       op = getattr(_legacy_C_ops, 'tril_triu')
-       return op(x, 'diagonal', diagonal, "lower", False)
-
-   return _tril_triu_op(LayerHelper('triu', **locals()))
+   else:
+       return _tril_triu_op(LayerHelper('triu', **locals()))

def meshgrid(*args, **kwargs):

@@ -1437,37 +1428,35 @@ def meshgrid(*args, **kwargs):
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
        args = args[0]
-   if _in_legacy_dygraph():
-       num = len(args)
-       out = _legacy_C_ops.meshgrid(list(args), num)
-       return out
    if in_dygraph_mode():
        return _C_ops.meshgrid(list(args))
+   else:
        name = kwargs.get("name", None)
        helper = LayerHelper('meshgrid', **locals())

        if not isinstance(args, (list, tuple)):
            raise TypeError("The type of input args in meshgrid should be list.")

        for id, input_ in enumerate(args):
            check_dtype(
                input_.dtype, 'create data type',
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'meshgrid',
            )

        num = len(args)
        out = [
            helper.create_variable_for_type_inference(dtype=args[i].dtype)
            for i in range(num)
        ]
        helper.append_op(
            type='meshgrid', inputs={'X': list(args)}, outputs={'Out': out}
        )
        return out

def diagflat(x, offset=0, name=None):

@@ -1555,62 +1544,49 @@ def diagflat(x, offset=0, name=None):
            #  [0, 0, 3, 0, 0],
            #  [0, 0, 0, 4, 0]])
    """
-   padding_value = 0
    if in_dygraph_mode():
        if len(x.shape) <= 1:
-           return _C_ops.diag(x, offset, padding_value)
+           return _C_ops.diag(x, offset, 0)
        else:
            y = _C_ops.flatten(x, 0, -1)
-           return _C_ops.diag(y, offset, padding_value)
+           return _C_ops.diag(y, offset, 0)
-   if _in_legacy_dygraph():
-       if len(x.shape) == 1:
-           return _legacy_C_ops.diag_v2(
-               x, "offset", offset, "padding_value", padding_value
-           )
-       else:
-           y, _ = _legacy_C_ops.flatten_contiguous_range(
-               x, "start_axis", 0, "stop_axis", -1
-           )
-           return _legacy_C_ops.diag_v2(
-               y, "offset", offset, "padding_value", padding_value
-           )
+   else:
+       padding_value = 0
        check_type(x, 'x', (Variable), 'diagflat')
        check_dtype(
            x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat'
        )
        check_type(offset, 'offset', (int), 'diagflat')

        helper = LayerHelper("diagflat", **locals())
        out1 = helper.create_variable_for_type_inference(dtype=x.dtype)
        out1_shape = helper.create_variable_for_type_inference(x.dtype)
        out2 = helper.create_variable_for_type_inference(dtype=x.dtype)

        if len(x.shape) <= 1:
            helper.append_op(
                type='diag_v2',
                inputs={'X': x},
                outputs={'Out': out2},
                attrs={'offset': offset, 'padding_value': padding_value},
            )
        else:
            helper.append_op(
                type='flatten_contiguous_range',
                inputs={'X': x},
                outputs={'Out': out1, 'XShape': out1_shape},
                attrs={'start_axis': 0, 'stop_axis': -1},
            )
            out1.stop_gradient = True

            helper.append_op(
                type='diag_v2',
                inputs={'X': out1},
                outputs={'Out': out2},
                attrs={'offset': offset, 'padding_value': padding_value},
            )
        out2.stop_gradient = True
        return out2

def diag(x, offset=0, padding_value=0, name=None):

@@ -1691,40 +1667,35 @@ def diag(x, offset=0, padding_value=0, name=None):
    if in_dygraph_mode():
        return _C_ops.diag(x, offset, padding_value)
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.diag_v2(
-           x, "offset", offset, "padding_value", padding_value
-       )
+   else:
        check_type(x, 'x', (Variable), 'diag_v2')
        check_dtype(
            x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diag_v2'
        )
        check_type(offset, 'offset', (int), 'diag_v2')
        check_type(padding_value, 'padding_value', (int, float), 'diag_v2')
        if len(x.shape) != 1 and len(x.shape) != 2:
            raise ValueError(
                "The dimension of input x must be either 1 or 2, but received {}".format(
                    len(x.shape)
                )
            )

        helper = LayerHelper("diag_v2", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='diag_v2',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'offset': offset, 'padding_value': padding_value},
        )
        out.stop_gradient = True
        return out

def empty(shape, dtype=None, name=None):

@@ -1782,45 +1753,37 @@ def empty(shape, dtype=None, name=None):
        )
        out.stop_gradient = True
        return out
-   if _in_legacy_dygraph():
-       shape = utils.convert_shape_to_list(shape)
-       out = _legacy_C_ops.empty(
-           'shape', shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-       )
-       out.stop_gradient = True
-       return out
+   else:
        helper = LayerHelper("empty", **locals())
        inputs = {}
        check_dtype(
            dtype, 'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty',
        )
        check_type(shape, 'shape', (Variable, list, tuple), 'empty')
        if isinstance(shape, Variable):
            check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty')

        attrs = {}
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='empty'
        )

        out = helper.create_variable_for_type_inference(dtype=dtype)
        attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
        helper.append_op(
            type='empty',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out

def empty_like(x, dtype=None, name=None):

@@ -1863,47 +1826,40 @@ def empty_like(x, dtype=None, name=None):
        )
        out.stop_gradient = True
        return out
-   if _in_legacy_dygraph():
-       out = _legacy_C_ops.empty(
-           'shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype)
-       )
-       out.stop_gradient = True
-       return out
+   else:
        helper = LayerHelper("empty_like", **locals())
        check_variable_and_dtype(
            x, 'x',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty_like',
        )
        check_dtype(
            dtype, 'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty_like',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)

        inputs = {}
        attrs = {}
        attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
        shape = paddle.shape(x)
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='empty_like'
        )

        helper.append_op(
            type='empty',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out

def assign(x, output=None):
    """

@@ -1958,10 +1914,6 @@ def assign(x, output=None):
            output = _C_ops.assign(input)
        else:
            _C_ops.assign_out_(input, output)
-   elif _in_legacy_dygraph():
-       if output is None:
-           output = core.VarBase()
-       _legacy_C_ops.assign(input, output)
    else:
        check_dtype(
            input.dtype,

@@ -2060,18 +2012,6 @@ def assign(x, output=None):
                values,
                _current_expected_place(),
            )
-       elif _in_legacy_dygraph():
-           if output is None:
-               output = core.VarBase()
-           _legacy_C_ops.assign_value(
-               output,
-               'shape',
-               list(input.shape),
-               'dtype',
-               dtype,
-               value_name,
-               values,
-           )
        else:
            if output is None:
                output = helper.create_variable_for_type_inference(

@@ -2087,9 +2027,6 @@ def assign(x, output=None):
            },
        )

-   if is_inplace and _in_legacy_dygraph():
-       output._bump_inplace_version()
-
    return output

@@ -2227,23 +2164,26 @@ def complex(real, imag, name=None):
    """
-   if paddle.in_dynamic_mode():
-       return paddle._legacy_C_ops.complex(real, imag)
+   if in_dygraph_mode():
+       return _C_ops.complex(real, imag)
+   else:
        check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
        check_variable_and_dtype(imag, 'imag', ['float32', 'float64'], 'complex')

        op_type = "complex"
        helper = LayerHelper(op_type, **locals())
        inputs = {"X": real, "Y": imag}
        out = helper.create_variable_for_type_inference(
            dtype=_real_to_complex_dtype(real.dtype)
        )
        outputs = {"Out": out}
        attrs = {}
        helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
        return out

def tril_indices(row, col, offset=0, dtype='int64'):

@@ -2291,34 +2231,29 @@ def tril_indices(row, col, offset=0, dtype='int64'):
            # [[ 1, 2, 2, 3, 3, 3],
            #  [ 0, 0, 1, 0, 1, 2]]
    """
-   if not isinstance(row, int) or row < 0:
-       raise TypeError("row should be a non-negative int")
-
-   if col is not None:
-       if not isinstance(col, int) or col < 0:
-           raise TypeError("col should be a non-negative int")
-   else:
-       col = row
-
-   if not isinstance(offset, int):
-       raise TypeError("offset should be a int")
-
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
+       if col is None:
+           col = row
        out = _C_ops.tril_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
-   if _in_legacy_dygraph():
-       out = _legacy_C_ops.tril_indices(
-           'rows', row, 'cols', col, 'offset', offset, "dtype", dtype
-       )
-       return out
    else:
+       if not isinstance(row, int) or row < 0:
+           raise TypeError("row should be a non-negative int")
+
+       if col is not None:
+           if not isinstance(col, int) or col < 0:
+               raise TypeError("col should be a non-negative int")
+       else:
+           col = row
+
+       if not isinstance(offset, int):
+           raise TypeError("offset should be a int")
+
        helper = LayerHelper("tril_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)

def triu_indices(row, col=None, offset=0, dtype='int64'):

@@ -2375,34 +2310,29 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
            # [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3],
            #  [0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 2, 3]]
    """
-   if not isinstance(row, int) or row < 0:
-       raise TypeError("row should be a non-negative int")
-
-   if col is not None:
-       if not isinstance(col, int) or col < 0:
-           raise TypeError("col should be a non-negative int")
-   else:
-       col = row
-
-   if not isinstance(offset, int):
-       raise TypeError("offset should be a int")
-
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
+       if col is None:
+           col = row
        out = _C_ops.triu_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
-   if _in_legacy_dygraph():
-       out = _legacy_C_ops.triu_indices(
-           'row', row, 'col', col, 'offset', offset, "dtype", dtype
-       )
-       return out
    else:
+       if not isinstance(row, int) or row < 0:
+           raise TypeError("row should be a non-negative int")
+
+       if col is not None:
+           if not isinstance(col, int) or col < 0:
+               raise TypeError("col should be a non-negative int")
+       else:
+           col = row
+
+       if not isinstance(offset, int):
+           raise TypeError("offset should be a int")
+
        helper = LayerHelper("triu_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)
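The creation.py hunks are the bulk of the mechanical rewrite. As a sanity check, here is an eager-mode pass over a few of the APIs touched above; the expected values are taken from, or easily derived from, the docstring examples that survive in the diff:

import paddle

print(paddle.linspace(0, 10, 5, dtype='float32'))    # [0., 2.5, 5., 7.5, 10.]
print(paddle.arange(0, 6, 2, dtype='int64'))         # [0, 2, 4]
print(paddle.full_like(paddle.ones([2, 3]), 2.0))    # [[2. 2. 2.] [2. 2. 2.]]
print(paddle.diagflat(paddle.to_tensor([1, 2, 3]), offset=1).shape)  # [4, 4]
print(paddle.tril_indices(4, 4, 0).shape)            # [2, 10]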
python/paddle/tensor/einsum.py

@@ -20,10 +20,10 @@ import string
import numpy as np
import opt_einsum

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
from ..fluid.layer_helper import LayerHelper
from .linalg import matmul, transpose
from .manipulation import reshape, squeeze, unsqueeze

@@ -829,38 +829,35 @@ def gen_einsum_op(equation, *operands):
    """
    EinsumOp Python Interface:
    """
-   assert len(operands) <= 2, "Only support two operands in EinsumOp."
    if in_dygraph_mode():
        return _C_ops.einsum(operands, equation)[0]
-   if _in_legacy_dygraph():
-       # dygraph
-       return _legacy_C_ops.einsum(
-           operands, len(operands), len(operands), 'equation', equation
-       )[0]
+   else:
+       assert len(operands) <= 2, "Only support two operands in EinsumOp."
        for inp in operands:
            check_variable_and_dtype(
                inp, 'dtype', ['float32', 'float64'], 'einsum'
            )
        check_type(equation, 'equation', str, 'einsum')
        helper = LayerHelper('einsum', **locals())
        out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        attrs = dict()
        attrs['equation'] = equation
        caches = [
            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
            for i in range(len(operands))
        ]
        xshape = [
            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
            for i in range(len(operands))
        ]
        helper.append_op(
            type='einsum',
            inputs={'Operands': operands},
            outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
            attrs=attrs,
        )
        return out

def einsum(equation, *operands):
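gen_einsum_op is reached through the public paddle.einsum; in eager mode the call goes straight to _C_ops.einsum. A minimal check:

import paddle

a = paddle.rand([3, 4])
b = paddle.rand([4, 5])
c = paddle.einsum('ij,jk->ik', a, b)  # equivalent to a @ b
print(c.shape)                        # [3, 5]
print(paddle.allclose(c, a @ b))      # True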
python/paddle/tensor/layer_function_generator.py

@@ -24,7 +24,6 @@ from ..fluid.proto import framework_pb2
from ..framework import (
    LayerHelper,
    OpProtoHolder,
-   _non_static_mode,
    convert_np_dtype_to_dtype_,
    core,
    in_dygraph_mode,

@@ -274,41 +273,44 @@ def generate_activation_fn(op_type):
    op_proto = OpProtoHolder.instance().get_op_proto(op_type)

    def func(x, name=None):
-       if in_dygraph_mode() and hasattr(_C_ops, op_type):
-           op = getattr(_C_ops, op_type)
-           return op(x)
-       # TODO(dev): Because some ops' yaml has not been migrated.
-       # Replace it with _in_legacy_dygraph while all yaml work is done.
-       if _non_static_mode():
-           op = getattr(_legacy_C_ops, op_type)
-           return op(x)
+       if in_dygraph_mode():
+           if hasattr(_C_ops, op_type):
+               op = getattr(_C_ops, op_type)
+               return op(x)
+           else:
+               # TODO(dev): Because some ops' yaml has not been migrated.
+               # Replace it with _C_ops while all yaml work is done.
+               op = getattr(_legacy_C_ops, op_type)
+               return op(x)
+       else:
            if op_type not in ["abs", "exp", "square"]:
                check_variable_and_dtype(
                    x, 'x', ['float16', 'float32', 'float64'], op_type
                )
            else:
                # abs exp square ops support dtype(int32, int64, float16, float32, float64)
                check_variable_and_dtype(
                    x, 'x',
                    ['int32', 'int64', 'float16', 'float32', 'float64',
                     'complex64', 'complex128'],
                    op_type,
                )

            helper = LayerHelper(op_type, **locals())
            output = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type=op_type, inputs={"X": x}, outputs={"Out": output}
            )
            return output

    func.__name__ = op_type
    func.__doc__ = _generate_doc_string_(

@@ -332,18 +334,20 @@ def generate_inplace_fn(inplace_op_type):
    origin_op_type = inplace_op_type[:-1]

    def func(x, name=None):
-       if in_dygraph_mode() and hasattr(_C_ops, inplace_op_type):
-           op = getattr(_C_ops, inplace_op_type)
-           return op(x)
-       if _non_static_mode():
-           op = getattr(_legacy_C_ops, inplace_op_type)
-           return op(x)
-       warnings.warn(
-           "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
-               inplace_op_type, origin_op_type
-           )
-       )
-       return generate_activation_fn(origin_op_type)(x, name)
+       if in_dygraph_mode():
+           if hasattr(_C_ops, inplace_op_type):
+               op = getattr(_C_ops, inplace_op_type)
+               return op(x)
+           else:
+               op = getattr(_legacy_C_ops, inplace_op_type)
+               return op(x)
+       else:
+           warnings.warn(
+               "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
+                   inplace_op_type, origin_op_type
+               )
+           )
+           return generate_activation_fn(origin_op_type)(x, name)

    func.__name__ = inplace_op_type
    func.__doc__ = """
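generate_activation_fn manufactures the public unary APIs (exp, abs, square, and friends) at import time. A stripped-down sketch of the same factory idea with the static-graph half elided; make_unary is hypothetical, while _C_ops, paddle._legacy_C_ops, and paddle.in_dynamic_mode are real:

import paddle
from paddle import _C_ops

def make_unary(op_type):
    # Same shape as generate_activation_fn after this commit: prefer the
    # new eager kernel, fall back to the legacy kernel if the yaml for
    # this op has not been migrated yet (static path elided in this sketch).
    def func(x, name=None):
        if paddle.in_dynamic_mode():
            if hasattr(_C_ops, op_type):
                return getattr(_C_ops, op_type)(x)
            return getattr(paddle._legacy_C_ops, op_type)(x)
        raise NotImplementedError("static-graph branch elided in this sketch")
    func.__name__ = op_type
    return func

my_exp = make_unary('exp')
print(my_exp(paddle.to_tensor([0.0, 1.0])))  # [1.0, 2.71828...]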
python/paddle/tensor/linalg.py

@@ -15,7 +15,7 @@
import numpy as np

import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
from paddle.common_ops_import import VarDesc

from ..fluid.data_feeder import (

@@ -23,8 +23,7 @@ from ..fluid.data_feeder import (
    check_type,
    check_variable_and_dtype,
)
-from ..fluid.framework import _in_legacy_dygraph
-from ..framework import LayerHelper, _non_static_mode, in_dygraph_mode
+from ..framework import LayerHelper, in_dygraph_mode
from ..static import Variable
from .creation import full
from .logic import logical_not

@@ -90,53 +89,49 @@ def transpose(x, perm, name=None):
    if in_dygraph_mode():
        return _C_ops.transpose(x, perm)
-   if _in_legacy_dygraph():
-       out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-       return out
+   else:
        check_variable_and_dtype(
            x, 'x',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64',
             'complex64', 'complex128'],
            'transpose',
        )
        check_type(perm, 'perm', (list, tuple), 'transpose')
        if isinstance(perm, tuple):
            perm = list(perm)
        if len(perm) != len(x.shape):
            raise ValueError(
                "Input(perm) is the permutation of dimensions of Input(x), "
                "its length should be equal to dimensions of Input(x), "
                "but received dimension of Input(x) is %s, "
                "the length of Input(perm) is %s." % (len(x.shape), len(perm))
            )
        for idx, dim in enumerate(perm):
            if dim >= len(x.shape):
                raise ValueError(
                    "Each element in Input(perm) should be less than Input(x)'s dimension, "
                    "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                    "dimension %d." % (idx, perm[idx], len(x.shape))
                )

        helper = LayerHelper('transpose', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        x_shape = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='transpose2',
            inputs={'X': [x]},
            outputs={'Out': [out], 'XShape': [x_shape]},
            attrs={'axis': perm},
        )
        return out

def matmul(x, y, transpose_x=False, transpose_y=False, name=None):

@@ -235,38 +230,39 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.matmul(x, y, transpose_x, transpose_y)
-   if _in_legacy_dygraph():
-       op_type = 'matmul_v2'
-       op = getattr(_legacy_C_ops, op_type)
-       return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
+   else:
        attrs = {
            'trans_x': transpose_x,
            'trans_y': transpose_y,
        }

        def __check_input(x, y):
            var_names = {'x': x, 'y': y}
            for name, val in var_names.items():
                check_variable_and_dtype(
                    val, name,
                    ['float16', 'float32', 'float64', 'complex64', 'complex128'],
                    'matmul',
                )

        __check_input(x, y)

        helper = LayerHelper('matmul_v2', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='matmul_v2',
            inputs={'X': x, 'Y': y},
            outputs={'Out': out},
            attrs=attrs,
        )
        return out
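A quick eager-mode check of the two rewritten entry points; as public APIs they exercise only the in_dygraph_mode() branch reconstructed above:

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.transpose(x, perm=[0, 2, 1])   # -> shape [2, 4, 3]
z = paddle.matmul(x, y)                   # batched matmul, shape [2, 3, 3]
n = paddle.linalg.norm(z, p='fro')        # Frobenius norm, scalar tensor
print(y.shape, z.shape, float(n) >= 0)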
def
norm
(
x
,
p
=
'fro'
,
axis
=
None
,
keepdim
=
False
,
name
=
None
):
...
...
@@ -373,33 +369,26 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
if
dim
is
None
:
return
_C_ops
.
frobenius_norm
(
input
,
[],
keepdim
,
True
)
return
_C_ops
.
frobenius_norm
(
input
,
dim
,
keepdim
,
False
)
if
_in_legacy_dygraph
():
else
:
attrs
=
{
'dim'
:
dim
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
False
}
if
dim
is
None
:
return
_legacy_C_ops
.
frobenius_norm
(
input
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
True
)
return
_legacy_C_ops
.
frobenius_norm
(
input
,
'dim'
,
dim
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
False
attrs
[
'reduce_all'
]
=
True
check_variable_and_dtype
(
input
,
'input'
,
[
'float32'
,
'float64'
],
'frobenius_norm'
)
attrs
=
{
'dim'
:
dim
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
False
}
if
dim
is
None
:
attrs
[
'reduce_all'
]
=
True
check_variable_and_dtype
(
input
,
'input'
,
[
'float32'
,
'float64'
],
'frobenius_norm'
)
helper
=
LayerHelper
(
'frobenius_norm'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
helper
=
LayerHelper
(
'frobenius_norm'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
helper
.
append_op
(
type
=
'frobenius_norm'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
,
)
return
out
helper
.
append_op
(
type
=
'frobenius_norm'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
,
)
return
out
def
vector_norm
(
input
,
porder
=
None
,
axis
=
None
,
keepdim
=
False
,
asvector
=
False
,
name
=
None
...
...
@@ -416,49 +405,34 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
if
axis
is
None
:
axis
=
-
1
return
_C_ops
.
p_norm
(
input
,
porder
,
axis
,
1e-12
,
keepdim
,
asvector
)
else
:
if
porder
is
not
None
:
check_type
(
porder
,
'porder'
,
(
float
,
int
),
'p_norm'
)
if
axis
is
not
None
:
check_type
(
axis
,
'axis'
,
(
int
),
'p_norm'
)
check_variable_and_dtype
(
input
,
'input'
,
[
'float32'
,
'float64'
],
'p_norm'
)
if
_in_legacy_dygraph
():
if
axis
is
None
:
axis
=
-
1
return
_legacy_C_ops
.
p_norm
(
input
,
'porder'
,
porder
,
'axis'
,
axis
,
'keepdim'
,
keepdim
,
'asvector'
,
asvector
,
)
if
porder
is
not
None
:
check_type
(
porder
,
'porder'
,
(
float
,
int
),
'p_norm'
)
if
axis
is
not
None
:
check_type
(
axis
,
'axis'
,
(
int
),
'p_norm'
)
check_variable_and_dtype
(
input
,
'input'
,
[
'float32'
,
'float64'
],
'p_norm'
)
attrs
=
{
'axis'
:
axis
if
axis
is
not
None
else
-
1
,
'porder'
:
float
(
porder
)
if
porder
is
not
None
else
2.0
,
'keepdim'
:
keepdim
,
'asvector'
:
asvector
,
'epsilon'
:
1e-12
,
}
helper
=
LayerHelper
(
'p_norm'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
attrs
=
{
'axis'
:
axis
if
axis
is
not
None
else
-
1
,
'porder'
:
float
(
porder
)
if
porder
is
not
None
else
2.0
,
'keepdim'
:
keepdim
,
'asvector'
:
asvector
,
'epsilon'
:
1e-12
,
}
helper
=
LayerHelper
(
'p_norm'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
helper
.
append_op
(
type
=
'p_norm'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
,
)
return
out
helper
.
append_op
(
type
=
'p_norm'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
,
)
return
out
    def inf_norm(
        input, porder=None, axis=axis, keepdim=False, asvector=False, name=None
    ):
...
...
@@ -469,30 +443,38 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             return _C_ops.max(out, axis, keepdim)
         else:
             return _C_ops.min(out, axis, keepdim)
+    else:
         helper = LayerHelper('inf_norm', **locals())
         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype()
         )
         helper.append_op(
             type='abs', inputs={'X': input}, outputs={'Out': out}
         )
         reduce_out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype()
         )
         reduce_all = (
             True if axis is None or axis == [] or asvector else False
         )
         axis = axis if axis is not None and axis != [] else [0]

         reduce_type = (
             'reduce_max' if porder == np.float64('inf') else 'reduce_min'
         )
         helper.append_op(
             type=reduce_type,
             inputs={'X': out},
             outputs={'Out': reduce_out},
             attrs={
                 'dim': axis,
                 'keep_dim': keepdim,
                 'reduce_all': reduce_all,
             },
         )

         return reduce_out
    def p_matrix_norm(input, porder=1.0, axis=axis, keepdim=False, name=None):
        """
...
...
@@ -846,40 +828,6 @@ def cond(x, p=None, name=None):
                 return _C_ops.max(sum_out, [-1], False)
             if porder == -1 or porder == -np.inf:
                 return _C_ops.min(sum_out, [-1], False)
-        elif _in_legacy_dygraph():
-            reduce_all = True if axis is None or axis == [] else False
-            axis = axis if axis is not None and axis != [] else [0]
-            abs_out = _legacy_C_ops.abs(input)
-            sum_out = _legacy_C_ops.reduce_sum(
-                abs_out, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all,
-            )
-            if porder == 1 or porder == np.inf:
-                return _legacy_C_ops.reduce_max(
-                    sum_out, 'dim', [-1], 'keepdim', False, 'reduce_all', reduce_all,
-                )
-            if porder == -1 or porder == -np.inf:
-                return _legacy_C_ops.reduce_min(
-                    sum_out, 'dim', [-1], 'keepdim', False, 'reduce_all', reduce_all,
-                )
         else:
             reduce_all = True if axis is None or axis == [] else False
             axis = axis if axis is not None and axis != [] else [0]
...
...
@@ -940,68 +888,54 @@ def cond(x, p=None, name=None):
             sum_out_1 = _C_ops.sum(pow_out, axis, None, False)
             sum_out_2 = _C_ops.sum(sum_out_1, axis, None, False)
             return _C_ops.pow(sum_out_2, float(1.0 / porder))
-        elif paddle.in_dynamic_mode():
-            reduce_all = True if axis is None or axis == [] else False
-            pow_out = _legacy_C_ops.pow(input, 'factor', porder)
-            sum_out_1 = _legacy_C_ops.reduce_sum(
-                pow_out, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all,
-            )
-            sum_out_2 = _legacy_C_ops.reduce_sum(
-                sum_out_1, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all,
-            )
-            return _legacy_C_ops.pow(sum_out_2, 'factor', float(1.0 / porder))
+        else:
             reduce_all = True if axis is None or axis == [] else False
             block = LayerHelper('norm', **locals())
             pow_out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             sum_out_1 = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             sum_out_2 = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             block.append_op(
                 type='pow',
                 inputs={'X': input},
                 outputs={'Out': pow_out},
                 attrs={'factor': porder},
             )
             block.append_op(
                 type='reduce_sum',
                 inputs={'X': pow_out},
                 outputs={'Out': sum_out_1},
                 attrs={
                     'dim': axis,
                     'keep_dim': False,
                     'reduce_all': reduce_all,
                 },
             )
             block.append_op(
                 type='reduce_sum',
                 inputs={'X': sum_out_1},
                 outputs={'Out': sum_out_2},
                 attrs={
                     'dim': axis,
                     'keep_dim': False,
                     'reduce_all': reduce_all,
                 },
             )
             block.append_op(
                 type='pow',
                 inputs={'X': sum_out_2},
                 outputs={'Out': out},
                 attrs={'factor': float(1.0 / porder)},
             )
             return out
    def svd_norm(input, porder, axis=[-1]):
        """
...
...
@@ -1009,101 +943,80 @@ def cond(x, p=None, name=None):
         Calculate the matrix norm, which is related to singular values, of a matrix
         or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.
         """
-        if not in_dygraph_mode():
-            reduce_all = True if axis is None or axis == [] else False
         u, s, vh = svd(input, full_matrices=False)

-        if _non_static_mode():
+        if in_dygraph_mode():
             if porder == "nuc":
-                if in_dygraph_mode():
-                    return _C_ops.sum(s, axis, None, False)
-                else:
-                    return _legacy_C_ops.reduce_sum(
-                        s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all,
-                    )
-            if in_dygraph_mode():
-                max_out = _C_ops.max(s, axis, False)
-                min_out = _C_ops.min(s, axis, False)
-                if porder == 2:
-                    return _C_ops.divide(max_out, min_out)
-                if porder == -2:
-                    return _C_ops.divide(min_out, max_out)
-            else:
-                max_out = _legacy_C_ops.reduce_max(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                min_out = _legacy_C_ops.reduce_min(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                if porder == 2:
-                    return _legacy_C_ops.elementwise_div(
-                        max_out, min_out, 'aixs', axis, 'use_mkldnn', False
-                    )
-                if porder == -2:
-                    return _legacy_C_ops.elementwise_div(
-                        min_out, max_out, 'aixs', axis, 'use_mkldnn', False
-                    )
+                return _C_ops.sum(s, axis, None, False)
+            max_out = _C_ops.max(s, axis, False)
+            min_out = _C_ops.min(s, axis, False)
+            if porder == 2:
+                return _C_ops.divide(max_out, min_out)
+            if porder == -2:
+                return _C_ops.divide(min_out, max_out)
+        else:
+            reduce_all = True if axis is None or axis == [] else False
             block = LayerHelper('norm', **locals())
             out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             if porder == "nuc":
                 block.append_op(
                     type='reduce_sum',
                     inputs={'X': s},
                     outputs={'Out': out},
                     attrs={
                         'dim': axis,
                         'keep_dim': False,
                         'reduce_all': reduce_all,
                     },
                 )
                 return out
             max_out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             min_out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             block.append_op(
                 type='reduce_max',
                 inputs={'X': s},
                 outputs={'Out': max_out},
                 attrs={
                     'dim': axis,
                     'keep_dim': False,
                     'reduce_all': reduce_all,
                 },
             )
             block.append_op(
                 type='reduce_min',
                 inputs={'X': s},
                 outputs={'Out': min_out},
                 attrs={
                     'dim': axis,
                     'keep_dim': False,
                     'reduce_all': reduce_all,
                 },
             )
             if porder == 2:
                 block.append_op(
                     type='elementwise_div',
                     inputs={'X': max_out, 'Y': min_out},
                     outputs={'Out': out},
                     attrs={'aixs': axis, 'use_mkldnn': False},
                 )
                 return out
             if porder == -2:
                 block.append_op(
                     type='elementwise_div',
                     inputs={'X': min_out, 'Y': max_out},
                     outputs={'Out': out},
                     attrs={'aixs': axis, 'use_mkldnn': False},
                 )
                 return out
    def empty_tensor(input, shape):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             return input.reshape(shape)
         raise ValueError("only support x is nonempty tensor in static mode")
...
...
@@ -1186,32 +1099,30 @@ def dot(x, y, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
dot
(
x
,
y
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
dot
(
x
,
y
)
op_type
=
'dot'
else
:
op_type
=
'dot'
assert
x
is
not
None
,
'x cannot be None in {}'
.
format
(
op_type
)
assert
y
is
not
None
,
'y cannot be None in {}'
.
format
(
op_type
)
assert
x
is
not
None
,
'x cannot be None in {}'
.
format
(
op_type
)
assert
y
is
not
None
,
'y cannot be None in {}'
.
format
(
op_type
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
op_type
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
op_type
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
op_type
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
op_type
)
helper
=
LayerHelper
(
op_type
,
**
locals
())
if
name
is
None
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
else
:
out
=
helper
.
create_variable
(
name
=
name
,
dtype
=
x
.
dtype
,
persistable
=
False
helper
=
LayerHelper
(
op_type
,
**
locals
())
if
name
is
None
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
else
:
out
=
helper
.
create_variable
(
name
=
name
,
dtype
=
x
.
dtype
,
persistable
=
False
)
helper
.
append_op
(
type
=
"dot"
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
attrs
=
{},
outputs
=
{
"Out"
:
out
}
)
helper
.
append_op
(
type
=
"dot"
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
attrs
=
{},
outputs
=
{
"Out"
:
out
}
)
return
out
return
out
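Not part of the diff — a minimal usage sketch of paddle.dot in dygraph mode (values illustrative):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([4.0, 5.0, 6.0])
# 1*4 + 2*5 + 3*6 = 32
print(paddle.dot(x, y))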
def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
...
...
@@ -1389,36 +1300,28 @@ def t(input, name=None):
         perm = [1, 0]
         out = _C_ops.transpose(input, perm)
         return out
-    check_variable_and_dtype(
-        input,
-        'input',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'transpose',
-    )
-    if _in_legacy_dygraph():
-        if len(input.shape) == 1:
-            return input
-        # 2-D tensor
-        perm = [1, 0]
-        out, _ = _legacy_C_ops.transpose2(input, 'axis', perm)
-        return out
+    else:
+        check_variable_and_dtype(
+            input,
+            'input',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'transpose',
+        )
         helper = LayerHelper('t', **locals())
         out = helper.create_variable_for_type_inference(input.dtype)
         input_shape = helper.create_variable_for_type_inference(input.dtype)
         if len(input.shape) == 1:
             out = input
         else:
             helper.append_op(
                 type='transpose2',
                 inputs={'X': [input]},
                 outputs={'Out': [out], 'XShape': [input_shape]},
                 attrs={'axis': [1, 0]},
             )
         return out
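Not part of the diff — a quick sketch of paddle.t, which the hunk above rewires (shapes illustrative):

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
# t() only accepts tensors with at most two dimensions
print(paddle.t(x).shape)  # [3, 2]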
def cross(x, y, axis=9, name=None):
    """
...
...
@@ -1462,24 +1365,18 @@ def cross(x, y, axis=9, name=None):
         axis = K_DEFAULT_DIM if axis is None else axis
         return _C_ops.cross(x, y, axis)
     else:
-        if _in_legacy_dygraph():
-            if axis is not None:
-                return _legacy_C_ops.cross(x, y, 'dim', axis)
-            else:
-                return _legacy_C_ops.cross(x, y)
-        else:
         helper = LayerHelper("cross", **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         attrs = dict()
         attrs['dim'] = axis

         helper.append_op(
             type='cross',
             inputs={'X': x, 'Y': y},
             outputs={'Out': out},
             attrs=attrs,
         )
         return out
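Not part of the diff — a minimal sketch of paddle.cross; I assume the default axis picks the first dimension of size 3 here (values illustrative):

import paddle

x = paddle.to_tensor([1.0, 0.0, 0.0])
y = paddle.to_tensor([0.0, 1.0, 0.0])
# cross product of the unit x and y axes is the unit z axis
print(paddle.cross(x, y))  # [0., 0., 1.]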
def cholesky(x, upper=False, name=None):
...
...
@@ -1520,21 +1417,18 @@ def cholesky(x, upper=False, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
cholesky
(
x
,
upper
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
cholesky
(
x
,
"upper"
,
upper
)
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'cholesky'
)
check_type
(
upper
,
'upper'
,
bool
,
'cholesky'
)
helper
=
LayerHelper
(
'cholesky'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'cholesky'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
},
)
return
out
else
:
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'cholesky'
)
check_type
(
upper
,
'upper'
,
bool
,
'cholesky'
)
helper
=
LayerHelper
(
'cholesky'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'cholesky'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
},
)
return
out
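Not part of the diff — a minimal sketch of paddle.linalg.cholesky on a symmetric positive-definite input (values illustrative):

import paddle

a = paddle.to_tensor([[4.0, 2.0], [2.0, 3.0]])
l = paddle.linalg.cholesky(a, upper=False)
# l @ l.T reconstructs a
print(paddle.matmul(l, paddle.t(l)))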
def matrix_rank(x, tol=None, hermitian=False, name=None):
...
...
@@ -1594,59 +1488,32 @@ def matrix_rank(x, tol=None, hermitian=False, name=None):
             tol_attr = float(tol)
             use_default_tol = False
         return _C_ops.matrix_rank(x, tol_attr, hermitian, use_default_tol)
-    if _in_legacy_dygraph():
-        if tol is None:
-            tol_tensor = None
-            tol_attr = 0.0
-            use_default_tol = True
-        elif isinstance(tol, Variable):
-            if tol.dtype != x.dtype:
-                tol_tensor = cast(tol, x.dtype)
-            else:
-                tol_tensor = tol
-            tol_attr = 0.0
-            use_default_tol = False
-        else:
-            tol_tensor = None
-            tol_attr = float(tol)
-            use_default_tol = False
-        return _legacy_C_ops.matrix_rank(
-            x,
-            tol_tensor,
-            "tol",
-            tol_attr,
-            'hermitian',
-            hermitian,
-            'use_default_tol',
-            use_default_tol,
-        )
+    else:
         inputs = {}
         attrs = {}
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
         inputs['X'] = x
         if tol is None:
             attrs['use_default_tol'] = True
         elif isinstance(tol, Variable):
             attrs['use_default_tol'] = False
             if tol.dtype != x.dtype:
                 inputs['TolTensor'] = cast(tol, x.dtype)
             else:
                 inputs['TolTensor'] = tol
         else:
             check_type(tol, 'tol', float, 'matrix_rank')
             attrs['use_default_tol'] = False
             attrs['tol'] = tol
         check_type(hermitian, 'hermitian', bool, 'matrix_rank')
         attrs['hermitian'] = hermitian
         helper = LayerHelper('matrix_rank', **locals())
         out = helper.create_variable_for_type_inference(dtype='int32')
         helper.append_op(
             type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs
         )
         return out
def bmm(x, y, name=None):
...
...
@@ -1711,14 +1578,13 @@ def bmm(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.bmm(x, y)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.bmm(x, y)
+    else:
         helper = LayerHelper('bmm', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}
         )
         return out
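Not part of the diff — a minimal sketch of paddle.bmm, the batched matmul rewired above (shapes illustrative):

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.rand([2, 4, 5])
# batched matmul over the leading batch dimension
print(paddle.bmm(x, y).shape)  # [2, 3, 5]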
def histogram(input, bins=100, min=0, max=0, name=None):
...
...
@@ -1748,24 +1614,19 @@ def histogram(input, bins=100, min=0, max=0, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
histogram
(
input
,
bins
,
min
,
max
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
histogram
(
input
,
"bins"
,
bins
,
"min"
,
min
,
"max"
,
max
else
:
helper
=
LayerHelper
(
'histogram'
,
**
locals
())
check_variable_and_dtype
(
input
,
'X'
,
[
'int32'
,
'int64'
,
'float32'
,
'float64'
],
'histogram'
)
helper
=
LayerHelper
(
'histogram'
,
**
locals
())
check_variable_and_dtype
(
input
,
'X'
,
[
'int32'
,
'int64'
,
'float32'
,
'float64'
],
'histogram'
)
out
=
helper
.
create_variable_for_type_inference
(
VarDesc
.
VarType
.
INT64
)
helper
.
append_op
(
type
=
'histogram'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'bins'
:
bins
,
'min'
:
min
,
'max'
:
max
},
)
return
out
out
=
helper
.
create_variable_for_type_inference
(
VarDesc
.
VarType
.
INT64
)
helper
.
append_op
(
type
=
'histogram'
,
inputs
=
{
'X'
:
input
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'bins'
:
bins
,
'min'
:
min
,
'max'
:
max
},
)
return
out
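Not part of the diff — a minimal sketch of paddle.histogram; bin edges and expected counts are my own working, not from the commit:

import paddle

x = paddle.to_tensor([1.0, 2.0, 1.0, 4.0])
# 4 equal-width bins over [0, 4]; output dtype is int64 per the code above
print(paddle.histogram(x, bins=4, min=0, max=4))  # [0, 2, 1, 1]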
def bincount(x, weights=None, minlength=0, name=None):
...
...
@@ -1800,30 +1661,28 @@ def bincount(x, weights=None, minlength=0, name=None):
     if in_dygraph_mode():
         return _C_ops.bincount(x, weights, minlength)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.bincount(x, weights, "minlength", minlength)
+    else:
         helper = LayerHelper('bincount', **locals())

         check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')

         if weights is not None:
             check_variable_and_dtype(
                 weights,
                 'Weights',
                 ['int32', 'int64', 'float32', 'float64'],
                 'bincount',
             )
             out = helper.create_variable_for_type_inference(dtype=weights.dtype)
         else:
             out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='bincount',
             inputs={'X': x, 'Weights': weights},
             outputs={'Out': out},
             attrs={'minlength': minlength},
         )
         return out
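Not part of the diff — a minimal sketch of paddle.bincount (values illustrative):

import paddle

x = paddle.to_tensor([0, 1, 1, 3])
# counts occurrences of each integer value from 0 to max(x)
print(paddle.bincount(x))  # [1, 2, 0, 1]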
def mv(x, vec, name=None):
...
...
@@ -1859,40 +1718,36 @@ def mv(x, vec, name=None):
     if in_dygraph_mode():
         return _C_ops.mv(x, vec)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.mv(x, vec)
-            return out
-        else:
         def __check_input(x, vec):
             var_names = {'x': x, 'vec': vec}
             for name, val in var_names.items():
                 check_variable_and_dtype(
                     val, name, ['float32', 'float64'], 'mv'
                 )
             x_shape = list(x.shape)
             vec_shape = list(vec.shape)
             if len(x_shape) != 2:
                 raise ValueError(
                     "x should be 2-dimensional. But received x's dimention: {}".format(
                         x_shape
                     )
                 )
             if len(vec_shape) != 1:
                 raise ValueError(
                     "vec should be 1-dimensional. But received vec's dimention: {}".format(
                         vec_shape
                     )
                 )

         __check_input(x, vec)

         helper = LayerHelper('mv', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}
         )
         return out
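Not part of the diff — a minimal sketch of paddle.mv (values illustrative):

import paddle

x = paddle.to_tensor([[2.0, 1.0, 3.0], [3.0, 0.0, 1.0]])
vec = paddle.to_tensor([3.0, 5.0, 1.0])
# matrix-vector product: [2*3 + 1*5 + 3*1, 3*3 + 0*5 + 1*1] = [14., 10.]
print(paddle.mv(x, vec))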
def det(x, name=None):
...
...
@@ -1927,31 +1782,28 @@ def det(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
det
(
x
)
else
:
check_dtype
(
x
.
dtype
,
'Input'
,
[
'float32'
,
'float64'
],
'det'
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
determinant
(
x
)
check_dtype
(
x
.
dtype
,
'Input'
,
[
'float32'
,
'float64'
],
'det'
)
input_shape
=
list
(
x
.
shape
)
assert
len
(
input_shape
)
>=
2
,
(
"The x must be at least 2-dimensional, "
"but received Input x's dimensional: %s.
\n
"
%
len
(
input_shape
)
)
input_shape
=
list
(
x
.
shape
)
assert
len
(
input_shape
)
>=
2
,
(
"The x must be at least 2-dimensional, "
"but received Input x's dimensional: %s.
\n
"
%
len
(
input_shape
)
)
assert
(
input_shape
[
-
1
]
==
input_shape
[
-
2
]
),
"Expect squared input,"
"but received %s by %s matrix.
\n
"
%
(
input_shape
[
-
2
],
input_shape
[
-
1
],
)
helper
=
LayerHelper
(
'determinant'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
assert
(
input_shape
[
-
1
]
==
input_shape
[
-
2
]
),
"Expect squared input,"
"but received %s by %s matrix.
\n
"
%
(
input_shape
[
-
2
],
input_shape
[
-
1
],
)
helper
=
LayerHelper
(
'determinant'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'determinant'
,
inputs
=
{
'Input'
:
[
x
]},
outputs
=
{
'Out'
:
[
out
]}
)
return
out
helper
.
append_op
(
type
=
'determinant'
,
inputs
=
{
'Input'
:
[
x
]},
outputs
=
{
'Out'
:
[
out
]}
)
return
out
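Not part of the diff — a minimal sketch of paddle.linalg.det (values illustrative):

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
# 1*4 - 2*3 = -2
print(paddle.linalg.det(x))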
def slogdet(x, name=None):
...
...
@@ -1989,31 +1841,30 @@ def slogdet(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
slogdet
(
x
)
else
:
check_dtype
(
x
.
dtype
,
'Input'
,
[
'float32'
,
'float64'
],
'slogdet'
)
elif
paddle
.
in_dynamic_mode
():
return
_legacy_C_ops
.
slogdeterminant
(
x
)
check_dtype
(
x
.
dtype
,
'Input'
,
[
'float32'
,
'float64'
],
'slogdet'
)
input_shape
=
list
(
x
.
shape
)
assert
len
(
input_shape
)
>=
2
,
(
"The x must be at least 2-dimensional, "
"but received Input x's dimensional: %s.
\n
"
%
len
(
input_shape
)
)
input_shape
=
list
(
x
.
shape
)
assert
len
(
input_shape
)
>=
2
,
(
"The x must be at least 2-dimensional, "
"but received Input x's dimensional: %s.
\n
"
%
len
(
input_shape
)
)
assert
(
input_shape
[
-
1
]
==
input_shape
[
-
2
]
),
"Expect squared input,"
"but received %s by %s matrix.
\n
"
%
(
input_shape
[
-
2
],
input_shape
[
-
1
],
)
helper
=
LayerHelper
(
'slogdeterminant'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
assert
(
input_shape
[
-
1
]
==
input_shape
[
-
2
]
),
"Expect squared input,"
"but received %s by %s matrix.
\n
"
%
(
input_shape
[
-
2
],
input_shape
[
-
1
],
)
helper
=
LayerHelper
(
'slogdeterminant'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'slogdeterminant'
,
inputs
=
{
'Input'
:
[
x
]},
outputs
=
{
'Out'
:
[
out
]}
)
return
out
helper
.
append_op
(
type
=
'slogdeterminant'
,
inputs
=
{
'Input'
:
[
x
]},
outputs
=
{
'Out'
:
[
out
]},
)
return
out
def svd(x, full_matrices=False, name=None):
...
...
@@ -2071,23 +1922,22 @@ def svd(x, full_matrices=False, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
svd
(
x
,
full_matrices
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
svd
(
x
,
'full_matrices'
,
full_matrices
)
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'svd'
)
check_type
(
full_matrices
,
'full_matrices'
,
bool
,
'svd'
)
helper
=
LayerHelper
(
'svd'
,
**
locals
())
u
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
vh
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
s
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
attrs
=
dict
()
attrs
[
'full_matrices'
]
=
full_matrices
helper
.
append_op
(
type
=
'svd'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'U'
:
u
,
'VH'
:
vh
,
'S'
:
s
},
attrs
=
attrs
,
)
return
u
,
s
,
vh
else
:
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'svd'
)
check_type
(
full_matrices
,
'full_matrices'
,
bool
,
'svd'
)
helper
=
LayerHelper
(
'svd'
,
**
locals
())
u
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
vh
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
s
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
attrs
=
dict
()
attrs
[
'full_matrices'
]
=
full_matrices
helper
.
append_op
(
type
=
'svd'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'U'
:
u
,
'VH'
:
vh
,
'S'
:
s
},
attrs
=
attrs
,
)
return
u
,
s
,
vh
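Not part of the diff — a minimal sketch of paddle.linalg.svd (shapes illustrative):

import paddle

x = paddle.rand([4, 3])
u, s, vh = paddle.linalg.svd(x, full_matrices=False)
# reduced factorization: u is [4, 3], s is [3], vh is [3, 3]
print(u.shape, s.shape, vh.shape)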
def matrix_power(x, n, name=None):
...
...
@@ -2146,21 +1996,20 @@ def matrix_power(x, n, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
matrix_power
(
x
,
n
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
matrix_power
(
x
,
"n"
,
n
)
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'matrix_power'
)
check_type
(
n
,
'n'
,
int
,
'matrix_power'
)
helper
=
LayerHelper
(
'matrix_power'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'matrix_power'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'n'
:
n
},
)
return
out
else
:
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
],
'matrix_power'
)
check_type
(
n
,
'n'
,
int
,
'matrix_power'
)
helper
=
LayerHelper
(
'matrix_power'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'matrix_power'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'n'
:
n
},
)
return
out
def qr(x, mode="reduced", name=None):
...
...
@@ -2211,26 +2060,21 @@ def qr(x, mode="reduced", name=None):
             return r
         else:
             return q, r
-    if _in_legacy_dygraph():
-        q, r = _legacy_C_ops.qr(x, 'mode', mode)
-        if mode == "r":
-            return r
-        else:
-            return q, r
+    else:
         check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
         check_type(mode, 'mode', str, 'qr')
         helper = LayerHelper('qr', **locals())
         q = helper.create_variable_for_type_inference(dtype=x.dtype)
         r = helper.create_variable_for_type_inference(dtype=x.dtype)
         attrs = dict()
         attrs['mode'] = mode
         helper.append_op(
             type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs
         )
         if mode == "r":
             return r
         else:
             return q, r
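Not part of the diff — a minimal sketch of paddle.linalg.qr (shapes illustrative):

import paddle

x = paddle.rand([4, 3])
q, r = paddle.linalg.qr(x, mode="reduced")
# q has orthonormal columns, r is upper triangular; q @ r reconstructs x
print(q.shape, r.shape)  # [4, 3] [3, 3]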
def lu(x, pivot=True, get_infos=False, name=None):
...
...
@@ -2315,8 +2159,6 @@ def lu(x, pivot=True, get_infos=False, name=None):
     if in_dygraph_mode():
         lu, p, info = _C_ops.lu(x, pivot)
-    elif paddle.in_dynamic_mode():
-        lu, p, info = _legacy_C_ops.lu(x, 'pivot', pivot)
     else:
         check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
         helper = LayerHelper('lu', **locals())
...
...
@@ -2413,29 +2255,25 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
     if in_dygraph_mode():
         P, L, U = _C_ops.lu_unpack(x, y, unpack_ludata, unpack_pivots)
         return P, L, U
-    if paddle.in_dynamic_mode():
-        P, L, U = _legacy_C_ops.lu_unpack(
-            x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots
-        )
-        return P, L, U
+    else:
         check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
         helper = LayerHelper('lu_unpack', **locals())
         p = helper.create_variable_for_type_inference(dtype=x.dtype)
         l = helper.create_variable_for_type_inference(dtype=x.dtype)
         u = helper.create_variable_for_type_inference(dtype=x.dtype)

         attrs = dict()
         attrs['unpack_ludata'] = unpack_ludata
         attrs['unpack_pivots'] = unpack_pivots
         helper.append_op(
             type='lu_unpack',
             inputs={'X': x, 'Pivots': y},
             outputs={'Pmat': p, 'L': l, 'U': u},
             attrs=attrs,
         )
         return p, l, u
def eig(x, name=None):
...
...
@@ -2486,23 +2324,20 @@ def eig(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
eig
(
x
)
elif
paddle
.
in_dynamic_mode
():
w
,
v
=
_legacy_C_ops
.
eig
(
x
)
return
w
,
v
check_variable_and_dtype
(
x
,
'X'
,
[
'float32'
,
'float64'
,
'complex64'
,
'complex128'
],
'eig'
)
helper
=
LayerHelper
(
'eig'
,
**
locals
())
else
:
check_variable_and_dtype
(
x
,
'X'
,
[
'float32'
,
'float64'
,
'complex64'
,
'complex128'
],
'eig'
)
helper
=
LayerHelper
(
'eig'
,
**
locals
())
w
=
helper
.
create_variable_for_type_inference
(
x
.
dtype
)
v
=
helper
.
create_variable_for_type_inference
(
x
.
dtype
)
w
=
helper
.
create_variable_for_type_inference
(
x
.
dtype
)
v
=
helper
.
create_variable_for_type_inference
(
x
.
dtype
)
inputs
=
{
'X'
:
x
}
outputs
=
{
'Eigenvalues'
:
w
,
'Eigenvectors'
:
v
}
helper
.
append_op
(
type
=
'eig'
,
inputs
=
inputs
,
outputs
=
outputs
)
inputs
=
{
'X'
:
x
}
outputs
=
{
'Eigenvalues'
:
w
,
'Eigenvectors'
:
v
}
helper
.
append_op
(
type
=
'eig'
,
inputs
=
inputs
,
outputs
=
outputs
)
return
w
,
v
return
w
,
v
def eigvals(x, name=None):
...
...
@@ -2562,13 +2397,11 @@ def eigvals(x, name=None):
     if in_dygraph_mode():
         return _C_ops.eigvals(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.eigvals(x)
+    else:
         helper = LayerHelper('eigvals', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
         return out
def multi_dot(x, name=None):
...
...
@@ -2627,29 +2460,29 @@ def multi_dot(x, name=None):
         # [10, 7]
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multi_dot(x)
     if in_dygraph_mode():
         return _C_ops.multi_dot(x)
+    else:
         check_type(x, 'x', (list, tuple), 'multi_dot')
         for id, item in enumerate(x):
             check_variable_and_dtype(
                 item,
                 'x[' + str(id) + ']',
                 ['float16', 'float32', 'float64'],
                 'multi_dot',
             )
             if item.dtype != x[0].dtype:
                 raise TypeError(
                     "All the Tensors in the input must have the same data type."
                 )

         helper = LayerHelper('multi_dot', **locals())
         dtype = helper.input_dtype(input_param_name='x')
         out = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
             type='multi_dot', inputs={"X": x}, outputs={"Out": out}
         )
         return out
def eigh(x, UPLO='L', name=None):
...
...
@@ -2687,45 +2520,46 @@ def eigh(x, UPLO='L', name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
eigh
(
x
,
UPLO
)
else
:
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
eigh
(
x
,
'UPLO'
,
UPLO
)
def
__check_input
(
x
,
UPLO
):
x_shape
=
list
(
x
.
shape
)
if
len
(
x
.
shape
)
<
2
:
raise
ValueError
(
"Input(input) only support >=2 tensor, but received "
"length of Input(input) is %s."
%
len
(
x
.
shape
)
)
if
x_shape
[
-
1
]
!=
x_shape
[
-
2
]:
raise
ValueError
(
"The input matrix must be batches of square matrices. But received x's dimention: {}"
.
format
(
x_shape
def
__check_input
(
x
,
UPLO
):
x_shape
=
list
(
x
.
shape
)
if
len
(
x
.
shape
)
<
2
:
raise
ValueError
(
"Input(input) only support >=2 tensor, but received "
"length of Input(input) is %s."
%
len
(
x
.
shape
)
)
if
x_shape
[
-
1
]
!=
x_shape
[
-
2
]:
raise
ValueError
(
"The input matrix must be batches of square matrices. But received x's dimention: {}"
.
format
(
x_shape
)
)
if
UPLO
!=
'L'
and
UPLO
!=
'U'
:
raise
ValueError
(
"UPLO must be L or U. But received UPLO is: {}"
.
format
(
UPLO
)
)
)
if
UPLO
!=
'L'
and
UPLO
!=
'U'
:
raise
ValueError
(
"UPLO must be L or U. But received UPLO is: {}"
.
format
(
UPLO
)
)
__check_input
(
x
,
UPLO
)
__check_input
(
x
,
UPLO
)
helper
=
LayerHelper
(
'eigh'
,
**
locals
())
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
,
'complex64'
,
'complex128'
],
'eigh'
)
helper
=
LayerHelper
(
'eigh'
,
**
locals
())
check_variable_and_dtype
(
x
,
'dtype'
,
[
'float32'
,
'float64'
,
'complex64'
,
'complex128'
],
'eigh'
,
)
out_value
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out_vector
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out_value
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out_vector
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'eigh'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Eigenvalues'
:
out_value
,
'Eigenvectors'
:
out_vector
},
attrs
=
{
'UPLO'
:
UPLO
},
)
return
out_value
,
out_vector
helper
.
append_op
(
type
=
'eigh'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Eigenvalues'
:
out_value
,
'Eigenvectors'
:
out_vector
},
attrs
=
{
'UPLO'
:
UPLO
},
)
return
out_value
,
out_vector
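Not part of the diff — a minimal sketch of paddle.linalg.eigh on a Hermitian input (values illustrative):

import paddle

a = paddle.to_tensor([[2.0, 1.0], [1.0, 2.0]])
# eigenvalues of this symmetric matrix are 1 and 3
values, vectors = paddle.linalg.eigh(a, UPLO='L')
print(values)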
def pinv(x, rcond=1e-15, hermitian=False, name=None):
...
...
@@ -2838,68 +2672,6 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             u_conj = _C_ops.conj(u)
             out_2 = _C_ops.matmul(out_1, u_conj, False, True)
             return out_2
-    if _in_legacy_dygraph():
-        if not hermitian:
-            # combine svd and matmul op
-            u, s, vt = _legacy_C_ops.svd(x, 'full_matrices', False)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=x.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=x.dtype)
-
-            condition = s > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            dims = list(range(len(vt.shape)))
-            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
-            v, _ = _legacy_C_ops.transpose2(vt, 'axis', perm)
-
-            out_1 = v * st
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
-        else:
-            # combine eigh and matmul op
-            s, u = _legacy_C_ops.eigh(x, 'UPLO', 'L')
-            s_abs = paddle.abs(s)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s_abs, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=s.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=s.dtype)
-
-            condition = s_abs > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            out_1 = u * st
-            u_conj = _legacy_C_ops.conj(u)
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u_conj, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u_conj, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
     else:
         if not hermitian:
             helper = LayerHelper('pinv', **locals())
...
...
@@ -3098,20 +2870,17 @@ def solve(x, y, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
solve
(
x
,
y
)
else
:
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
"solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
solve
(
x
,
y
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
"solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
"solve"
,
inputs
=
{
"X"
:
x
,
"Y"
:
y
},
outputs
=
{
"Out"
:
out
}
)
return
out
helper
.
append_op
(
type
=
"solve"
,
inputs
=
{
"X"
:
x
,
"Y"
:
y
},
outputs
=
{
"Out"
:
out
}
)
return
out
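Not part of the diff — a minimal sketch of paddle.linalg.solve (values illustrative):

import paddle

a = paddle.to_tensor([[3.0, 1.0], [1.0, 2.0]])
b = paddle.to_tensor([9.0, 8.0])
# solves a @ x = b; the solution here is [2., 3.]
print(paddle.linalg.solve(a, b))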
def triangular_solve(
...
...
@@ -3170,36 +2939,28 @@ def triangular_solve(
"""
if
in_dygraph_mode
():
return
_C_ops
.
triangular_solve
(
x
,
y
,
upper
,
transpose
,
unitriangular
)
if
paddle
.
in_dynamic_mode
():
return
_legacy_C_ops
.
triangular_solve
(
x
,
y
,
'upper'
,
upper
,
'transpose'
,
transpose
,
'unitriangular'
,
unitriangular
,
else
:
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
"triangular_solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
"triangular_solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'triangular_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
,
'transpose'
:
transpose
,
'unitriangular'
:
unitriangular
,
},
)
return
out
helper
.
append_op
(
type
=
'triangular_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
,
'transpose'
:
transpose
,
'unitriangular'
:
unitriangular
,
},
)
return
out
def cholesky_solve(x, y, upper=False, name=None):
...
...
@@ -3237,22 +2998,23 @@ def cholesky_solve(x, y, upper=False, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
cholesky_solve
(
x
,
y
,
upper
)
else
:
helper
=
LayerHelper
(
"cholesky_solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
cholesky_solve
(
x
,
y
,
'upper'
,
upper
)
helper
=
LayerHelper
(
"cholesky_solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'cholesky_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
},
)
return
out
helper
.
append_op
(
type
=
'cholesky_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
},
)
return
out
def eigvalsh(x, UPLO='L', name=None):
...
...
@@ -3284,51 +3046,47 @@ def eigvalsh(x, UPLO='L', name=None):
     if in_dygraph_mode():
         values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
         return values
-    elif paddle.in_dynamic_mode():
-        is_test = x.stop_gradient
-        values, _ = _legacy_C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
-        return values
+    else:
         def __check_input(x, UPLO):
             x_shape = list(x.shape)
             if len(x.shape) < 2:
                 raise ValueError(
                     "Input(input) only support >=2 tensor, but received "
                     "length of Input(input) is %s." % len(x.shape)
                 )
             if x_shape[-1] != x_shape[-2]:
                 raise ValueError(
                     "The input matrix must be batches of square matrices. But received x's dimention: {}".format(
                         x_shape
                     )
                 )
             if UPLO != 'L' and UPLO != 'U':
                 raise ValueError(
                     "UPLO must be L or U. But received UPLO is: {}".format(UPLO)
                 )

         __check_input(x, UPLO)

         helper = LayerHelper('eigvalsh', **locals())
         check_variable_and_dtype(
             x,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'eigvalsh',
         )

         out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
         out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)

         is_test = x.stop_gradient
         helper.append_op(
             type='eigvalsh',
             inputs={'X': x},
             outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector},
             attrs={'UPLO': UPLO, 'is_test': is_test},
         )
         return out_value
def lstsq(x, y, rcond=None, driver=None, name=None):
...
...
@@ -3423,16 +3181,10 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
     elif x.dtype == paddle.float64:
         rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

-    if _non_static_mode():
-        if in_dygraph_mode():
-            solution, residuals, rank, singular_values = _C_ops.lstsq(
-                x, y, rcond, driver
-            )
-        else:
-            solution, residuals, rank, singular_values = _legacy_C_ops.lstsq(
-                x, y, 'rcond', rcond, 'driver', driver
-            )
+    if in_dygraph_mode():
+        solution, residuals, rank, singular_values = _C_ops.lstsq(
+            x, y, rcond, driver
+        )
         if driver == "gels":
             rank = paddle.empty(shape=[0], dtype=paddle.int32)
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)
...
...
@@ -3440,39 +3192,51 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)

         return solution, residuals, rank, singular_values
+    else:
         helper = LayerHelper('lstsq', **locals())
         check_variable_and_dtype(
             x,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'lstsq',
         )
         check_variable_and_dtype(
             y,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'lstsq',
         )

         solution = helper.create_variable_for_type_inference(dtype=x.dtype)
         residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
         rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
         singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type='lstsq',
             inputs={'X': x, 'Y': y},
             outputs={
                 'Solution': solution,
                 'Residuals': residuals,
                 'Rank': rank,
                 'SingularValues': singular_values,
             },
             attrs={'rcond': rcond, 'driver': driver},
         )

         if driver == "gels":
             rank = paddle.static.data(name='rank', shape=[0])
             singular_values = paddle.static.data(
                 name='singular_values', shape=[0]
             )
         elif driver == "gelsy":
             singular_values = paddle.static.data(
                 name='singular_values', shape=[0]
             )

         return solution, residuals, rank, singular_values
def corrcoef(x, rowvar=True, name=None):
...
...
python/paddle/tensor/logic.py View file @ 861fef52
...
...
@@ -26,10 +26,9 @@ if _in_eager_mode_:
 else:
     from ..framework import VarBase as Tensor

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.tensor.creation import full

-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import LayerHelper, in_dygraph_mode

 __all__ = []
...
...
@@ -42,47 +41,52 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
-    check_variable_and_dtype(
-        x,
-        "x",
-        ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
-        op_name,
-    )
-    if y is not None:
+    else:
         check_variable_and_dtype(
-            y,
-            "y",
+            x,
+            "x",
             ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
             op_name,
         )
-    if out is not None:
-        check_type(out, "out", Variable, op_name)
+        if y is not None:
+            check_variable_and_dtype(
+                y,
+                "y",
+                [
+                    "bool",
+                    "int8",
+                    "int16",
+                    "int32",
+                    "int64",
+                    "float32",
+                    "float64",
+                ],
+                op_name,
+            )
+        if out is not None:
+            check_type(out, "out", Variable, op_name)

         helper = LayerHelper(op_name, **locals())

         if binary_op and x.dtype != y.dtype:
             raise ValueError(
                 "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
                 % (op_name, x.dtype, y.dtype)
             )

         if out is None:
             out = helper.create_variable_for_type_inference(dtype=x.dtype)

         if binary_op:
             helper.append_op(
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
             helper.append_op(
                 type=op_name, inputs={"X": x}, outputs={"Out": out}
             )

         return out
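Not part of the diff — a minimal sketch of one of the logical_* APIs, all of which funnel through _logical_op above (values illustrative):

import paddle

x = paddle.to_tensor([True, False, True])
y = paddle.to_tensor([True, True, False])
print(paddle.logical_and(x, y))  # [True, False, False]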
def logical_and(x, y, out=None, name=None):
...
...
@@ -288,21 +292,19 @@ def is_empty(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
is_empty
(
x
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
is_empty
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'is_empty'
)
check_type
(
name
,
"name"
,
(
str
,
type
(
None
)),
"is_empty"
)
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'is_empty'
)
check_type
(
name
,
"name"
,
(
str
,
type
(
None
)),
"is_empty"
)
helper
=
LayerHelper
(
"is_empty"
,
**
locals
())
cond
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
cond
.
stop_gradient
=
True
helper
.
append_op
(
type
=
'is_empty'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
[
cond
]}
)
return
cond
helper
=
LayerHelper
(
"is_empty"
,
**
locals
())
cond
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
cond
.
stop_gradient
=
True
helper
.
append_op
(
type
=
'is_empty'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
[
cond
]}
)
return
cond
def equal_all(x, y, name=None):
...
...
@@ -336,16 +338,15 @@ def equal_all(x, y, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
equal_all
(
x
,
y
)
if
paddle
.
in_dynamic_mode
():
return
_legacy_C_ops
.
equal_all
(
x
,
y
)
helper
=
LayerHelper
(
"equal_all"
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
helper
.
append_op
(
type
=
'equal_all'
,
inputs
=
{
'X'
:
[
x
],
'Y'
:
[
y
]},
outputs
=
{
'Out'
:
[
out
]}
)
return
out
else
:
helper
=
LayerHelper
(
"equal_all"
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
helper
.
append_op
(
type
=
'equal_all'
,
inputs
=
{
'X'
:
[
x
],
'Y'
:
[
y
]},
outputs
=
{
'Out'
:
[
out
]},
)
return
out
@
templatedoc
()
...
...
@@ -393,27 +394,24 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.allclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.allclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
+    else:
         check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
         check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
         check_type(rtol, 'rtol', float, 'allclose')
         check_type(atol, 'atol', float, 'allclose')
         check_type(equal_nan, 'equal_nan', bool, 'allclose')

         helper = LayerHelper("allclose", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')

         inputs = {'Input': x, 'Other': y}
         outputs = {'Out': out}
         attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
         helper.append_op(
             type='allclose', inputs=inputs, outputs=outputs, attrs=attrs
         )

         return out


@templatedoc()
...
...
@@ -457,31 +455,28 @@ def equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "equal",
         )
         helper = LayerHelper("equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True

         helper.append_op(
             type='equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out


@templatedoc()
...
...
@@ -513,31 +508,28 @@ def greater_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.greater_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_equal",
         )
         helper = LayerHelper("greater_equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True

         helper.append_op(
             type='greater_equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out


@templatedoc()
...
...
@@ -569,31 +561,28 @@ def greater_than(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.greater_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_than(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_than",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_than",
         )
         helper = LayerHelper("greater_than", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True

         helper.append_op(
             type='greater_than',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out


@templatedoc()
...
...
@templatedoc()
...
@@ -626,31 +615,28 @@ def less_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.less_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_equal(x, y)
+        check_variable_and_dtype(
+            x, "x", ["bool", "float32", "float64", "int32", "int64"],
+            "less_equal",
+        )
+        check_variable_and_dtype(
+            y, "y", ["bool", "float32", "float64", "int32", "int64"],
+            "less_equal",
+        )
+        helper = LayerHelper("less_equal", **locals())
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        out.stop_gradient = True
+        helper.append_op(
+            type='less_equal',
+            inputs={'X': [x], 'Y': [y]},
+            outputs={'Out': [out]},
+        )
+        return out
@templatedoc()
...
@@ -683,31 +669,28 @@ def less_than(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.less_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_than(x, y)
+        check_variable_and_dtype(
+            x, "x", ["bool", "float32", "float64", "int32", "int64"],
+            "less_than",
+        )
+        check_variable_and_dtype(
+            y, "y", ["bool", "float32", "float64", "int32", "int64"],
+            "less_than",
+        )
+        helper = LayerHelper("less_than", **locals())
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        out.stop_gradient = True
+        helper.append_op(
+            type='less_than',
+            inputs={'X': [x], 'Y': [y]},
+            outputs={'Out': [out]},
+        )
+        return out
@templatedoc()
...
@@ -740,31 +723,28 @@ def not_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.not_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.not_equal(x, y)
+        check_variable_and_dtype(
+            x, "x", ["bool", "float32", "float64", "int32", "int64"],
+            "not_equal",
+        )
+        check_variable_and_dtype(
+            y, "y", ["bool", "float32", "float64", "int32", "int64"],
+            "not_equal",
+        )
+        helper = LayerHelper("not_equal", **locals())
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        out.stop_gradient = True
+        helper.append_op(
+            type='not_equal',
+            inputs={'X': [x], 'Y': [y]},
+            outputs={'Out': [out]},
+        )
+        return out
def is_tensor(x):
...
@@ -802,41 +782,40 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
-    check_variable_and_dtype(
-        x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"], op_name
-    )
-    if y is not None:
-        check_variable_and_dtype(
-            y, "y", ["bool", "uint8", "int8", "int16", "int32", "int64"],
-            op_name,
-        )
-    if out is not None:
-        check_type(out, "out", Variable, op_name)
-    helper = LayerHelper(op_name, **locals())
-    if binary_op:
-        assert x.dtype == y.dtype
-    if out is None:
-        out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    if binary_op:
-        helper.append_op(
-            type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
-        )
-    else:
-        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, "x", ["bool", "uint8", "int8", "int16", "int32", "int64"],
+            op_name,
+        )
+        if y is not None:
+            check_variable_and_dtype(
+                y, "y", ["bool", "uint8", "int8", "int16", "int32", "int64"],
+                op_name,
+            )
+        if out is not None:
+            check_type(out, "out", Variable, op_name)
+        helper = LayerHelper(op_name, **locals())
+        if binary_op:
+            assert x.dtype == y.dtype
+        if out is None:
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        if binary_op:
+            helper.append_op(
+                type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
+            )
+        else:
+            helper.append_op(
+                type=op_name, inputs={"X": x}, outputs={"Out": out}
+            )
+        return out
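_bitwise_op is the shared helper behind the public bitwise APIs (binary_op=True for two-operand ops, False for bitwise_not). A usage sketch of those public entry points, illustrative and not part of the patch:

    import paddle

    x = paddle.to_tensor([5, 12])
    y = paddle.to_tensor([3, 10])
    print(paddle.bitwise_and(x, y))  # [1, 8]     (binary_op=True path)
    print(paddle.bitwise_not(x))     # [-6, -13]  (binary_op=False path)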
@templatedoc()
...
@@ -998,24 +977,20 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.isclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
-    check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
-    check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
-    check_type(rtol, 'rtol', float, 'isclose')
-    check_type(atol, 'atol', float, 'isclose')
-    check_type(equal_nan, 'equal_nan', bool, 'isclose')
-    helper = LayerHelper("isclose", **locals())
-    out = helper.create_variable_for_type_inference(dtype='bool')
-    inputs = {'Input': x, 'Other': y}
-    outputs = {'Out': out}
-    attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
-    helper.append_op(type='isclose', inputs=inputs, outputs=outputs, attrs=attrs)
-    return out
+    else:
+        check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
+        check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
+        check_type(rtol, 'rtol', float, 'isclose')
+        check_type(atol, 'atol', float, 'isclose')
+        check_type(equal_nan, 'equal_nan', bool, 'isclose')
+        helper = LayerHelper("isclose", **locals())
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        inputs = {'Input': x, 'Other': y}
+        outputs = {'Out': out}
+        attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
+        helper.append_op(
+            type='isclose', inputs=inputs, outputs=outputs, attrs=attrs
+        )
+        return out
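The retained op keeps numpy-style tolerance semantics, |x - y| <= atol + rtol * |y| elementwise. A sketch with invented values (not part of the patch):

    import paddle

    x = paddle.to_tensor([10000.0, 1e-07])
    y = paddle.to_tensor([10000.1, 1e-08])
    print(paddle.isclose(x, y, rtol=1e-05, atol=1e-08))  # [True, False]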
python/paddle/tensor/manipulation.py  View file @ 861fef52
...
@@ -19,17 +19,16 @@ from collections import Counter

 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only

-from ..common_ops_import import _varbase_creator, fill_constant
+from ..common_ops_import import fill_constant
 from ..fluid.data_feeder import (
     check_dtype,
     check_type,
     check_variable_and_dtype,
     convert_dtype,
 )
-from ..fluid.framework import _in_legacy_dygraph, _non_static_mode
 from ..fluid.layers import utils
 from ..framework import (
     LayerHelper,
...
@@ -124,7 +123,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             paddle.tensor.array.array_write(x1, i + 1, array)
             output, output_index = paddle.tensor.manipulation.tensor_array_to_tensor(input=array)
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             input, list
         ), "The 'input' in tensor_array_to_tensor must be list"
...
@@ -136,26 +135,28 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             np.array(list(map(lambda x: int(x.shape[axis]), input)))
         )
         return res, sizes
-    check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
-    if isinstance(input, list):
-        for i, input_x in enumerate(input):
-            check_type(
-                input_x,
-                'input[' + str(i) + ']',
-                Variable,
-                'tensor_array_to_tensor',
-            )
-    helper = LayerHelper('tensor_array_to_tensor', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    out_index = helper.create_variable_for_type_inference(dtype="int32")
-    helper.append_op(
-        type='tensor_array_to_tensor',
-        inputs={'X': input},
-        outputs={'Out': [out], 'OutIndex': [out_index]},
-        attrs={'axis': axis, 'use_stack': use_stack},
-    )
-    return out, out_index
+    else:
+        check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
+        if isinstance(input, list):
+            for i, input_x in enumerate(input):
+                check_type(
+                    input_x,
+                    'input[' + str(i) + ']',
+                    Variable,
+                    'tensor_array_to_tensor',
+                )
+        helper = LayerHelper('tensor_array_to_tensor', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        out_index = helper.create_variable_for_type_inference(dtype="int32")
+        helper.append_op(
+            type='tensor_array_to_tensor',
+            inputs={'X': input},
+            outputs={'Out': [out], 'OutIndex': [out_index]},
+            attrs={'axis': axis, 'use_stack': use_stack},
+        )
+        return out, out_index
def cast(x, dtype):
...
@@ -186,59 +187,53 @@ def cast(x, dtype):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
         return _C_ops.cast(x, dtype)
-    if _non_static_mode():
-        if not isinstance(dtype, core.VarDesc.VarType):
-            dtype = convert_np_dtype_to_dtype_(dtype)
-        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return out
-    check_variable_and_dtype(
-        x, 'x',
-        ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
-         'uint8', 'uint16'],
-        'cast',
-    )
-    check_dtype(
-        dtype, 'dtype',
-        ['bool', 'float16', 'float32', 'float64', 'int8', 'int16', 'int32',
-         'int64', 'uint8', 'uint16'],
-        'cast',
-    )
-    helper = LayerHelper('cast', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=dtype, stop_gradient=x.stop_gradient
-    )
-    helper.append_op(
-        type='cast',
-        inputs={'X': [x]},
-        outputs={'Out': [out]},
-        attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x',
+            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32',
+             'int64', 'uint8', 'uint16'],
+            'cast',
+        )
+        check_dtype(
+            dtype, 'dtype',
+            ['bool', 'float16', 'float32', 'float64', 'int8', 'int16',
+             'int32', 'int64', 'uint8', 'uint16'],
+            'cast',
+        )
+        helper = LayerHelper('cast', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=dtype, stop_gradient=x.stop_gradient
+        )
+        helper.append_op(
+            type='cast',
+            inputs={'X': [x]},
+            outputs={'Out': [out]},
+            attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
+        )
+        return out
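Illustrative usage of paddle.cast, whose eager path now always goes through _C_ops.cast (a sketch, not part of the patch):

    import paddle

    x = paddle.to_tensor([1.7, 2.3])
    y = paddle.cast(x, 'int32')  # float -> int truncates toward zero
    print(y)        # [1, 2]
    print(y.dtype)  # paddle.int32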
def slice(input, axes, starts, ends):
    """
...
@@ -362,134 +357,69 @@ def slice(input, axes, starts, ends):
         return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
     else:
-        if _in_legacy_dygraph():
-            attrs = ()
-            starts_tensor = None
-            ends_tensor = None
-            if isinstance(axes, (list, tuple)):
-                axes = list(axes)
-                if len(axes) == 0:
-                    raise ValueError(
-                        "Input axes should not be an empty list/tuple."
-                    )
-                for i in range(len(axes)):
-                    if axes[i] < 0:
-                        axes[i] = max(0, axes[i] + len(input.shape))
-                    else:
-                        axes[i] = min(len(input.shape) - 1, axes[i])
-            else:
-                raise ValueError(
-                    "Input axes must be a python list or tuple, but reveived {}".format(
-                        type(axes)
-                    )
-                )
-            infer_flags = list(1 for i in range(len(axes)))
-            tmp_tensor_type = Variable
-            if isinstance(starts, (list, tuple)):
-                starts = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in starts
-                ]
-                attrs += ('starts', starts)
-            elif isinstance(starts, tmp_tensor_type):
-                starts_tensor = starts
-                starts.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-            if isinstance(ends, (list, tuple)):
-                ends = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in ends
-                ]
-                attrs += ('ends', ends)
-            elif isinstance(ends, tmp_tensor_type):
-                ends_tensor = ends
-                ends_tensor.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-            return _legacy_C_ops.slice(
-                input, starts_tensor, ends_tensor, None, None,
-                'axes', axes, 'infer_flags', infer_flags, *attrs,
-            )
+        if not isinstance(starts, (list, tuple, Variable)):
+            raise ValueError(
+                "Input starts must be an Variable, python list or tuple."
+            )
+        if not isinstance(ends, (list, tuple, Variable)):
+            raise ValueError(
+                "Input ends must be an Variable, python list or tuple."
+            )
+        helper = LayerHelper('slice', **locals())
+        inputs = {'Input': input}
+        attrs = {'axes': axes}
+        infer_flags = list(1 for i in range(len(axes)))
+        # starts
+        if isinstance(starts, Variable):
+            starts.stop_gradient = True
+            inputs['StartsTensor'] = starts
+            infer_flags = list(-1 for i in range(len(axes)))
+        elif isinstance(starts, (list, tuple)):
+            attrs['starts'] = []
+            if utils._contain_var(starts):
+                inputs['StartsTensorList'] = utils._convert_to_tensor_list(
+                    starts
+                )
+                for i, dim in enumerate(starts):
+                    if isinstance(dim, Variable):
+                        attrs['starts'].append(-1)
+                        infer_flags[i] = -1
+                    else:
+                        attrs['starts'].append(dim)
+            else:
+                attrs['starts'] = starts
+        # ends
+        if isinstance(ends, Variable):
+            ends.stop_gradient = True
+            inputs['EndsTensor'] = ends
+            infer_flags = list(-1 for i in range(len(axes)))
+        elif isinstance(ends, (list, tuple)):
+            attrs['ends'] = []
+            if utils._contain_var(ends):
+                inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
+                for i, dim in enumerate(ends):
+                    if isinstance(dim, Variable):
+                        attrs['ends'].append(-1)
+                        infer_flags[i] = -1
+                    else:
+                        attrs['ends'].append(dim)
+            else:
+                attrs['ends'] = ends
+        # infer_flags
+        attrs['infer_flags'] = infer_flags
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype('input')
+        )
+        helper.append_op(
+            type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
+        )
+        return out
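A usage sketch of paddle.slice with plain Python starts/ends, the case the deleted legacy branch used to normalize via .numpy() (illustrative, not part of the patch):

    import paddle

    x = paddle.arange(12).reshape([3, 4])
    # rows 0..1, columns 1..2
    y = paddle.slice(x, axes=[0, 1], starts=[0, 1], ends=[2, 3])
    print(y.shape)  # [2, 2]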
def transpose(x, perm, name=None):
...
@@ -545,53 +475,49 @@ def transpose(x, perm, name=None):
     if in_dygraph_mode():
         return _C_ops.transpose(x, perm)
     else:
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-            return out
+        check_variable_and_dtype(
+            x, 'x',
+            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64',
+             'complex64', 'complex128'],
+            'transpose',
+        )
+        check_type(perm, 'perm', (list, tuple), 'transpose')
+        if isinstance(perm, tuple):
+            perm = list(perm)
+        if len(perm) != len(x.shape):
+            raise ValueError(
+                "Input(perm) is the permutation of dimensions of Input(x), "
+                "its length should be equal to dimensions of Input(x), "
+                "but received dimension of Input(x) is %s, "
+                "the length of Input(perm) is %s."
+                % (len(x.shape), len(perm))
+            )
+        for idx, dim in enumerate(perm):
+            if dim >= len(x.shape):
+                raise ValueError(
+                    "Each element in Input(perm) should be less than Input(x)'s dimension, "
+                    "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
+                    "dimension %d." % (idx, perm[idx], len(x.shape))
+                )
+        helper = LayerHelper('transpose', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        x_shape = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='transpose2',
+            inputs={'X': [x]},
+            outputs={'Out': [out], 'XShape': [x_shape]},
+            attrs={'axis': perm},
+        )
+        return out
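For reference, a minimal paddle.transpose sketch (illustrative, not part of the patch):

    import paddle

    x = paddle.rand([2, 3, 4])
    y = paddle.transpose(x, perm=[1, 0, 2])
    print(y.shape)  # [3, 2, 4]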
def unstack(x, axis=0, num=None):
...
@@ -625,32 +551,25 @@ def unstack(x, axis=0, num=None):
         if num == 0:
             return []
         return _C_ops.unstack(x, axis, num)
-    if _non_static_mode():
-        if num is None:
-            num = x.shape[axis]
-        if num == 0:
-            return []
-        return _legacy_C_ops.unstack(x, num, 'axis', int(axis), 'num', num)
+    else:
+        helper = LayerHelper('unstack', **locals())
+        if num is None:
+            if axis is None or x.shape[axis] <= 0:
+                raise ValueError('unknown unstack number')
+            else:
+                num = x.shape[axis]
+        outs = []
+        for _ in range(num):
+            outs.append(helper.create_variable_for_type_inference(x.dtype))
+        helper.append_op(
+            type='unstack',
+            inputs={'X': [x]},
+            outputs={'Y': outs},
+            attrs={'axis': axis, 'num': num},
+        )
+        return outs
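paddle.unstack returns a list of tensors, one per slice along the chosen axis. A sketch with made-up shapes (not part of the patch):

    import paddle

    x = paddle.rand([2, 3])
    a, b = paddle.unstack(x, axis=0)  # two tensors of shape [3]
    print(a.shape, b.shape)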
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
...
@@ -959,12 +878,7 @@ def fill_(x, value):
             "The type of 'value' must be int or float, but received %s."
             % (type(value))
         )
-    if in_dygraph_mode():
-        return _C_ops.fill_(x, value)
-    else:
-        return _legacy_C_ops.fill_any_(
-            x, "value_float", float(value), "value_int", int(value)
-        )
+    return _C_ops.fill_(x, value)
@dygraph_only
...
@@ -992,12 +906,7 @@ def zero_(x):
             print(tensor.tolist())   #[0, 0, 0, 0, 0]
     """
-    if in_dygraph_mode():
-        return _C_ops.fill_(x, 0.0)
-    else:
-        return _legacy_C_ops.fill_any_(
-            x, "value_float", 0.0, "value_int", int(0)
-        )
+    return _C_ops.fill_(x, 0.0)
@dygraph_only
...
@@ -1025,39 +934,11 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
             x.fill_diagonal_(1.0)
             print(x.tolist())   #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
     """
-    helper = LayerHelper("fill_diagonal_", **locals())
-    check_type(x, 'X', (Variable), 'fill_diagonal_')
-    dtype = helper.input_dtype('x')
-    check_dtype(
-        dtype, 'X',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'fill_diagonal_',
-    )
-    check_type(value, 'value', (bool, int, float), 'fill_diagonal_')
-    check_type(wrap, 'wrap', (bool), 'fill_diagonal_')
-    inshape = x.shape
-    inshapeset = set(inshape)
-    assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_ API'
-    if len(inshape) > 2:
-        assert (
-            len(inshapeset) == 1
-        ), 'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
-    if in_dygraph_mode():
-        if len(inshape) == 2:
-            return _C_ops.fill_diagonal_(x, value, offset, wrap)
-        return _C_ops.fill_diagonal_(x, value, offset, True)
-    if len(inshape) == 2:
-        return _legacy_C_ops.fill_diagonal_(
-            x, 'value', value, 'offset', offset, 'wrap', wrap
-        )
-    return _legacy_C_ops.fill_diagonal_(
-        x, 'value', value, 'offset', offset, 'wrap', True
-    )
+    if len(x.shape) == 2:
+        return _C_ops.fill_diagonal_(x, value, offset, wrap)
+    return _C_ops.fill_diagonal_(x, value, offset, True)
def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
    inshape = x.shape
...
@@ -1087,18 +968,8 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
         y = y.reshape([1, -1])

     if inplace:
-        if in_dygraph_mode():
-            return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
-        else:
-            return _legacy_C_ops.fill_diagonal_tensor_(
-                x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-            )
-    if in_dygraph_mode():
-        return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
-    else:
-        return _legacy_C_ops.fill_diagonal_tensor(
-            x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-        )
+        return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
+    return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
...
@@ -1248,84 +1119,80 @@ def concat(x, axis=0, name=None):
         if not isinstance(input, Variable):
             input = [t for t in input if t.shape.count(0) == 0]
         return _C_ops.concat(input, axis)
-    if _in_legacy_dygraph():
-        if isinstance(axis, Variable):
-            axis = axis.numpy()
-            axis = axis.item(0)
-        if not isinstance(input, Variable):
-            input = [t for t in input if t.shape.count(0) == 0]
-        out = _varbase_creator()
-        _legacy_C_ops.concat(input, out, 'axis', axis)
-        return out
+    else:
+        check_type(input, 'input', (list, tuple, Variable), 'concat')
+        if not isinstance(input, Variable):
+            for id, x in enumerate(input):
+                check_variable_and_dtype(
+                    x, 'input[' + str(id) + ']',
+                    ['bool', 'float16', 'float32', 'float64', 'int32',
+                     'int64', 'int8', 'unit8'],
+                    'concat',
+                )
+                if x.dtype != input[0].dtype:
+                    raise TypeError(
+                        "All the Tensors in the input must have the same data type."
+                    )
+        else:
+            input = [input]
+        check_type(axis, 'axis', (int, Variable), 'concat')
+        if isinstance(axis, Variable):
+            check_dtype(
+                axis.dtype, 'axis', ['int32', 'int64'], 'concat',
+                "The data type of axis must be int32 or int64 when axis is a Tensor",
+            )
+        helper = LayerHelper('concat', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+            # NOTE(liym27): Don't remove this if branch!
+            # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
+            # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
+            assert len(input) == 1, (
+                "If the elements of 'input' in concat are Variable(LoDTensorArray), "
+                "number of the elements must be 1, but received %s."
+                % len(input)
+            )
+            out_index = helper.create_variable_for_type_inference(
+                dtype="int32"
+            )
+            helper.append_op(
+                type='tensor_array_to_tensor',
+                inputs={'X': input[0]},
+                outputs={'Out': [out], 'OutIndex': [out_index]},
+                attrs={'axis': axis, 'use_stack': False},
+            )
+        else:
+            inputs = {'X': input}
+            attrs = {}
+            if isinstance(axis, Variable):
+                axis.stop_gradient = True
+                inputs['AxisTensor'] = axis
+            else:
+                attrs['axis'] = axis
+            helper.append_op(
+                type='concat',
+                inputs=inputs,
+                outputs={'Out': [out]},
+                attrs=attrs,
+            )
+        return out
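A minimal paddle.concat sketch (illustrative, not part of the patch):

    import paddle

    a = paddle.ones([2, 2])
    b = paddle.zeros([2, 2])
    print(paddle.concat([a, b], axis=0).shape)  # [4, 2]
    print(paddle.concat([a, b], axis=1).shape)  # [2, 4]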
def broadcast_tensors(input, name=None):
...
@@ -1358,80 +1225,81 @@ def broadcast_tensors(input, name=None):
     """
     num_inputs = len(input)
-    if paddle.framework.in_dygraph_mode():
+    if in_dygraph_mode():
         return _C_ops.broadcast_tensors(input)
-    if paddle.framework._non_static_mode():
-        return _legacy_C_ops.broadcast_tensors(input, num_inputs)
-
-    check_type(input, 'input', (list, tuple), 'broadcast_tensors')
-    if num_inputs < 1:
-        raise TypeError(
-            "At least 1 tensor is needed to perform broadcast_tensors"
-        )
+    else:
+        check_type(input, 'input', (list, tuple), 'broadcast_tensors')
+        if num_inputs < 1:
+            raise TypeError(
+                "At least 1 tensor is needed to perform broadcast_tensors"
+            )
+        # Check input types
+        for id, x in enumerate(input):
+            check_variable_and_dtype(
+                x, 'input[' + str(id) + ']',
+                ['bool', 'float32', 'float64', 'int32', 'int64'],
+                'broadcast_tensors',
+            )
+            if x.dtype != input[0].dtype:
+                raise TypeError(
+                    "All the Tensors in the input must have the same data type."
+                )
+        # Check bcast semantics
+        output_shape_r_last_tensor_index = []
+        output_shape_r = []
+        # Use while loop due to weird behaviour of "range()"
+        j = 0
+        while j < len(input):
+            tensor = input[j]
+            shape = list(reversed(tensor.shape))
+            i = 0
+            while i < len(shape):
+                if len(output_shape_r) <= i:
+                    output_shape_r.append(shape[i])
+                    output_shape_r_last_tensor_index.append(j)
+                else:
+                    invalid = (
+                        output_shape_r[i] != shape[i]
+                        and output_shape_r[i] != 1
+                        and shape[i] != 1
+                    )
+                    if invalid:
+                        last_index = output_shape_r_last_tensor_index[i]
+                        raise TypeError(
+                            "Input tensors to broadcast_tensors does not follow bcast semantics"
+                            "Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
+                        )
+                    if output_shape_r[i] <= shape[i]:
+                        output_shape_r[i] = shape[i]
+                        output_shape_r_last_tensor_index[i] = j
+                i += 1  # while i < len(shape)
+            j += 1  # while j < len(input)
+        helper = LayerHelper('broadcast_tensors', **locals())
+        i = 0
+        out = []
+        while i < num_inputs:
+            out.append(
+                helper.create_variable_for_type_inference(
+                    dtype=helper.input_dtype()
+                )
+            )
+            i += 1
+        inputs = {'X': input}
+        helper.append_op(
+            type='broadcast_tensors',
+            inputs=inputs,
+            outputs={'Out': out},
+            attrs={},
+        )
+        return out
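The bcast-semantics walk above mirrors numpy broadcasting checked from the trailing dimension. A usage sketch (illustrative, not part of the patch):

    import paddle

    a = paddle.rand([1, 3])
    b = paddle.rand([4, 1])
    x, y = paddle.broadcast_tensors([a, b])
    print(x.shape, y.shape)  # [4, 3] [4, 3]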
def flip(x, axis, name=None):
...
@@ -1465,29 +1333,31 @@ def flip(x, axis, name=None):
     if in_dygraph_mode():
         return _C_ops.flip(x, axis)
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.flip(x, "axis", axis)
-
-    helper = LayerHelper("flip", **locals())
-    check_type(x, 'X', (Variable), 'flip')
-    dtype = helper.input_dtype('x')
-    check_dtype(
-        dtype, 'X',
-        ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
-        'flip',
-    )
-    check_type(axis, 'axis', (list, tuple), 'flip')
-    if name is None:
-        out = helper.create_variable_for_type_inference(dtype)
-    else:
-        out = helper.create_variable(name=name, dtype=dtype, persistable=False)
-    helper.append_op(
-        type="flip", inputs={"X": x}, outputs={"Out": out}, attrs={"axis": axis}
-    )
-    return out
+    else:
+        helper = LayerHelper("flip", **locals())
+        check_type(x, 'X', (Variable), 'flip')
+        dtype = helper.input_dtype('x')
+        check_dtype(
+            dtype, 'X',
+            ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
+            'flip',
+        )
+        check_type(axis, 'axis', (list, tuple), 'flip')
+        if name is None:
+            out = helper.create_variable_for_type_inference(dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=dtype, persistable=False
+            )
+        helper.append_op(
+            type="flip",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={"axis": axis},
+        )
+        return out
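A short paddle.flip sketch; note the checks above expect axis as a list or tuple (illustrative, not part of the patch):

    import paddle

    x = paddle.to_tensor([[1, 2], [3, 4]])
    print(paddle.flip(x, axis=[0]))  # [[3, 4], [1, 2]]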
def rot90(x, k=1, axes=[0, 1], name=None):
...
@@ -1705,23 +1575,17 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
     if in_dygraph_mode():
         return _C_ops.flatten(x, start_axis, stop_axis)
-    if _in_legacy_dygraph():
-        dy_out, _ = _legacy_C_ops.flatten_contiguous_range(
-            x, 'start_axis', start_axis, 'stop_axis', stop_axis
-        )
-        return dy_out
+    else:
+        helper = LayerHelper('flatten', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        x_shape = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='flatten_contiguous_range',
+            inputs={"X": x},
+            outputs={'Out': out, 'XShape': x_shape},
+            attrs={"start_axis": start_axis, "stop_axis": stop_axis},
+        )
+        return out
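paddle.flatten collapses the axes from start_axis through stop_axis into one. A sketch (illustrative, not part of the patch):

    import paddle

    x = paddle.rand([2, 3, 4])
    print(paddle.flatten(x, start_axis=1, stop_axis=2).shape)  # [2, 12]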
@inplace_apis_in_dygraph_only
...
@@ -1760,12 +1624,6 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None):
     if in_dygraph_mode():
         return _C_ops.flatten_(x, start_axis, stop_axis)
-    if _in_legacy_dygraph():
-        dy_out, _ = _legacy_C_ops.flatten_contiguous_range_(
-            x, 'start_axis', start_axis, 'stop_axis', stop_axis
-        )
-        return dy_out
def roll(x, shifts, axis=None, name=None):
    """
...
@@ -1830,31 +1688,28 @@ def roll(x, shifts, axis=None, name=None):
     if in_dygraph_mode():
         return _C_ops.roll(x, shifts, axis)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.roll(x, 'axis', axis, 'shifts', shifts)
-
-    helper = LayerHelper("roll", **locals())
-    check_type(axis, 'axis', (list, tuple), 'roll')
+    else:
+        helper = LayerHelper("roll", **locals())
+        check_type(axis, 'axis', (list, tuple), 'roll')
+        out = helper.create_variable_for_type_inference(x.dtype)
+        if isinstance(shifts, Variable):
+            helper.append_op(
+                type='roll',
+                inputs={'X': x, "ShiftsTensor": shifts},
+                outputs={'Out': out},
+                attrs={'axis': axis},
+            )
+        else:
+            check_type(shifts, 'shifts', (list, tuple), 'roll')
+            helper.append_op(
+                type='roll',
+                inputs={'X': x},
+                outputs={'Out': out},
+                attrs={'axis': axis, 'shifts': shifts},
+            )
+        return out
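A paddle.roll sketch, elements shifted forward along the axis wrap around to the front (illustrative, not part of the patch):

    import paddle

    x = paddle.to_tensor([1, 2, 3, 4])
    print(paddle.roll(x, shifts=1, axis=0))  # [4, 1, 2, 3]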
def stack(x, axis=0, name=None):
...
@@ -1947,62 +1802,59 @@ def stack(x, axis=0, name=None):
     if in_dygraph_mode():
         return _C_ops.stack(x, axis)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.stack(x, 'axis', axis)
+    else:
+        if not isinstance(x, list) and not isinstance(x, tuple):
+            # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
+            # In that case, Variable is array of tensors indeed.
+            if (
+                isinstance(x, Variable)
+                and x.desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY
+            ):
+                x = [x]
+            else:
+                raise TypeError(
+                    "The type of '%s' in %s must be %s, but received %s"
+                    % (
+                        'x',
+                        'stack',
+                        'list[Tensor], tuple[Tensor] or TensorArray',
+                        type(x),
+                    )
+                )
+        helper = LayerHelper('stack', **locals())
+        out = helper.create_variable_for_type_inference(x[0].dtype)
+        if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
+            assert len(x) == 1, (
+                "If the elements of 'x' in stack are Variable(LoDTensorArray), "
+                "number of the elements must be 1, but received %s." % len(x)
+            )
+            out_index = helper.create_variable_for_type_inference(
+                dtype="int32"
+            )
+            for i in x:
+                check_variable_and_dtype(
+                    i, 'x',
+                    ['float16', 'float32', 'float64', 'int32', 'int64'],
+                    'stack',
+                )
+            helper.append_op(
+                type='tensor_array_to_tensor',
+                inputs={'X': x[0]},
+                outputs={'Out': [out], 'OutIndex': [out_index]},
+                attrs={'axis': axis, 'use_stack': True},
+            )
+        else:
+            helper.append_op(
+                type='stack',
+                inputs={'X': x},
+                outputs={'Y': out},
+                attrs={'axis': axis},
+            )
+        return out
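A minimal paddle.stack sketch, stacking adds a new axis (illustrative, not part of the patch):

    import paddle

    a = paddle.to_tensor([1.0, 2.0])
    b = paddle.to_tensor([3.0, 4.0])
    print(paddle.stack([a, b], axis=0).shape)  # [2, 2]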
def split(x, num_or_sections, axis=0, name=None):
...
@@ -2055,7 +1907,7 @@ def split(x, num_or_sections, axis=0, name=None):
     """
     input = x
     dim = axis
-    if _non_static_mode():
+    if in_dygraph_mode():
         num = None
         attrs = ()
...
@@ -2085,108 +1937,111 @@ def split(x, num_or_sections, axis=0, name=None):
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                 "received %s." % (type(num_or_sections))
             )
-        if in_dygraph_mode():
-            if isinstance(num_or_sections, int):
-                return _C_ops.split_with_num(input, num_or_sections, dim)
-            else:
-                return _C_ops.split(input, num_or_sections, dim)
-        elif _in_legacy_dygraph():
-            out = [_varbase_creator() for n in range(num)]
-            _legacy_C_ops.split(input, out, *attrs)
-            return out
+        if isinstance(num_or_sections, int):
+            return _C_ops.split_with_num(input, num_or_sections, dim)
+        else:
+            return _C_ops.split(input, num_or_sections, dim)
+    else:
+        check_variable_and_dtype(
+            input, 'input',
+            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64',
+             'uint8', 'int8'],
+            'split',
+        )
+        check_type(
+            num_or_sections, 'num_or_sections', (list, int, tuple), 'split'
+        )
+        check_type(dim, 'dim', (int, Variable), 'split')
+        if isinstance(dim, Variable):
+            check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

+        helper = LayerHelper('split', **locals())
+        input_shape = input.shape
+        inputs = {'X': input}
+        attrs = {
+            'num': num_or_sections if isinstance(num_or_sections, int) else 0
+        }

+        def _get_SectionsTensorList(one_list):
+            tensor_list = []
+            unk_dim_idx = -1
+            for idx, dim_size in enumerate(one_list):
+                if isinstance(dim_size, Variable):
+                    dim_size.stop_gradient = True
+                    tensor_list.append(dim_size)
+                else:
+                    assert isinstance(dim_size, int)
+                    if dim_size == -1:
+                        assert unk_dim_idx == -1, (
+                            "Only one value of 'num_or_section' in split can "
+                            "be -1. But received num_or_section[%d] is also -1."
+                            % idx
+                        )
+                        unk_dim_idx = idx
+                    temp_out = helper.create_variable_for_type_inference(
+                        'int32'
+                    )
+                    fill_constant(
+                        [1], 'int32', dim_size, force_cpu=True, out=temp_out
+                    )
+                    tensor_list.append(temp_out)
+            return tensor_list

+        if isinstance(dim, Variable):
+            dim.stop_gradient = True
+            inputs['AxisTensor'] = dim
+        else:
+            assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
+            dim = (len(input_shape) + dim) if dim < 0 else dim
+            attrs['axis'] = dim

+        if isinstance(num_or_sections, int):
+            assert num_or_sections > 1, 'num_or_sections must be more than 1.'
+            if isinstance(dim, int) and input_shape[dim] > 0:
+                assert input_shape[dim] % num_or_sections == 0, (
+                    "The input's size along the split dimension "
+                    "must be evenly divisible by Attr(num_or_sections). "
+                    "But %d is not evenly divisible by %d. "
+                    % (num_or_sections, input_shape[dim])
+                )
+            num = num_or_sections
+        else:
+            if isinstance(dim, int) and input_shape[dim] > 0:
+                assert (
+                    len(num_or_sections) <= input_shape[dim]
+                ), 'len(num_or_sections) must not be more than input.shape[dim].'
+            num = len(num_or_sections)
+            attrs['sections'] = list(
+                map(
+                    lambda ele: -1 if isinstance(ele, Variable) else ele,
+                    num_or_sections,
+                )
+            )
+            if utils._contain_var(num_or_sections):
+                inputs['SectionsTensorList'] = _get_SectionsTensorList(
+                    num_or_sections
+                )

+        outs = [
+            helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            for i in range(num)
+        ]
+        helper.append_op(
+            type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs
+        )
+        return outs
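A paddle.split sketch covering both the even-split and the sections form, where -1 infers the remaining size (illustrative, not part of the patch):

    import paddle

    x = paddle.rand([3, 9])
    a, b, c = paddle.split(x, num_or_sections=3, axis=1)
    print(a.shape)  # [3, 3]
    a, b, c = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)
    print(c.shape)  # [3, 4]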
def vsplit(x, num_or_sections, name=None):
...
@@ -2317,49 +2172,46 @@ def squeeze(x, axis=None, name=None):
     axes = axis
     if in_dygraph_mode():
         return _C_ops.squeeze(input, axes)
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.squeeze2(input, 'axes', axes)
-        return out
+    else:
+        helper = LayerHelper("squeeze", **locals())
+        check_variable_and_dtype(
+            input, 'input',
+            ['float16', 'float32', 'float64', 'bool', 'int8', 'int32',
+             'int64', 'complex64', 'complex128'],
+            'squeeze',
+        )
+        check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'squeeze')
+        attrs = {}
+        if isinstance(axes, Variable):
+            axes.stop_gradient = True
+            attrs["axes"] = axes
+        elif isinstance(axes, (list, tuple)):
+            if utils._contain_var(axes):
+                attrs["axes"] = utils._convert_to_tensor_list(axes)
+            else:
+                attrs["axes"] = axes
+        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
+        helper.append_op(
+            type="squeeze2",
+            inputs={"X": input},
+            attrs=attrs,
+            outputs={"Out": out, "XShape": x_shape},
+        )
+        return out
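A paddle.squeeze sketch removing the size-1 axes named in axis (illustrative, not part of the patch):

    import paddle

    x = paddle.rand([1, 3, 1, 5])
    print(paddle.squeeze(x, axis=[0, 2]).shape)  # [3, 5]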
@inplace_apis_in_dygraph_only
...
@@ -2379,9 +2231,6 @@ def squeeze_(x, axis=None, name=None):
     axes = axis
     if in_dygraph_mode():
         return _C_ops.squeeze_(input, axes)
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.squeeze2_(input, 'axes', axes)
-        return out
def unique_consecutive(
...
@@ -2473,65 +2322,49 @@ def unique_consecutive(
         if len(outs) == 1:
             return outs[0]
         return tuple(outs)
-    elif paddle.in_dynamic_mode():
-        out, inverse, counts = _legacy_C_ops.unique_consecutive(
-            x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
-            'return_counts', return_counts, 'axis', axis,
-        )
+    else:
+        check_variable_and_dtype(
+            x, "input",
+            ['float32', 'float64', 'int32', 'int64'],
+            'unique_consecutive',
+        )
+        check_type(
+            return_inverse, 'return_inverse', bool, 'unique_consecutive'
+        )
+        check_type(
+            return_counts, 'return_counts', bool, 'unique_consecutive'
+        )
+        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive')
+        if len(axis) != 0:
+            check_type(axis[0], 'axis', int, 'unique_consecutive')
+        helper = LayerHelper('unique_consecutive', **locals())
+        attrs = {
+            'dtype': attr_dtype,
+            "return_inverse": return_inverse,
+            "return_counts": return_counts,
+            "axis": axis,
+        }
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        inverse = helper.create_variable_for_type_inference(
+            dtype=attr_dtype, stop_gradient=True
+        )
+        counts = helper.create_variable_for_type_inference(
+            dtype=attr_dtype, stop_gradient=True
+        )
+        outputs = {"Out": out, "Index": inverse, "Counts": counts}
+        outs = [out]
+        if return_inverse:
+            outs.append(inverse)
+        if return_counts:
+            outs.append(counts)
+        helper.append_op(
+            type="unique_consecutive",
+            inputs={"X": x},
+            attrs=attrs,
+            outputs=outputs,
+        )
+        if len(outs) == 1:
+            return outs[0]
+        return tuple(outs)
def unique(
...
@@ -2604,27 +2437,10 @@ def unique(
     else:
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
-    if _non_static_mode():
-        if in_dygraph_mode():
-            out, indices, inverse, counts = _C_ops.unique(
-                x, return_index, return_inverse, return_counts, axis,
-                attr_dtype,
-            )
-        if _in_legacy_dygraph():
-            out, inverse, indices, counts = _legacy_C_ops.unique(
-                x, 'dtype', attr_dtype, 'return_index', return_index,
-                'return_inverse', return_inverse, 'return_counts',
-                return_counts, 'axis', axis, "is_sorted", True,
-            )
+    if in_dygraph_mode():
+        out, indices, inverse, counts = _C_ops.unique(
+            x, return_index, return_inverse, return_counts, axis, attr_dtype
+        )
         outs = [out]
         if return_index:
             outs.append(indices)
...
@@ -2637,60 +2453,60 @@ def unique(
             return outs[0]
         return tuple(outs)
+    else:
+        check_variable_and_dtype(
+            x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
+        )
+        check_type(return_index, 'return_index', bool, 'unique')
+        check_type(return_inverse, 'return_inverse', bool, 'unique')
+        check_type(return_counts, 'return_counts', bool, 'unique')
+        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
+        if len(axis) != 0:
+            check_type(axis[0], 'axis', int, 'unique')
+        helper = LayerHelper('unique', **locals())
+        attrs = {
+            'dtype': attr_dtype,
+            "return_index": return_index,
+            "return_inverse": return_inverse,
+            "return_counts": return_counts,
+            "axis": axis,
+            "is_sorted": True,
+        }
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        indices = helper.create_variable_for_type_inference(
+            dtype=attr_dtype, stop_gradient=True
+        )
+        inverse = helper.create_variable_for_type_inference(
+            dtype=attr_dtype, stop_gradient=True
+        )
+        counts = helper.create_variable_for_type_inference(
+            dtype=attr_dtype, stop_gradient=True
+        )
+        outputs = {
+            "Out": out,
+            "Indices": indices,
+            "Index": inverse,
+            "Counts": counts,
+        }
+        outs = [out]
+        if return_index:
+            outs.append(indices)
+        if return_inverse:
+            outs.append(inverse)
+        if return_counts:
+            outs.append(counts)
+        helper.append_op(
+            type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs
+        )
+        if len(outs) == 1:
+            return outs[0]
+        return tuple(outs)
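paddle.unique returns the sorted distinct values plus whichever optional outputs are requested. A sketch (illustrative, not part of the patch):

    import paddle

    x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
    out, counts = paddle.unique(x, return_counts=True)
    print(out)     # [1, 2, 3, 5]
    print(counts)  # [1, 1, 3, 1]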
def unsqueeze(x, axis, name=None):
...
@@ -2741,7 +2557,7 @@ def unsqueeze(x, axis, name=None):
     """
     input = x
     axes = axis
-    if _non_static_mode():
+    if in_dygraph_mode():
         if isinstance(axes, int):
             axes = [axes]
         elif isinstance(axes, Variable):
...
@@ -2751,54 +2567,51 @@ def unsqueeze(x, axis, name=None):
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.unsqueeze2(input, 'axes', axes)
-            return out
        return _C_ops.unsqueeze(input, axes)
+    else:
        check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
        check_variable_and_dtype(
            input,
            'input',
            [
                'float16',
                'float32',
                'float64',
                'bool',
                'int8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'unsqueeze',
        )
        helper = LayerHelper("unsqueeze2", **locals())
        inputs = {"X": input}
        attrs = {}

        if isinstance(axes, int):
            axes = [axes]
        if isinstance(axes, Variable):
            axes.stop_gradient = True
            inputs["AxesTensor"] = axes
        elif isinstance(axes, (list, tuple)):
            if utils._contain_var(axes):
                inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
            else:
                attrs["axes"] = axes

        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type="unsqueeze2",
            inputs=inputs,
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )
        return out
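For quick reference while reviewing, a minimal dygraph usage sketch of paddle.unsqueeze, whose dispatch the hunk above simplifies (expected shapes follow the op's documented semantics, not this diff):

import paddle

x = paddle.rand([5, 10])              # shape [5, 10]
y = paddle.unsqueeze(x, axis=0)       # shape [1, 5, 10]
z = paddle.unsqueeze(x, axis=[0, 2])  # axes applied in order: shape [1, 5, 1, 10]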
@inplace_apis_in_dygraph_only
...
...
@@ -2818,10 +2631,7 @@ def unsqueeze_(x, axis, name=None):
            item.numpy().item(0) if isinstance(item, Variable) else item
            for item in axes
        ]
-    if in_dygraph_mode():
-        return _C_ops.unsqueeze_(input, axes)
-    out, _ = _legacy_C_ops.unsqueeze2_(input, 'axes', axes)
-    return out
+    return _C_ops.unsqueeze_(input, axes)


def gather(x, index, axis=None, name=None):
...
...
@@ -2874,42 +2684,45 @@ def gather(x, index, axis=None, name=None):
    if in_dygraph_mode():
        return _C_ops.gather(x, index, axis)
-    if _in_legacy_dygraph():
-        axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
-        return _legacy_C_ops.gather(
-            x, index, None, "axis", axis, "overwrite", False
-        )
    else:
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'uint8',
            ],
            'gather',
        )
        check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')

        if isinstance(axis, Variable):
            check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')

        helper = LayerHelper('gather', **locals())
        dtype = helper.input_dtype('x')
        out = helper.create_variable_for_type_inference(dtype)
        if not isinstance(axis, Variable):
            helper.append_op(
                type="gather",
                inputs={"X": x, "Index": index},
                attrs={'axis': axis, 'overwrite': False},
                outputs={"Out": out},
            )
        else:
            helper.append_op(
                type="gather",
                inputs={"X": x, "Index": index, "Axis": axis},
                attrs={"overwrite": False},
                outputs={"Out": out},
            )
        return out
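A minimal usage sketch of paddle.gather for context (expected output per the op's documented row-gathering semantics):

import paddle

x = paddle.to_tensor([[1, 2], [3, 4], [5, 6]])
index = paddle.to_tensor([0, 2])
out = paddle.gather(x, index, axis=0)  # picks rows 0 and 2: [[1, 2], [5, 6]]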
def unbind(input, axis=0):
...
...
@@ -2945,36 +2758,36 @@ def unbind(input, axis=0):
"""
if
in_dygraph_mode
():
return
_C_ops
.
unbind
(
input
,
axis
)
if
not
isinstance
(
axis
,
(
int
)):
raise
TypeError
(
"The type of 'axis' must be int, but received %s."
%
(
type
(
axis
))
else
:
if
not
isinstance
(
axis
,
(
int
)):
raise
TypeError
(
"The type of 'axis' must be int, but received %s."
%
(
type
(
axis
))
)
if
isinstance
(
axis
,
np
.
generic
):
axis
=
np
.
asscalar
(
axis
)
input_shape
=
input
.
shape
axis_
=
axis
if
axis
>=
0
else
len
(
input_shape
)
+
axis
num
=
input_shape
[
axis_
]
helper
=
LayerHelper
(
"unbind"
,
**
locals
())
check_type
(
input
,
'input'
,
(
Variable
),
'unbind'
)
dtype
=
helper
.
input_dtype
()
check_dtype
(
dtype
,
'unbind'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'unbind'
)
if
isinstance
(
axis
,
np
.
generic
):
axis
=
np
.
asscalar
(
axis
)
input_shape
=
input
.
shape
axis_
=
axis
if
axis
>=
0
else
len
(
input_shape
)
+
axis
num
=
input_shape
[
axis_
]
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
unbind
(
input
,
num
,
'axis'
,
axis
)
helper
=
LayerHelper
(
"unbind"
,
**
locals
())
check_type
(
input
,
'input'
,
(
Variable
),
'unbind'
)
dtype
=
helper
.
input_dtype
()
check_dtype
(
dtype
,
'unbind'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'unbind'
)
outs
=
[
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
())
for
i
in
range
(
num
)
]
helper
.
append_op
(
type
=
"unbind"
,
inputs
=
{
"X"
:
input
},
outputs
=
{
"Out"
:
outs
},
attrs
=
{
"axis"
:
axis
},
)
return
outs
outs
=
[
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
for
i
in
range
(
num
)
]
helper
.
append_op
(
type
=
"unbind"
,
inputs
=
{
"X"
:
input
},
outputs
=
{
"Out"
:
outs
},
attrs
=
{
"axis"
:
axis
},
)
return
outs
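For context, a minimal sketch of paddle.unbind, which splits a tensor into a list of views along one axis (shapes per the documented semantics):

import paddle

x = paddle.rand([3, 4, 5])
a, b, c = paddle.unbind(x, axis=0)  # three tensors, each of shape [4, 5]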
def scatter(x, index, updates, overwrite=True, name=None):
...
...
@@ -3054,27 +2867,22 @@ def scatter(x, index, updates, overwrite=True, name=None):
    if in_dygraph_mode():
        return _C_ops.scatter(x, index, updates, overwrite)
    else:
-       if _in_legacy_dygraph():
-           return _legacy_C_ops.scatter(
-               x, index, updates, 'overwrite', overwrite
-           )
        check_variable_and_dtype(
            x,
            'dtype',
            ['float32', 'float64', 'float16', 'int32', 'int64'],
            'scatter',
        )
        check_type(overwrite, 'overwrite', bool, 'scatter')
        helper = LayerHelper('scatter', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type="scatter",
            inputs={"X": x, "Ids": index, "Updates": updates},
            attrs={'overwrite': overwrite},
            outputs={"Out": out},
        )
        return out
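A minimal sketch of paddle.scatter with overwrite=True (expected result per the documented row-replacement semantics):

import paddle

x = paddle.to_tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
index = paddle.to_tensor([2, 1])
updates = paddle.to_tensor([[1.0, 1.0], [2.0, 2.0]])
# row 2 <- updates[0], row 1 <- updates[1]
out = paddle.scatter(x, index, updates, overwrite=True)
# [[1., 1.], [2., 2.], [1., 1.]]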
@inplace_apis_in_dygraph_only
...
...
@@ -3083,9 +2891,7 @@ def scatter_(x, index, updates, overwrite=True, name=None):
Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_paddle_tensor_scatter`.
"""
-    if in_dygraph_mode():
-        return _C_ops.scatter_(x, index, updates, overwrite)
-    return _legacy_C_ops.scatter_(x, index, updates, 'overwrite', overwrite)
+    return _C_ops.scatter_(x, index, updates, overwrite)


def scatter_nd_add(x, index, updates, name=None):
...
...
@@ -3160,22 +2966,18 @@ def scatter_nd_add(x, index, updates, name=None):
    if in_dygraph_mode():
        return _C_ops.scatter_nd_add(x, index, updates)
    else:
-       if _in_legacy_dygraph():
-           op = getattr(_legacy_C_ops, 'scatter_nd_add')
-           return op(x, index, updates)
        if x.dtype != updates.dtype:
            raise ValueError("x and updates must have same data type.")

        helper = LayerHelper('scatter_nd_add', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        output = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="scatter_nd_add",
            inputs={"X": x, "Index": index, "Updates": updates},
            outputs={"Out": output},
        )
        return output
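A minimal sketch of paddle.scatter_nd_add; unlike scatter, duplicate indices accumulate (expected values per the documented semantics):

import paddle

x = paddle.zeros([4])                      # float32 zeros
index = paddle.to_tensor([[1], [1], [3]])  # last dim indexes into x
updates = paddle.to_tensor([1.0, 2.0, 3.0])
out = paddle.scatter_nd_add(x, index, updates)  # [0., 3., 0., 3.]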
def scatter_nd(index, updates, shape, name=None):
...
...
@@ -3307,71 +3109,70 @@ def tile(x, repeat_times, name=None):
            repeat_times = repeat_times.numpy().tolist()

        return _C_ops.tile(x, repeat_times)
-   if _in_legacy_dygraph():
-       return _legacy_C_ops.tile(x, 'repeat_times', repeat_times)
    else:
        check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
        if isinstance(repeat_times, Variable):
            assert (
                len(repeat_times.shape) == 1
            ), 'repeat_times must be an 1-D Tensor.'
        else:
            for elem in repeat_times:
                if isinstance(elem, Variable):
                    assert (
                        len(elem.shape) == 1
                    ), 'Elements in repeat_times must be 1-D Tensors or integers.'
                else:
                    type_tuple = (int, np.int32, np.int64)
                    assert isinstance(
                        elem, type_tuple
                    ), 'Elements in repeat_times must be 1-D Tensors or integers.'

        check_variable_and_dtype(
            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile'
        )
        if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
            raise ValueError(
                "When the date type is bool for the input 'x' of tile op, you "
                "must set its stop_gradient to be True by "
                "some_var.stop_gradient == True supporting some_var is the input."
            )

        helper = LayerHelper('tile', **locals())

        inputs = {"X": [x]}
        attrs = {}

        def get_attr_repeat_times(list_repeat_times):
            attrs_repeat_times = []
            for idx, times in enumerate(list_repeat_times):
                if isinstance(times, Variable):
                    attrs_repeat_times.append(-1)
                else:
                    attrs_repeat_times.append(times)
                    assert (
                        times > 0
                    ), "All elements in repeat_times must be positive for tile."
            return attrs_repeat_times

        if isinstance(repeat_times, Variable):
            repeat_times.stop_gradient = True
            inputs['RepeatTimes'] = repeat_times
            attrs['repeat_times'] = [-1]
        elif isinstance(repeat_times, (list, tuple)):
            attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
            if utils._contain_var(repeat_times):
                inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
                    repeat_times
                )

        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return out
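A minimal sketch of paddle.tile, which follows numpy.tile-style repetition (expected output per the documented semantics):

import paddle

x = paddle.to_tensor([1, 2, 3])            # shape [3]
out = paddle.tile(x, repeat_times=[2, 2])  # shape [2, 6]
# [[1, 2, 3, 1, 2, 3],
#  [1, 2, 3, 1, 2, 3]]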
def expand_as(x, y, name=None):
...
...
@@ -3404,34 +3205,34 @@ def expand_as(x, y, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
expand_as
(
x
,
None
,
y
.
shape
)
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'expand_as'
,
)
check_type
(
y
,
'y'
,
Variable
,
'expand_as'
)
if
_non_static_mode
():
return
_legacy_C_ops
.
expand_as_v2
(
x
,
'target_shape'
,
y
.
shape
)
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'expand_as'
)
check_type
(
y
,
'y'
,
Variable
,
'expand_as'
)
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for expand_as is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input 'x'."
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for expand_as is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input 'x'."
helper
=
LayerHelper
(
'expand_as'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_as_v2'
,
inputs
=
inputs
,
attrs
=
{
'target_shape'
:
y
.
shape
},
outputs
=
{
'Out'
:
out
},
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
'expand_as'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_as_v2'
,
inputs
=
inputs
,
attrs
=
{
'target_shape'
:
y
.
shape
},
outputs
=
{
'Out'
:
out
},
)
return
out
return
out
def broadcast_to(x, shape, name=None):
...
...
@@ -3463,68 +3264,69 @@ def broadcast_to(x, shape, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
expand
(
x
,
shape
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
expand_v2
(
x
,
'shape'
,
shape
)
if
isinstance
(
shape
,
Variable
):
assert
len
(
shape
.
shape
)
==
1
,
'shape must be an 1-D Tensor.'
else
:
for
elem
in
shape
:
if
isinstance
(
elem
,
Variable
):
assert
(
len
(
elem
.
shape
)
==
1
),
'Elements in shape must be 1-D Tensors or integers.'
else
:
type_tuple
=
(
int
,
np
.
int32
,
np
.
int64
)
assert
isinstance
(
elem
,
type_tuple
),
'Elements in shape must be 1-D Tensors or integers.'
if
isinstance
(
shape
,
Variable
):
assert
len
(
shape
.
shape
)
==
1
,
'shape must be an 1-D Tensor.'
else
:
for
elem
in
shape
:
if
isinstance
(
elem
,
Variable
):
assert
(
len
(
elem
.
shape
)
==
1
),
'Elements in shape must be 1-D Tensors or integers.'
else
:
type_tuple
=
(
int
,
np
.
int32
,
np
.
int64
)
assert
isinstance
(
elem
,
type_tuple
),
'Elements in shape must be 1-D Tensors or integers.'
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'broadcast_to'
)
check_type
(
shape
,
'shape'
,
(
list
,
tuple
,
Variable
),
'broadcast_to'
)
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for broadcast_to is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'broadcast_to'
,
)
check_type
(
shape
,
'shape'
,
(
list
,
tuple
,
Variable
),
'broadcast_to'
)
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for broadcast_to is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
)
inputs
=
{
"X"
:
[
x
]}
attrs
=
{}
inputs
=
{
"X"
:
[
x
]}
attrs
=
{}
helper
=
LayerHelper
(
'expand'
,
**
locals
())
helper
=
LayerHelper
(
'expand'
,
**
locals
())
def
get_attr_expand_shape
(
list_expand_shape
):
attrs_expand_shape
=
[]
for
idx
,
shape
in
enumerate
(
list_expand_shape
):
if
isinstance
(
shape
,
Variable
):
attrs_expand_shape
.
append
(
-
1
)
else
:
attrs_expand_shape
.
append
(
shape
)
assert
(
shape
>
0
or
shape
==
-
1
),
"All elements in shape of broadcast_to must be positive or -1."
return
attrs_expand_shape
def
get_attr_expand_shape
(
list_expand_shape
):
attrs_expand_shape
=
[]
for
idx
,
shape
in
enumerate
(
list_expand_shape
):
if
isinstance
(
shape
,
Variable
):
attrs_expand_shape
.
append
(
-
1
)
else
:
attrs_expand_shape
.
append
(
shape
)
assert
(
shape
>
0
or
shape
==
-
1
),
"All elements in shape of broadcast_to must be positive or -1."
return
attrs_expand_shape
if
isinstance
(
shape
,
Variable
):
shape
.
stop_gradient
=
True
inputs
[
'Shape'
]
=
shape
elif
isinstance
(
shape
,
(
list
,
tuple
)):
attrs
[
'shape'
]
=
get_attr_expand_shape
(
shape
)
if
utils
.
_contain_var
(
shape
):
inputs
[
'expand_shapes_tensor'
]
=
utils
.
_convert_to_tensor_list
(
shape
)
if
isinstance
(
shape
,
Variable
):
shape
.
stop_gradient
=
True
inputs
[
'Shape'
]
=
shape
elif
isinstance
(
shape
,
(
list
,
tuple
)):
attrs
[
'shape'
]
=
get_attr_expand_shape
(
shape
)
if
utils
.
_contain_var
(
shape
):
inputs
[
'expand_shapes_tensor'
]
=
utils
.
_convert_to_tensor_list
(
shape
)
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_v2'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_v2'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
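A minimal sketch of paddle.broadcast_to, which repeats size-1 (or missing) dimensions up to a target shape (expected output per the documented semantics):

import paddle

x = paddle.to_tensor([1, 2, 3])              # shape [3]
out = paddle.broadcast_to(x, shape=[2, 3])   # [[1, 2, 3], [1, 2, 3]]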
def expand(x, shape, name=None):
...
...
@@ -3557,72 +3359,69 @@ def expand(x, shape, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
expand
(
x
,
shape
)
if
paddle
.
in_dynamic_mode
():
return
_legacy_C_ops
.
expand_v2
(
x
,
'shape'
,
shape
)
if
isinstance
(
shape
,
Variable
):
assert
len
(
shape
.
shape
)
==
1
,
'shape must be an 1-D Tensor.'
else
:
for
elem
in
shape
:
if
isinstance
(
elem
,
Variable
):
assert
(
len
(
elem
.
shape
)
==
1
),
'Elements in shape must be 1-D Tensors or integers.'
else
:
type_tuple
=
(
int
,
np
.
int32
,
np
.
int64
)
assert
isinstance
(
elem
,
type_tuple
),
'Elements in shape must be 1-D Tensors or integers.'
if
isinstance
(
shape
,
Variable
):
assert
len
(
shape
.
shape
)
==
1
,
'shape must be an 1-D Tensor.'
else
:
for
elem
in
shape
:
if
isinstance
(
elem
,
Variable
):
assert
(
len
(
elem
.
shape
)
==
1
),
'Elements in shape must be 1-D Tensors or integers.'
else
:
type_tuple
=
(
int
,
np
.
int32
,
np
.
int64
)
assert
isinstance
(
elem
,
type_tuple
),
'Elements in shape must be 1-D Tensors or integers.'
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'expand'
,
)
check_type
(
shape
,
'shape'
,
(
list
,
tuple
,
Variable
),
'expand'
)
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for expand is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'expand'
,
)
check_type
(
shape
,
'shape'
,
(
list
,
tuple
,
Variable
),
'expand'
)
if
convert_dtype
(
x
.
dtype
)
==
'bool'
and
not
x
.
stop_gradient
:
raise
ValueError
(
"When the data type of input 'x' for expand is bool, "
"you must set its stop_gradient to be False by "
"some_var.stop_gradient = True, supporting "
"some_var as the input."
)
inputs
=
{
"X"
:
[
x
]}
attrs
=
{}
inputs
=
{
"X"
:
[
x
]}
attrs
=
{}
helper
=
LayerHelper
(
'expand'
,
**
locals
())
helper
=
LayerHelper
(
'expand'
,
**
locals
())
def
get_attr_expand_shape
(
list_expand_shape
):
attrs_expand_shape
=
[]
for
idx
,
shape
in
enumerate
(
list_expand_shape
):
if
isinstance
(
shape
,
Variable
):
attrs_expand_shape
.
append
(
-
2
)
else
:
attrs_expand_shape
.
append
(
shape
)
assert
(
shape
>
0
or
shape
==
-
1
),
"All elements in shape of expand must be positive or -1."
return
attrs_expand_shape
def
get_attr_expand_shape
(
list_expand_shape
):
attrs_expand_shape
=
[]
for
idx
,
shape
in
enumerate
(
list_expand_shape
):
if
isinstance
(
shape
,
Variable
):
attrs_expand_shape
.
append
(
-
2
)
else
:
attrs_expand_shape
.
append
(
shape
)
assert
(
shape
>
0
or
shape
==
-
1
),
"All elements in shape of expand must be positive or -1."
return
attrs_expand_shape
if
isinstance
(
shape
,
Variable
):
shape
.
stop_gradient
=
True
inputs
[
'Shape'
]
=
shape
elif
isinstance
(
shape
,
(
list
,
tuple
)):
attrs
[
'shape'
]
=
get_attr_expand_shape
(
shape
)
if
utils
.
_contain_var
(
shape
):
inputs
[
'expand_shapes_tensor'
]
=
utils
.
_convert_to_tensor_list
(
shape
)
if
isinstance
(
shape
,
Variable
):
shape
.
stop_gradient
=
True
inputs
[
'Shape'
]
=
shape
elif
isinstance
(
shape
,
(
list
,
tuple
)):
attrs
[
'shape'
]
=
get_attr_expand_shape
(
shape
)
if
utils
.
_contain_var
(
shape
):
inputs
[
'expand_shapes_tensor'
]
=
utils
.
_convert_to_tensor_list
(
shape
)
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_v2'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
'expand_v2'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
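A minimal sketch of paddle.expand, which broadcasts along size-1 dimensions to a target shape (expected output per the documented semantics):

import paddle

x = paddle.to_tensor([[1], [2], [3]])   # shape [3, 1]
out = paddle.expand(x, shape=[3, 4])    # each row repeated along dim 1
# [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]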
def reshape(x, shape, name=None):
...
...
@@ -3710,109 +3509,92 @@ def reshape(x, shape, name=None):
        return out
    else:
-       if _in_legacy_dygraph():
-           tmp_tensor_type = Variable
-           if isinstance(shape, (list, tuple)):
-               shape = [
-                   item.numpy().item(0) if isinstance(item, Variable) else item
-                   for item in shape
-               ]
-               out, _ = _legacy_C_ops.reshape2(x, None, 'shape', shape)
-           elif isinstance(shape, tmp_tensor_type):
-               shape.stop_gradient = True
-               out, _ = _legacy_C_ops.reshape2(x, shape)
-           else:
-               raise ValueError(
-                   "shape must be an instance of `list`, `tuple` or `Variable`,"
-                   " got '{}.'".format(type(shape))
-               )
-           return out
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'bool',
                'uint16',
            ],
            'reshape',
        )
        check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
        check_type(
            actual_shape, 'actual_shape', (Variable, type(None)), 'reshape'
        )

        helper = LayerHelper("reshape2", **locals())

        def get_attr_shape(list_shape):
            unk_dim_idx = -1
            attrs_shape = []
            for dim_idx, dim_size in enumerate(list_shape):
                if isinstance(dim_size, Variable):
                    attrs_shape.append(-1)
                else:
                    attrs_shape.append(dim_size)
                    if dim_size == -1:
                        assert unk_dim_idx == -1, (
                            "Only one dimension value of 'shape' in reshape can "
                            "be -1. But received shape[%d] is also -1.\n"
                            "\n\t# N = x.shape()[2]\t\t# N is an int. "
                            "(NOT recommend under @to_static)\n\tN = paddle.shape(x)[2]\t\t"
                            "# N is a Tensor. (Recommend)\n\tz = paddle.reshape([N, -1, 4])"
                            "\t# z.shape is [-1, -1, 4]\n\n"
                            "    If your target shape in Reshape represents dynamic shape, "
                            "please turn it into a Tensor under @to_static. See above example for details."
                            % dim_idx
                        )
                        unk_dim_idx = dim_idx
                    elif dim_size == 0:
                        assert dim_idx < len(x.shape), (
                            "The index of 0 in `shape` must be less than "
                            "the input tensor X's dimensions. "
                            "But received shape[%d] = 0, X's dimensions = %d."
                            % (dim_idx, len(x.shape))
                        )
                    else:
                        assert dim_size > 0, (
                            "Each dimension value of 'shape' in reshape must not "
                            "be negative except one unknown dimension. "
                            "But received shape[%d] = %s."
                            % (dim_idx, str(dim_size))
                        )
            return attrs_shape

        inputs = {"X": x}
        attrs = {}
        if isinstance(shape, Variable):
            shape.stop_gradient = True
            inputs["Shape"] = shape
        elif isinstance(shape, (list, tuple)):
            assert len(shape) > 0, (
                "The size of 'shape' in reshape can't be zero, "
                "but received %s." % len(shape)
            )
            attrs["shape"] = get_attr_shape(shape)
            if utils._contain_var(shape):
                inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
        elif isinstance(actual_shape, Variable):
            actual_shape.stop_gradient = True
            inputs["Shape"] = actual_shape

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="reshape2",
            inputs=inputs,
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )
        return out
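A minimal sketch of paddle.reshape showing the -1 and 0 conventions enforced by get_attr_shape above (-1 is inferred, 0 copies the input dimension):

import paddle

x = paddle.rand([2, 4, 6])
y = paddle.reshape(x, [-1, 6])    # -1 inferred: shape [8, 6]
z = paddle.reshape(x, [0, 3, 8])  # 0 copies input dim 0: shape [2, 3, 8]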
@inplace_apis_in_dygraph_only
...
...
@@ -3844,24 +3626,6 @@ def reshape_(x, shape, name=None):
        )
        return out
-    else:
-        if isinstance(shape, (list, tuple)):
-            shape = [
-                item.numpy().item(0) if isinstance(item, Variable) else item
-                for item in shape
-            ]
-            out, _ = _legacy_C_ops.reshape2_(x, None, 'shape', shape)
-            return out
-        elif isinstance(shape, Variable):
-            shape.stop_gradient = True
-            # NOTE(pangyoki): Cannot support the case where the shape Tensor
-            # is negative. In the infer_shape stage, the input's dim will
-            # be changed to a negative number.
-            # Thus, convert Shape Tensor to list firstly and then call
-            # reshape inplace op.
-            shape_list = shape.numpy().tolist()
-            out, _ = _legacy_C_ops.reshape2_(x, None, 'shape', shape_list)
-            return out


def gather_nd(x, index, name=None):
...
...
@@ -3939,24 +3703,24 @@ def gather_nd(x, index, name=None):
    if in_dygraph_mode():
        return _C_ops.gather_nd(x, index)
    else:
-       if _in_legacy_dygraph():
-           return _legacy_C_ops.gather_nd(x, index)
        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float32', 'float64', 'int16', 'int32', 'int64'],
            'gather_np',
        )
        check_variable_and_dtype(
            index, 'index', ['int32', 'int64'], 'gather_np'
        )
        helper = LayerHelper('gather_nd', **locals())
        dtype = helper.input_dtype()
        output = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="gather_nd",
            inputs={"X": x, "Index": index},
            outputs={"Out": output},
        )
        return output
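A minimal sketch of paddle.gather_nd, where each row of index addresses a (possibly partial) coordinate in x (expected output per the documented semantics):

import paddle

x = paddle.to_tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])  # shape [2, 2, 2]
index = paddle.to_tensor([[0, 1], [1, 0]])
out = paddle.gather_nd(x, index)  # x[0, 1] and x[1, 0]: [[3, 4], [5, 6]]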
def strided_slice(x, axes, starts, ends, strides, name=None):
...
...
@@ -4043,63 +3807,58 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
strided_slice
(
x
,
axes
,
starts
,
ends
,
strides
)
else
:
helper
=
LayerHelper
(
'strided_slice'
,
**
locals
())
helper
=
LayerHelper
(
'strided_slice'
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'strided_slice'
,
)
check_type
(
axes
,
'axes'
,
(
list
,
tuple
),
'strided_slice'
)
check_type
(
starts
,
'starts'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
check_type
(
ends
,
'ends'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
check_type
(
strides
,
'strides'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
def
check_list_elements_dtype
(
list_input
,
input_name
):
if
isinstance
(
list_input
,
Variable
):
check_dtype
(
list_input
.
dtype
,
input_name
,
[
'int32'
],
'strided_slice'
)
else
:
for
i
,
var
in
enumerate
(
list_input
):
var_name
=
input_name
+
'['
+
str
(
i
)
+
']'
if
isinstance
(
var
,
Variable
):
check_dtype
(
var
.
dtype
,
var_name
,
[
'int32'
],
'strided_slice'
)
check_list_elements_dtype
(
axes
,
'axes'
)
check_list_elements_dtype
(
starts
,
'starts'
)
check_list_elements_dtype
(
ends
,
'ends'
)
check_list_elements_dtype
(
strides
,
'strides'
)
def
get_new_list_tensor
(
old_list
):
new_list_tensor
=
[]
for
dim
in
old_list
:
if
isinstance
(
dim
,
Variable
):
dim
.
stop_gradient
=
True
new_list_tensor
.
append
(
dim
)
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'strided_slice'
,
)
check_type
(
axes
,
'axes'
,
(
list
,
tuple
),
'strided_slice'
)
check_type
(
starts
,
'starts'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
check_type
(
ends
,
'ends'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
check_type
(
strides
,
'strides'
,
(
list
,
tuple
,
Variable
),
'strided_slice'
)
def
check_list_elements_dtype
(
list_input
,
input_name
):
if
isinstance
(
list_input
,
Variable
):
check_dtype
(
list_input
.
dtype
,
input_name
,
[
'int32'
],
'strided_slice'
)
else
:
assert
isinstance
(
dim
,
int
)
temp_out
=
helper
.
create_variable_for_type_inference
(
'int32'
)
fill_constant
([
1
],
'int32'
,
dim
,
force_cpu
=
True
,
out
=
temp_out
)
new_list_tensor
.
append
(
temp_out
)
return
new_list_tensor
inputs
=
{
'Input'
:
x
}
attrs
=
{
'axes'
:
axes
}
infer_flags
=
list
(
1
for
i
in
range
(
len
(
axes
)))
for
i
,
var
in
enumerate
(
list_input
):
var_name
=
input_name
+
'['
+
str
(
i
)
+
']'
if
isinstance
(
var
,
Variable
):
check_dtype
(
var
.
dtype
,
var_name
,
[
'int32'
],
'strided_slice'
)
check_list_elements_dtype
(
axes
,
'axes'
)
check_list_elements_dtype
(
starts
,
'starts'
)
check_list_elements_dtype
(
ends
,
'ends'
)
check_list_elements_dtype
(
strides
,
'strides'
)
def
get_new_list_tensor
(
old_list
):
new_list_tensor
=
[]
for
dim
in
old_list
:
if
isinstance
(
dim
,
Variable
):
dim
.
stop_gradient
=
True
new_list_tensor
.
append
(
dim
)
else
:
assert
isinstance
(
dim
,
int
)
temp_out
=
helper
.
create_variable_for_type_inference
(
'int32'
)
fill_constant
(
[
1
],
'int32'
,
dim
,
force_cpu
=
True
,
out
=
temp_out
)
new_list_tensor
.
append
(
temp_out
)
return
new_list_tensor
if
_in_legacy_dygraph
():
inputs
=
{
'Input'
:
x
}
attrs
=
{
'axes'
:
axes
,
'starts'
:
starts
,
'ends'
:
ends
,
'strides'
:
strides
,
'infer_flags'
:
infer_flags
,
}
else
:
attrs
=
{
'axes'
:
axes
}
infer_flags
=
list
(
1
for
i
in
range
(
len
(
axes
)))
# starts
if
isinstance
(
starts
,
Variable
):
starts
.
stop_gradient
=
True
...
...
@@ -4151,14 +3910,17 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
        else:
            attrs['strides'] = strides
        attrs['infer_flags'] = infer_flags
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype('x')
        )
        helper.append_op(
            type='strided_slice',
            inputs=inputs,
            attrs=attrs,
            outputs={'Out': out},
        )
        return out
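A minimal sketch of paddle.strided_slice (expected output per the documented start/end/stride semantics):

import paddle

x = paddle.arange(12).reshape([3, 4])
# along axis 0: start 0, end 3, stride 2 -> rows 0 and 2
out = paddle.strided_slice(x, axes=[0], starts=[0], ends=[3], strides=[2])
# [[0, 1, 2, 3], [8, 9, 10, 11]]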
def tensordot(x, y, axes=2, name=None):
...
...
@@ -4281,7 +4043,7 @@ def tensordot(x, y, axes=2, name=None):
    check_type(axes, 'axes', (int, tuple, list, Variable), op_type)

    def _var_to_list(var):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
            return tolist(var)
        raise TypeError(
            "The 'axes' with type 'Tensor' in "
...
...
@@ -4409,20 +4171,20 @@ def as_complex(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
as_complex
(
x
)
if
_in_legacy_dygraph
()
:
return
_legacy_C_ops
.
as_complex
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'as_complex'
)
op_type
=
"as_complex"
helper
=
LayerHelper
(
op_type
,
**
locals
())
inputs
=
{
"X"
:
x
}
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
_real_to_complex_dtype
(
x
.
dtype
)
)
outputs
=
{
"Out"
:
out
}
attrs
=
{}
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
attrs
=
attrs
,
outputs
=
outputs
)
return
out
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'as_complex'
)
op_type
=
"as_complex"
helper
=
LayerHelper
(
op_type
,
**
locals
()
)
inputs
=
{
"X"
:
x
}
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
_real_to_complex_dtype
(
x
.
dtype
)
)
outputs
=
{
"Out"
:
out
}
attrs
=
{}
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
attrs
=
attrs
,
outputs
=
outputs
)
return
out
def as_real(x, name=None):
...
...
@@ -4462,19 +4224,17 @@ def as_real(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
as_real
(
x
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
as_real
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'complex64'
,
'complex128'
],
'as_real'
)
op_type
=
"as_real"
helper
=
LayerHelper
(
op_type
,
**
locals
())
inputs
=
{
"X"
:
x
}
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
_complex_to_real_dtype
(
x
.
dtype
)
)
outputs
=
{
"Out"
:
out
}
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
outputs
=
outputs
)
return
out
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'complex64'
,
'complex128'
],
'as_real'
)
op_type
=
"as_real"
helper
=
LayerHelper
(
op_type
,
**
locals
())
inputs
=
{
"X"
:
x
}
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
_complex_to_real_dtype
(
x
.
dtype
)
)
outputs
=
{
"Out"
:
out
}
helper
.
append_op
(
type
=
op_type
,
inputs
=
inputs
,
outputs
=
outputs
)
return
out
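A minimal sketch of the paddle.as_complex / paddle.as_real round trip (the last dimension of size 2 holds the real and imaginary parts):

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])  # shape [2, 2], float32
c = paddle.as_complex(x)  # shape [2], complex64: [1+2j, 3+4j]
r = paddle.as_real(c)     # back to shape [2, 2], float32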
def repeat_interleave(x, repeats, axis=None, name=None):
...
...
@@ -4633,38 +4393,34 @@ def moveaxis(x, source, destination, name=None):
    if in_dygraph_mode():
        out = _C_ops.transpose(x, perm)
        return out
    else:
-       if _in_legacy_dygraph():
-           out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-           return out
        check_variable_and_dtype(
            x,
            'x',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'moveaxis',
        )
        helper = LayerHelper('moveaxis', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        x_shape = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='transpose2',
            inputs={'X': [x]},
            outputs={'Out': [out], 'XShape': [x_shape]},
            attrs={'axis': perm},
        )
        return out
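A minimal sketch of paddle.moveaxis, which is implemented on top of transpose2 as shown above (expected shape per the documented semantics):

import paddle

x = paddle.rand([2, 3, 4])
y = paddle.moveaxis(x, 0, -1)  # axis 0 moved to the end: shape [3, 4, 2]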
def non_negative_axis(arr, axis):
    ndim = len(arr.shape)
...
...
@@ -4727,39 +4483,38 @@ def take_along_axis(arr, indices, axis):
    if not broadcast_shape:
        # if indices matrix have larger size than arr, arr should broadcast into indices shape.
        broadcast_shape = indices.shape
-    if _non_static_mode():
+    if in_dygraph_mode():
        indices = paddle.broadcast_to(indices, broadcast_shape)
        broadcast_shape_list = list(broadcast_shape)
        broadcast_shape_list[axis] = list(arr.shape)[axis]
        broadcast_shape = tuple(broadcast_shape_list)
        arr = paddle.broadcast_to(arr, broadcast_shape)
-       if not _in_legacy_dygraph():
-           return _C_ops.take_along_axis(arr, indices, axis)
-       return _legacy_C_ops.take_along_axis(arr, indices, 'Axis', axis)
+       return _C_ops.take_along_axis(arr, indices, axis)
    else:
        check_variable_and_dtype(
            arr,
            'x',
            ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
            'take_along_axis',
        )
        check_variable_and_dtype(
            indices, 'index', ['int32', 'int64'], 'take_along_axis'
        )
        indices = paddle.broadcast_to(indices, broadcast_shape)
        broadcast_shape_list = list(broadcast_shape)
        broadcast_shape_list[axis] = list(arr.shape)[axis]
        broadcast_shape = tuple(broadcast_shape_list)
        arr = paddle.broadcast_to(arr, broadcast_shape)
        helper = LayerHelper('take_along_axis', **locals())
        dtype = helper.input_dtype()
        result = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="take_along_axis",
            inputs={"Input": arr, "Index": indices},
            attrs={"Axis": axis},
            outputs={"Result": result},
        )
        return result
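A minimal sketch of paddle.take_along_axis, here paired with argsort to sort each row (expected output per the documented element-wise gather semantics):

import paddle

arr = paddle.to_tensor([[10, 30, 20], [60, 40, 50]])
idx = paddle.argsort(arr, axis=1)              # per-row sort order, int64
out = paddle.take_along_axis(arr, idx, 1)      # [[10, 20, 30], [40, 50, 60]]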
def put_along_axis(arr, indices, values, axis, reduce='assign'):
...
...
@@ -4797,7 +4552,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
    )
    axis = non_negative_axis(arr, axis)
    broadcast_shape = infer_broadcast_shape(arr, indices, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
        values = (
            paddle.to_tensor(values)
            if not isinstance(values, paddle.Tensor)
...
...
@@ -4806,34 +4561,30 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
        if broadcast_shape:
            indices = paddle.broadcast_to(indices, broadcast_shape)
        values = paddle.broadcast_to(values, indices.shape)
-       if in_dygraph_mode():
-           return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
-       return _legacy_C_ops.put_along_axis(
-           arr, indices, values, "Axis", axis, "Reduce", reduce
-       )
+       return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
    else:
        check_variable_and_dtype(
            arr,
            'x',
            ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
            'put_along_axis',
        )
        check_variable_and_dtype(
            indices, 'index', ['int32', 'int64'], 'put_along_axis'
        )
        if broadcast_shape:
            indices = paddle.broadcast_to(indices, broadcast_shape)
        values = paddle.broadcast_to(values, indices.shape)
        helper = LayerHelper('put_along_axis', **locals())
        dtype = helper.input_dtype()
        result = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="put_along_axis",
            inputs={"Input": arr, "Index": indices, "Value": values},
            attrs={"Axis": axis, "Reduce": reduce},
            outputs={"Result": result},
        )
        return result
@inplace_apis_in_dygraph_only
...
...
@@ -4856,11 +4607,7 @@ def put_along_axis_(arr, indices, values, axis, reduce='assign'):
    if broadcast_shape:
        indices = paddle.broadcast_to(indices, broadcast_shape)
    values = paddle.broadcast_to(values, indices.shape)
-    if in_dygraph_mode():
-        return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)
-    return _legacy_C_ops.put_along_axis_(
-        arr, indices, values, "Axis", axis, "Reduce", reduce
-    )
+    return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)


def index_add(x, index, axis, value, name=None):
...
...
python/paddle/tensor/math.py
Browse file @ 861fef52
...
...
@@ -34,9 +34,6 @@ from ..fluid.data_feeder import (
from ..fluid.layers import utils
from ..framework import (
    LayerHelper,
-    _in_legacy_dygraph,
-    _non_static_mode,
-    _varbase_creator,
    convert_np_dtype_to_dtype_,
    core,
    in_dygraph_mode,
...
...
@@ -158,16 +155,14 @@ def log(x, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
log
(
x
)
if
_in_legacy_dygraph
():
return
_legacy_C_ops
.
log
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
"log"
)
inputs
=
{
'X'
:
[
x
]}
helper
=
LayerHelper
(
'log'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
"log"
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
"log"
)
inputs
=
{
'X'
:
[
x
]}
helper
=
LayerHelper
(
'log'
,
**
locals
())
dtype
=
helper
.
input_dtype
(
input_param_name
=
'x'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
type
=
"log"
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
...
...
@@ -220,51 +215,39 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    if in_dygraph_mode():
        out = _C_ops.scale(x, scale, float(bias), bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out, act)
-   elif _in_legacy_dygraph():
-       _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-       out = _legacy_C_ops.scale(
-           x,
-           'scale',
-           float(_scale),
-           'bias',
-           float(bias),
-           'bias_after_scale',
-           bias_after_scale,
-       )
-       return dygraph_utils._append_activation_in_dygraph(out, act)
    else:
        check_variable_and_dtype(
            x,
            "x",
            [
                'float16',
                'uint16',
                'float32',
                'float64',
                'int8',
                'int16',
                'int32',
                'int64',
                'uint8',
            ],
            "scale",
        )
        inputs = {'X': [x]}
        attrs = {
            'bias': float(bias),
            'bias_after_scale': bias_after_scale,
        }
        if isinstance(scale, Variable):
            inputs['ScaleTensor'] = [scale]
        else:
            attrs['scale'] = float(scale)
        helper = LayerHelper('scale', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

        helper.append_op(
            type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return helper.append_activation(out)
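A minimal sketch of paddle.scale showing the effect of bias_after_scale (expected values per the documented formula):

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
# bias_after_scale=True (default) computes scale * x + bias
y = paddle.scale(x, scale=2.0, bias=1.0)  # [3., 5., 7.]
# bias_after_scale=False computes scale * (x + bias)
z = paddle.scale(x, scale=2.0, bias=1.0, bias_after_scale=False)  # [4., 6., 8.]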
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
...
...
@@ -295,20 +278,22 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
"""
if
_non_static
_mode
():
if
in_dygraph
_mode
():
return
_legacy_C_ops
.
stanh
(
x
,
'scale_a'
,
scale_a
,
'scale_b'
,
scale_b
)
else
:
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'stanh'
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'stanh'
)
helper
=
LayerHelper
(
'stanh'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'stanh'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'scale_a'
:
scale_a
,
'scale_b'
:
scale_b
},
)
return
out
helper
=
LayerHelper
(
'stanh'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'stanh'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'scale_a'
:
scale_a
,
'scale_b'
:
scale_b
},
)
return
out
def multiplex(inputs, index, name=None):
...
...
@@ -363,32 +348,32 @@ def multiplex(inputs, index, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
multiplex
(
inputs
,
index
)
elif
_in_legacy_dygraph
():
return
_legacy_C_ops
.
multiplex
(
index
,
inputs
)
helper
=
LayerHelper
(
'multiplex'
,
**
locals
())
else
:
helper
=
LayerHelper
(
'multiplex'
,
**
locals
())
check_type
(
inputs
,
'inputs'
,
(
list
),
'multiplex'
)
if
len
(
inputs
)
<
2
:
raise
ValueError
(
"inputs should be a list object with at least 2 elements."
)
for
id
,
x
in
enumerate
(
inputs
):
check_type
(
inputs
,
'inputs'
,
(
list
),
'multiplex'
)
if
len
(
inputs
)
<
2
:
raise
ValueError
(
"inputs should be a list object with at least 2 elements."
)
for
id
,
x
in
enumerate
(
inputs
):
check_variable_and_dtype
(
x
,
'input['
+
str
(
id
)
+
']'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'multiplex'
,
)
check_variable_and_dtype
(
x
,
'input['
+
str
(
id
)
+
']'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'multiplex'
,
index
,
"index"
,
[
'int32'
,
'int64'
],
'multiplex'
)
check_variable_and_dtype
(
index
,
"index"
,
[
'int32'
,
'int64'
],
'multiplex'
)
out
=
helper
.
create_variable_for_type_inference
(
inputs
[
0
].
dtype
)
helper
.
append_op
(
type
=
'multiplex'
,
inputs
=
{
'X'
:
inputs
,
'Ids'
:
index
},
outputs
=
{
'Out'
:
[
out
]},
)
return
out
out
=
helper
.
create_variable_for_type_inference
(
inputs
[
0
].
dtype
)
helper
.
append_op
(
type
=
'multiplex'
,
inputs
=
{
'X'
:
inputs
,
'Ids'
:
index
},
outputs
=
{
'Out'
:
[
out
]},
)
return
out
@inplace_apis_in_dygraph_only
...
...
@@ -399,17 +384,6 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
if
in_dygraph_mode
():
return
_C_ops
.
scale_
(
x
,
scale
,
float
(
bias
),
bias_after_scale
)
if
_in_legacy_dygraph
():
_scale
=
scale
.
numpy
().
item
(
0
)
if
isinstance
(
scale
,
Variable
)
else
scale
return
_legacy_C_ops
.
scale_
(
x
,
'scale'
,
float
(
_scale
),
'bias'
,
float
(
bias
),
'bias_after_scale'
,
bias_after_scale
,
)
def
pow
(
x
,
y
,
name
=
None
):
...
...
@@ -469,36 +443,26 @@ def pow(x, y, name=None):
            raise TypeError(
                'y must be scalar or tensor type, but received: %s '
                % (y.dtype)
            )
-    if _in_legacy_dygraph():
-        if isinstance(y, (int, float)):
-            return _legacy_C_ops.pow(x, 'factor', y)
-        elif isinstance(y, (paddle.Tensor, Variable)):
-            return _elementwise_op_in_dygraph(
-                x, y, axis=-1, act=None, op_name='elementwise_pow'
-            )
-        else:
-            raise TypeError(
-                'y must be scalar or tensor type, but received: %s '
-                % (y.dtype)
-            )
    else:
        # in static graph mode
        if isinstance(y, (int, float)):
            helper = LayerHelper('pow', **locals())
            inputs = {'X': x}
            attrs = {'factor': y}
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
            )
            return out
        elif isinstance(y, (paddle.Tensor, Variable)):
            # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
            helper = LayerHelper('elementwise_pow', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
        else:
            raise TypeError(
                'y must be scalar or tensor type, but received: %s '
                % (type(y))
            )
OP_NAMEMAPPING = {
...
...
@@ -531,11 +495,6 @@ def _elementwise_op_in_dygraph(
        OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
    )
    out = op(x, y)
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn
    )
...
...
@@ -643,10 +602,7 @@ def add(x, y, name=None):
    if in_dygraph_mode():
        return _C_ops.add(x, y)
    else:
-       if _in_legacy_dygraph():
-           return _legacy_C_ops.elementwise_add(x, y)
-       else:
        return _elementwise_op(LayerHelper('elementwise_add', **locals()))
@inplace_apis_in_dygraph_only
...
...
@@ -735,12 +691,7 @@ def subtract(x, y, name=None):
    if in_dygraph_mode():
        return _C_ops.subtract(x, y)
    else:
-       if _in_legacy_dygraph():
-           return _elementwise_op_in_dygraph(
-               x, y, axis=axis, act=act, op_name=op_type
-           )
-       else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
@inplace_apis_in_dygraph_only
...
...
@@ -807,12 +758,7 @@ def divide(x, y, name=None):
    if in_dygraph_mode():
        return _C_ops.divide(x, y)
    else:
-       if _in_legacy_dygraph():
-           return _elementwise_op_in_dygraph(
-               x, y, axis=axis, act=act, op_name=op_type
-           )
-       else:
        return _elementwise_op(LayerHelper(op_type, **locals()))
def floor_divide(x, y, name=None):
...
...
@@ -853,10 +799,8 @@ def floor_divide(x, y, name=None):
    axis = -1
    if in_dygraph_mode():
        return _C_ops.floor_divide(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def remainder(x, y, name=None):
...
...
@@ -897,10 +841,8 @@ def remainder(x, y, name=None):
    if in_dygraph_mode():
        return _C_ops.remainder(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
@inplace_apis_in_dygraph_only
...
...
@@ -971,18 +913,13 @@ def multiply(x, y, name=None):
    if in_dygraph_mode():
        return _C_ops.multiply(x, y)
    else:
-       if _in_legacy_dygraph():
-           return _elementwise_op_in_dygraph(
-               x, y, axis=axis, act=act, op_name=op_type
-           )
-       else:
        if x.dtype != y.dtype:
            raise TypeError(
                'Input tensors must be same type, but received type of x: %s, type of y: %s '
                % (x.dtype, y.dtype)
            )

        return _elementwise_op(LayerHelper(op_type, **locals()))
@dygraph_only
...
...
@@ -1017,12 +954,7 @@ def _add_with_axis(x, y, axis=-1, name=None):
    else:
        op_type = 'elementwise_add'
    act = None
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+    return _elementwise_op(LayerHelper(op_type, **locals()))
def _subtract_with_axis(x, y, axis=-1, name=None):
...
...
@@ -1034,12 +966,7 @@ def _subtract_with_axis(x, y, axis=-1, name=None):
    else:
        op_type = 'elementwise_sub'
    act = None
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+    return _elementwise_op(LayerHelper(op_type, **locals()))
def _multiply_with_axis(x, y, axis=-1, name=None):
...
...
@@ -1051,12 +978,7 @@ def _multiply_with_axis(x, y, axis=-1, name=None):
    else:
        op_type = 'elementwise_mul'
    act = None
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+    return _elementwise_op(LayerHelper(op_type, **locals()))
def _divide_with_axis(x, y, axis=-1, name=None):
...
...
@@ -1066,12 +988,7 @@ def _divide_with_axis(x, y, axis=-1, name=None):
    else:
        op_type = 'elementwise_div'
    act = None
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    else:
-        return _elementwise_op(LayerHelper(op_type, **locals()))
+    return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None):
...
...
@@ -1135,11 +1052,8 @@ def maximum(x, y, name=None):
    act = None
    if in_dygraph_mode():
        return _C_ops.maximum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None):
...
...
@@ -1203,11 +1117,8 @@ def minimum(x, y, name=None):
    act = None
    if in_dygraph_mode():
        return _C_ops.minimum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def fmax(x, y, name=None):
...
...
@@ -1273,11 +1184,8 @@ def fmax(x, y, name=None):
    act = None
    if in_dygraph_mode():
        return _C_ops.fmax(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def fmin(x, y, name=None):
...
...
@@ -1343,11 +1251,8 @@ def fmin(x, y, name=None):
    act = None
    if in_dygraph_mode():
        return _C_ops.fmin(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
...
...
@@ -1417,68 +1322,46 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
if
in_dygraph_mode
():
return
_C_ops
.
sum
(
x
,
axis
,
dtype
,
keepdim
)
else
:
reduce_all
,
axis
=
_get_reduce_axis_with_tensor
(
axis
,
x
)
attrs
=
{
'dim'
:
axis
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
reduce_all
}
reduce_all
,
axis
=
_get_reduce_axis_with_tensor
(
axis
,
x
)
if
_in_legacy_dygraph
():
if
dtype_flag
:
return
_legacy_C_ops
.
reduce_sum
(
x
,
'dim'
,
axis
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
,
'in_dtype'
,
x
.
dtype
,
'out_dtype'
,
dtype
,
)
else
:
return
_legacy_C_ops
.
reduce_sum
(
x
,
'dim'
,
axis
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
,
)
attrs
=
{
'dim'
:
axis
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
reduce_all
}
attrs
.
update
({
'in_dtype'
:
x
.
dtype
,
'out_dtype'
:
dtype
})
if
dtype_flag
:
attrs
.
update
({
'in_dtype'
:
x
.
dtype
,
'out_dtype'
:
dtype
})
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int16'
,
'int32'
,
'int64'
,
'complex64'
,
'complex128'
,
],
'sum'
,
)
check_variable_and_dtype
(
x
,
'x'
,
[
'bool'
,
'float16'
,
'float32'
,
'float64'
,
'int16'
,
'int32'
,
'int64'
,
'complex64'
,
'complex128'
,
],
'sum'
,
)
check_type
(
axis
,
'axis'
,
(
int
,
list
,
tuple
,
type
(
None
),
Variable
),
'sum'
)
check_type
(
axis
,
'axis'
,
(
int
,
list
,
tuple
,
type
(
None
),
Variable
),
'sum'
)
helper
=
LayerHelper
(
'sum'
,
**
locals
())
if
dtype_flag
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
)
else
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'reduce_sum'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
)
return
out
helper
=
LayerHelper
(
'sum'
,
**
locals
())
if
dtype_flag
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
dtype
)
else
:
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'reduce_sum'
,
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
attrs
,
)
return
out
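The `dtype_flag` path survives the cleanup: when a `dtype` argument is given, the attrs gain `in_dtype`/`out_dtype` so the reduction casts as it sums. A small eager-mode check (illustrative only):

    import paddle

    x = paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]])
    print(paddle.sum(x))                   # float32 scalar tensor
    print(paddle.sum(x, axis=1))           # row sums
    print(paddle.sum(x, dtype='float64'))  # cast-then-reduce path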
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
...
@@ -1784,41 +1667,37 @@ def add_n(inputs, name=None):
         if isinstance(inputs, Variable):
             inputs = [inputs]
         return _C_ops.add_n(inputs)
-
-    if _in_legacy_dygraph():
-        if isinstance(inputs, Variable):
-            inputs = [inputs]
-        return _legacy_C_ops.sum(inputs, 'use_mkldnn', False)
-
-    helper = LayerHelper('add_n', **locals())
-    check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
-    if isinstance(inputs, list) or isinstance(inputs, tuple):
-        if len(inputs) > 0:
-            for input in inputs:
-                check_variable_and_dtype(
-                    input, "inputs",
-                    ['float16', 'float32', 'float64', 'int32', 'int64'],
-                    'add_n',
-                )
-    else:
-        check_variable_and_dtype(
-            inputs, "inputs",
-            ['float16', 'float32', 'float64', 'int32', 'int64'],
-            'add_n',
-        )
-
-    out = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype('inputs')
-    )
-    helper.append_op(
-        type='sum',
-        inputs={'X': inputs},
-        outputs={'Out': out},
-        attrs={'use_mkldnn': False},
-    )
-    return out
+    else:
+        helper = LayerHelper('add_n', **locals())
+        check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
+        if isinstance(inputs, list) or isinstance(inputs, tuple):
+            if len(inputs) > 0:
+                for input in inputs:
+                    check_variable_and_dtype(
+                        input, "inputs",
+                        ['float16', 'float32', 'float64', 'int32', 'int64'],
+                        'add_n',
+                    )
+        else:
+            check_variable_and_dtype(
+                inputs, "inputs",
+                ['float16', 'float32', 'float64', 'int32', 'int64'],
+                'add_n',
+            )
+
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype('inputs')
+        )
+        helper.append_op(
+            type='sum',
+            inputs={'X': inputs},
+            outputs={'Out': out},
+            attrs={'use_mkldnn': False},
+        )
+        return out
def trunc(input, name=None):
...
@@ -1852,22 +1731,19 @@ def trunc(input, name=None):
     if in_dygraph_mode():
         return _C_ops.trunc(input)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.trunc(input)
-        else:
-            inputs = {"X": input}
-            attrs = {}
-
-            helper = LayerHelper("trunc", **locals())
-            check_variable_and_dtype(
-                input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc'
-            )
-            out = helper.create_variable_for_type_inference(dtype=input.dtype)
-
-            helper.append_op(
-                type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}
-            )
-            return out
+        inputs = {"X": input}
+        attrs = {}
+
+        helper = LayerHelper("trunc", **locals())
+        check_variable_and_dtype(
+            input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc'
+        )
+        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+
+        helper.append_op(
+            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )
+        return out
def mm(input, mat2, name=None):
...
@@ -1939,53 +1815,54 @@ def mm(input, mat2, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matmul(input, mat2, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(input, mat2)
-
-    def __check_input(x, y):
-        var_names = {'x': x, 'y': y}
-        for name, val in var_names.items():
-            check_variable_and_dtype(
-                val, name, ['float16', 'float32', 'float64'], 'mm'
-            )
-        x_shape = list(x.shape)
-        y_shape = list(y.shape)
-        if len(x_shape) == 1:
-            x_shape = [1] + x_shape
-        if len(y_shape) == 1:
-            y_shape = y_shape + [1]
-
-        # check the inner 2 dimensions
-        if x_shape[-1] != y_shape[-2]:
-            if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
-                raise ValueError(
-                    "After performing an optional transpose, Input X's width should be "
-                    "equal to Y's width for multiplication "
-                    "prerequisites. But received X's shape: %s, Y's shape: %s\n"
-                    % (x_shape, y_shape)
-                )
-
-        if len(y_shape) > 2 and len(x_shape) > 2:
-            for i, dim_x in enumerate(x_shape[:-2]):
-                # don't check neg shape
-                if dim_x < 0 or y_shape[i] < 0:
-                    continue
-                if dim_x != y_shape[i]:
-                    raise ValueError(
-                        "When the matrix is larger than 2 dimensions, the higher "
-                        "dimensional values of the two matrices need to be equal. "
-                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
-                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape)
-                    )
-
-    __check_input(input, mat2)
-
-    helper = LayerHelper('mm', **locals())
-    out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    helper.append_op(
-        type='matmul_v2', inputs={'X': input, 'Y': mat2}, outputs={'Out': out}
-    )
-    return out
+    else:
+
+        def __check_input(x, y):
+            var_names = {'x': x, 'y': y}
+            for name, val in var_names.items():
+                check_variable_and_dtype(
+                    val, name, ['float16', 'float32', 'float64'], 'mm'
+                )
+            x_shape = list(x.shape)
+            y_shape = list(y.shape)
+            if len(x_shape) == 1:
+                x_shape = [1] + x_shape
+            if len(y_shape) == 1:
+                y_shape = y_shape + [1]
+
+            # check the inner 2 dimensions
+            if x_shape[-1] != y_shape[-2]:
+                if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
+                    raise ValueError(
+                        "After performing an optional transpose, Input X's width should be "
+                        "equal to Y's width for multiplication "
+                        "prerequisites. But received X's shape: %s, Y's shape: %s\n"
+                        % (x_shape, y_shape)
+                    )
+
+            if len(y_shape) > 2 and len(x_shape) > 2:
+                for i, dim_x in enumerate(x_shape[:-2]):
+                    # don't check neg shape
+                    if dim_x < 0 or y_shape[i] < 0:
+                        continue
+                    if dim_x != y_shape[i]:
+                        raise ValueError(
+                            "When the matrix is larger than 2 dimensions, the higher "
+                            "dimensional values of the two matrices need to be equal. "
+                            "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
+                            "Y's shape: %s.\n" % (i, i, x_shape, y_shape)
+                        )

+        __check_input(input, mat2)
+
+        helper = LayerHelper('mm', **locals())
+        out = helper.create_variable_for_type_inference(dtype=input.dtype)
+        helper.append_op(
+            type='matmul_v2',
+            inputs={'X': input, 'Y': mat2},
+            outputs={'Out': out},
+        )
+        return out
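Note that `__check_input` now runs only on the static-graph branch; in eager mode a shape mismatch surfaces from the kernel instead. Illustrative usage (not part of the diff):

    import paddle

    a = paddle.rand([2, 3])
    b = paddle.rand([3, 4])
    print(paddle.mm(a, b).shape)  # [2, 4]
    # paddle.mm(a, paddle.rand([5, 4]))  # inner dims 3 != 5 -> error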
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
...
@@ -2080,25 +1957,21 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     if in_dygraph_mode():
         return _C_ops.addmm(input, x, y, beta, alpha)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
-            return out
-        else:
-            inputs = {'Input': input, "X": x, "Y": y}
-            attrs = {'Alpha': alpha, 'Beta': beta}
-
-            helper = LayerHelper("addmm", **locals())
-            check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
-            check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
-            check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-            helper.append_op(
-                type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}
-            )
-            return out
+        inputs = {'Input': input, "X": x, "Y": y}
+        attrs = {'Alpha': alpha, 'Beta': beta}
+
+        helper = LayerHelper("addmm", **locals())
+        check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
+        check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
+        check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )
+        return out
def renorm(x, p, axis, max_norm):
...
@@ -2154,22 +2027,17 @@ def renorm(x, p, axis, max_norm):
     if in_dygraph_mode():
         out = _C_ops.renorm(x, p, axis, max_norm)
         return out
-    elif _in_legacy_dygraph():
-        out = _legacy_C_ops.renorm(x, 'p', p, 'axis', axis, 'max_norm', max_norm)
-        return out
-
-    inputs = {'X': x}
-    attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
-
-    helper = LayerHelper("renorm", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-    helper.append_op(
-        type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
-    return out
+    else:
+        inputs = {'X': x}
+        attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
+
+        helper = LayerHelper("renorm", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}
+        )
+        return out
def inner(x, y, name=None):
...
@@ -2213,36 +2081,37 @@ def inner(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny.T).reshape(dstshape)
-
-    def __check_input(x, y):
-        var_names = {'x': x, 'y': y}
-        for name, val in var_names.items():
-            check_variable_and_dtype(
-                val, name, ['float16', 'float32', 'float64'], 'inner'
-            )
-        x_shape = list(xshape)
-        y_shape = list(yshape)
-
-        # check the inner 2 dimensions
-        if x_shape[-1] != y_shape[-1]:
-            if not ((x_shape[-1] == -1) or (y_shape[-1] == -1)):
-                raise ValueError(
-                    "After performing an optional transpose, Input X's last dim should be "
-                    "equal to Y's last dim for multiplication "
-                    "prerequisites. But received X's shape: %s, Y's shape: %s\n"
-                    % (x_shape, y_shape)
-                )
-
-    __check_input(nx, ny)
-
-    helper = LayerHelper('inner', **locals())
-    out = helper.create_variable_for_type_inference(dtype=nx.dtype)
-    helper.append_op(
-        type='matmul_v2', inputs={'X': nx, 'Y': ny.T}, outputs={'Out': out}
-    )
-    return out.reshape(dstshape)
+    else:
+
+        def __check_input(x, y):
+            var_names = {'x': x, 'y': y}
+            for name, val in var_names.items():
+                check_variable_and_dtype(
+                    val, name, ['float16', 'float32', 'float64'], 'inner'
+                )
+            x_shape = list(xshape)
+            y_shape = list(yshape)
+
+            # check the inner 2 dimensions
+            if x_shape[-1] != y_shape[-1]:
+                if not ((x_shape[-1] == -1) or (y_shape[-1] == -1)):
+                    raise ValueError(
+                        "After performing an optional transpose, Input X's last dim should be "
+                        "equal to Y's last dim for multiplication "
+                        "prerequisites. But received X's shape: %s, Y's shape: %s\n"
+                        % (x_shape, y_shape)
+                    )
+
+        __check_input(nx, ny)
+
+        helper = LayerHelper('inner', **locals())
+        out = helper.create_variable_for_type_inference(dtype=nx.dtype)
+        helper.append_op(
+            type='matmul_v2',
+            inputs={'X': nx, 'Y': ny.T},
+            outputs={'Out': out},
+        )
+        return out.reshape(dstshape)
def outer(x, y, name=None):
...
@@ -2279,24 +2148,23 @@ def outer(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny)
-
-    def __check_input(x, y):
-        var_names = {'x': x, 'y': y}
-        for name, val in var_names.items():
-            check_variable_and_dtype(
-                val, name, ['float16', 'float32', 'float64'], 'inner'
-            )
-
-    __check_input(nx, ny)
-
-    helper = LayerHelper('outer', **locals())
-    out = helper.create_variable_for_type_inference(dtype=nx.dtype)
-    helper.append_op(
-        type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out}
-    )
-    return out
+    else:
+
+        def __check_input(x, y):
+            var_names = {'x': x, 'y': y}
+            for name, val in var_names.items():
+                check_variable_and_dtype(
+                    val, name, ['float16', 'float32', 'float64'], 'inner'
+                )
+
+        __check_input(nx, ny)
+
+        helper = LayerHelper('outer', **locals())
+        out = helper.create_variable_for_type_inference(dtype=nx.dtype)
+        helper.append_op(
+            type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out}
+        )
+        return out
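Both `inner` and `outer` flatten their operands (`nx`, `ny`) and reuse the `matmul_v2` op; a quick eager-mode illustration of the public behavior (assumed from the API, not shown in the hunks):

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    y = paddle.to_tensor([4.0, 5.0, 6.0])
    print(paddle.inner(x, y))  # 32. (dot product over the last axis)
    print(paddle.outer(x, y))  # 3x3 outer product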
def logsumexp(x, axis=None, keepdim=False, name=None):
...
@@ -2345,20 +2213,16 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logsumexp(
-            x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all
-        )
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')
-
-    helper = LayerHelper('logsumexp', **locals())
-    attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')
+
+        helper = LayerHelper('logsumexp', **locals())
+        attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+        )
+        return out
def inverse(x, name=None):
...
@@ -2390,25 +2254,24 @@ def inverse(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.inverse(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.inverse(x)
-
-    def _check_input(x):
-        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
-        if len(x.shape) < 2:
-            raise ValueError(
-                "The input of inverse is expected to be a Tensor whose number "
-                "of dimensions is no less than 2. But reviced: %d, "
-                "x's shape: %s." % (len(x.shape), x.shape)
-            )
-
-    _check_input(x)
-    helper = LayerHelper('inverse', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]}
-    )
-    return out
+    else:
+
+        def _check_input(x):
+            check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
+            if len(x.shape) < 2:
+                raise ValueError(
+                    "The input of inverse is expected to be a Tensor whose number "
+                    "of dimensions is no less than 2. But reviced: %d, "
+                    "x's shape: %s." % (len(x.shape), x.shape)
+                )
+
+        _check_input(x)
+        helper = LayerHelper('inverse', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]}
+        )
+        return out
def max(x, axis=None, keepdim=False, name=None):
...
@@ -2491,27 +2354,23 @@ def max(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.max(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_max(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    helper = LayerHelper('max', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
-    )
-    if not isinstance(axis, Variable) and utils._contain_var(axis):
-        axis = utils._convert_to_tensor_list(axis)
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_max',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
+        helper = LayerHelper('max', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
+        )
+        if not isinstance(axis, Variable) and utils._contain_var(axis):
+            axis = utils._convert_to_tensor_list(axis)
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_max',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out


def min(x, axis=None, keepdim=False, name=None):
...
@@ -2593,26 +2452,21 @@ def min(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.min(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_min(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    helper = LayerHelper('min', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min'
-    )
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_min',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
+        helper = LayerHelper('min', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min'
+        )
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_min',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out
def amax(x, axis=None, keepdim=False, name=None):
...
@@ -2707,25 +2561,21 @@ def amax(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amax(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_amax(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    helper = LayerHelper('amax', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
-    )
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_amax',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        helper = LayerHelper('amax', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
+        )
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_amax',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out


def amin(x, axis=None, keepdim=False, name=None):
...
@@ -2821,24 +2671,21 @@ def amin(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amin(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_amin(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    helper = LayerHelper('amin', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
-    )
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_amin',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        helper = LayerHelper('amin', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
+        )
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_amin',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out
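`max`/`min` and `amax`/`amin` remain separate ops (`reduce_max` vs `reduce_amax`) because, per the API docs, they assign gradients differently when several elements tie for the extremum; forward values agree, as this illustrative snippet shows:

    import paddle

    x = paddle.to_tensor([[0.1, 0.9], [0.7, 0.7]])
    print(paddle.max(x, axis=1))   # [0.9, 0.7]
    print(paddle.amax(x, axis=1))  # same forward result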
def log1p(x, name=None):
...
@@ -2867,16 +2714,14 @@ def log1p(x, name=None):
     if in_dygraph_mode():
         return _C_ops.log1p(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log1p(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
-    inputs = {'X': [x]}
-    helper = LayerHelper('log1p', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
+        inputs = {'X': [x]}
+        helper = LayerHelper('log1p', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
+        return out


def log2(x, name=None):
...
@@ -2919,16 +2764,16 @@ def log2(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log2(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log2(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
-    inputs = {'X': [x]}
-    helper = LayerHelper('log2', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
+        inputs = {'X': [x]}
+        helper = LayerHelper('log2', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
+        return out


def log10(x, name=None):
...
@@ -2971,16 +2816,16 @@ def log10(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log10(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log10(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
-    inputs = {'X': [x]}
-    helper = LayerHelper('log10', **locals())
-    dtype = helper.input_dtype(input_param_name='x')
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
+        inputs = {'X': [x]}
+        helper = LayerHelper('log10', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
+        return out
def clip(x, min=None, max=None, name=None):
...
@@ -3038,65 +2883,56 @@ def clip(x, min=None, max=None, name=None):
         min = min_ if min is None else min
         max = max_ if max is None else max
         return _C_ops.clip(x, min, max)
-
-    if min is not None:
-        check_type(min, 'min', (float, int, Variable), 'clip')
-        if isinstance(min, Variable):
-            check_dtype(
-                min.dtype, 'min', ['float32', 'float64', 'int32'], 'clip',
-                '(When the type of min in clip is Variable.)',
-            )
-    if max is not None:
-        check_type(max, 'max', (float, int, Variable), 'clip')
-        if isinstance(max, Variable):
-            check_dtype(
-                max.dtype, 'max', ['float32', 'float64', 'int32'], 'clip',
-                '(When the type of max in clip is Variable.)',
-            )
-
-    if _in_legacy_dygraph():
-        if isinstance(min, Variable):
-            min = min.numpy().item(0)
-        if isinstance(max, Variable):
-            max = max.numpy().item(0)
-        min = min_ if min is None else min
-        max = max_ if max is None else max
-        return _legacy_C_ops.clip(x, "min", min, "max", max)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip'
-    )
-
-    inputs = {'X': x}
-    attrs = {'min': min_, 'max': max_}
-
-    if isinstance(min, Variable):
-        min.stop_gradient = True
-        inputs['Min'] = min
-    elif min is not None:
-        attrs['min'] = min
-
-    if isinstance(max, Variable):
-        max.stop_gradient = True
-        inputs['Max'] = max
-    elif max is not None:
-        attrs['max'] = max
-
-    helper = LayerHelper('clip', **locals())
-    output = helper.create_variable_for_type_inference(
-        dtype=helper.input_dtype('x')
-    )
-    helper.append_op(
-        type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
-    )
-
-    return output
+    else:
+        if min is not None:
+            check_type(min, 'min', (float, int, Variable), 'clip')
+            if isinstance(min, Variable):
+                check_dtype(
+                    min.dtype, 'min', ['float32', 'float64', 'int32'], 'clip',
+                    '(When the type of min in clip is Variable.)',
+                )
+        if max is not None:
+            check_type(max, 'max', (float, int, Variable), 'clip')
+            if isinstance(max, Variable):
+                check_dtype(
+                    max.dtype, 'max', ['float32', 'float64', 'int32'], 'clip',
+                    '(When the type of max in clip is Variable.)',
+                )
+
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip'
+        )
+
+        inputs = {'X': x}
+        attrs = {'min': min_, 'max': max_}
+
+        if isinstance(min, Variable):
+            min.stop_gradient = True
+            inputs['Min'] = min
+        elif min is not None:
+            attrs['min'] = min
+
+        if isinstance(max, Variable):
+            max.stop_gradient = True
+            inputs['Max'] = max
+        elif max is not None:
+            attrs['max'] = max
+
+        helper = LayerHelper('clip', **locals())
+        output = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype('x')
+        )
+        helper.append_op(
+            type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
+        )
+
+        return output
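The `Min`/`Max` tensor inputs are preserved on the static-graph branch, so both scalar and Tensor bounds keep working; a minimal eager-mode sketch (illustrative only):

    import paddle

    x = paddle.to_tensor([1.0, 3.5, 5.0, 9.0])
    print(paddle.clip(x, min=3.0, max=5.0))           # [3., 3.5, 5., 5.]
    print(paddle.clip(x, min=paddle.to_tensor(3.0)))  # Tensor bound also accepted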
@inplace_apis_in_dygraph_only
...
@@ -3117,9 +2953,6 @@ def clip_(x, min=None, max=None, name=None):
     if in_dygraph_mode():
         return _C_ops.clip_(x, min, max)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.clip_(x, "min", min, "max", max)


 def trace(x, offset=0, axis1=0, axis2=1, name=None):
     """
...
@@ -3196,24 +3029,19 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.trace(x, offset, axis1, axis2)
-
-    __check_input(x, offset, axis1, axis2)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.trace(
-            x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-        )
-
-    helper = LayerHelper('trace', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-    helper.append_op(
-        type='trace',
-        inputs={'Input': [x]},
-        attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
-        outputs={'Out': [out]},
-    )
-    return out
+    else:
+        __check_input(x, offset, axis1, axis2)
+
+        helper = LayerHelper('trace', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type='trace',
+            inputs={'Input': [x]},
+            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
+            outputs={'Out': [out]},
+        )
+        return out
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
...
@@ -3284,54 +3112,50 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.diagonal(x, offset, axis1, axis2)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.diagonal(
-                x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-            )
 
         def __check_input(x, offset, axis1, axis2):
             check_dtype(
                 x.dtype, 'Input',
                 ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
                 'diagonal',
             )

             input_shape = list(x.shape)
             assert len(input_shape) >= 2, (
                 "The x must be at least 2-dimensional, "
                 "But received Input x's dimensional: %s.\n" % len(input_shape)
             )

             axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
             axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

             assert axis1_ < len(input_shape), (
                 "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                 % (-(len(input_shape)), len(input_shape) - 1, axis1)
             )

             assert axis2_ < len(input_shape), (
                 "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                 % (-(len(input_shape)), len(input_shape) - 1, axis2)
             )

             assert axis1_ != axis2_, (
                 "axis1 and axis2 cannot be the same axis."
                 "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
             )

         __check_input(x, offset, axis1, axis2)
         helper = LayerHelper('diagonal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type='diagonal',
             inputs={'Input': [x]},
             attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
             outputs={'Out': [out]},
         )
         return out
@templatedoc(op_type="kron")
...
@@ -3363,21 +3187,22 @@ def kron(x, y, name=None):
             #  [12, 15, 18, 16, 20, 24],
             #  [21, 24, 27, 28, 32, 36]])
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.kron(x, y)
     if in_dygraph_mode():
         return _C_ops.kron(x, y)
-
-    helper = LayerHelper('kron', **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
-    )
-    check_variable_and_dtype(
-        y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
-    )
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
-    return out
+    else:
+        helper = LayerHelper('kron', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
+        )
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}
+        )
+        return out
def cumsum(x, axis=None, dtype=None, name=None):
...
@@ -3432,20 +3257,15 @@ def cumsum(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.cumsum(x, axis, flatten, False, False)
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.cumsum(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
-
-    check_type(x, 'x', (Variable), 'cumsum')
-    locals_var = locals().copy()
-    kwargs = dict()
-    for name, val in locals_var.items():
-        if val is not None:
-            kwargs[name] = val
-    _cum_sum_ = generate_layer_fn('cumsum')
-    return _cum_sum_(**kwargs)
+    else:
+        check_type(x, 'x', (Variable), 'cumsum')
+        locals_var = locals().copy()
+        kwargs = dict()
+        for name, val in locals_var.items():
+            if val is not None:
+                kwargs[name] = val
+        _cum_sum_ = generate_layer_fn('cumsum')
+        return _cum_sum_(**kwargs)
def logcumsumexp(x, axis=None, dtype=None, name=None):
...
@@ -3507,27 +3327,20 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.logcumsumexp(x, axis, flatten, False, False)
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.logcumsumexp(
-                x, 'axis', axis, 'flatten', flatten
-            )
-
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp"
-    )
-
-    helper = LayerHelper('logcumsumexp', **locals())
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='logcumsumexp',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'axis': axis, 'flatten': flatten},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp"
+        )
+
+        helper = LayerHelper('logcumsumexp', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='logcumsumexp',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'axis': axis, 'flatten': flatten},
+        )
+        return out
def cumprod(x, dim=None, dtype=None, name=None):
...
@@ -3586,26 +3399,24 @@ def cumprod(x, dim=None, dtype=None, name=None):
     if in_dygraph_mode():
         return _C_ops.cumprod(x, dim)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cumprod(x, 'dim', dim)
-
-    check_variable_and_dtype(
-        x, "x",
-        ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
-        'cumprod',
-    )
-    check_type(dim, 'dim', int, 'cumprod')
-
-    helper = LayerHelper('cumprod', **locals())
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim}
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, "x",
+            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
+            'cumprod',
+        )
+        check_type(dim, 'dim', int, 'cumprod')
+
+        helper = LayerHelper('cumprod', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='cumprod',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': dim},
+        )
+        return out
def isfinite(x, name=None):
...
@@ -3631,15 +3442,19 @@ def isfinite(x, name=None):
     """
    if in_dygraph_mode():
         return _C_ops.isfinite(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isfinite_v2(x)
-    helper = LayerHelper("isfinite_v2", **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite'
-    )
-    out = helper.create_variable_for_type_inference('bool')
-    helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        helper = LayerHelper("isfinite_v2", **locals())
+        check_variable_and_dtype(
+            x, 'x',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'isfinite',
+        )
+        out = helper.create_variable_for_type_inference('bool')
+        helper.append_op(
+            type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}
+        )
+        return out


def isinf(x, name=None):
...
@@ -3665,15 +3480,14 @@ def isinf(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isinf(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isinf_v2(x)
-    helper = LayerHelper("isinf_v2", **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
-    )
-    out = helper.create_variable_for_type_inference(dtype='bool')
-    helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        helper = LayerHelper("isinf_v2", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
+        )
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
+        return out


def isnan(x, name=None):
...
@@ -3699,16 +3513,14 @@ def isnan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isnan(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isnan_v2(x)
-    helper = LayerHelper("isnan_v2", **locals())
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
-    )
-    out = helper.create_variable_for_type_inference(dtype='bool')
-    helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        helper = LayerHelper("isnan_v2", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
+        )
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
+        return out
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
...
@@ -3775,24 +3587,24 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
     if in_dygraph_mode():
         return _C_ops.prod(x, axis, keepdim, reduce_all)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_prod(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    helper = LayerHelper('reduce_prod', **locals())
-    check_variable_and_dtype(
-        x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod'
-    )
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-    helper.append_op(
-        type='reduce_prod',
-        inputs={'X': x},
-        outputs={'Out': out},
-        attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
-    )
-    return out
+    else:
+        helper = LayerHelper('reduce_prod', **locals())
+        check_variable_and_dtype(
+            x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod'
+        )
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        helper.append_op(
+            type='reduce_prod',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out
def sign(x, name=None):
...
@@ -3817,17 +3629,16 @@ def sign(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sign(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sign(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
-    helper = LayerHelper("sign", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-    helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
-
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sign'
+        )
+        helper = LayerHelper("sign", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
+
+        return out
def tanh(x, name=None):
...
@@ -3857,16 +3668,15 @@ def tanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tanh(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tanh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
-    check_type(x, 'x', (Variable), 'tanh')
-    helper = LayerHelper('tanh', **locals())
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
+        check_type(x, 'x', (Variable), 'tanh')
+        helper = LayerHelper('tanh', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
+        return out


@inplace_apis_in_dygraph_only
...
@@ -3875,9 +3685,7 @@ def tanh_(x, name=None):
     Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_tanh`.
     """
-    if in_dygraph_mode():
-        return _C_ops.tanh_(x)
-    return _legacy_C_ops.tanh_(x)
+    return _C_ops.tanh_(x)
def increment(x, value=1.0, name=None):
...
@@ -3905,21 +3713,18 @@ def increment(x, value=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.increment_(x, value)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.increment(x, 'step', value)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
-    )
-    helper = LayerHelper("increment", **locals())
-    helper.append_op(
-        type='increment',
-        inputs={'X': [x]},
-        outputs={'Out': [x]},
-        attrs={'step': float(value)},
-    )
-    return x
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
+        )
+        helper = LayerHelper("increment", **locals())
+        helper.append_op(
+            type='increment',
+            inputs={'X': [x]},
+            outputs={'Out': [x]},
+            attrs={'step': float(value)},
+        )
+        return x
def all(x, axis=None, keepdim=False, name=None):
...
@@ -3973,28 +3778,26 @@ def all(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.all(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_all(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    attrs = {
-        'dim': axis,
-        'keep_dim': keepdim,
-        'reduce_all': reduce_all,
-    }
-    check_variable_and_dtype(x, 'x', ['bool'], 'all')
-    check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
-
-    helper = LayerHelper('all', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_all', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        attrs = {
+            'dim': axis,
+            'keep_dim': keepdim,
+            'reduce_all': reduce_all,
+        }
+        check_variable_and_dtype(x, 'x', ['bool'], 'all')
+        check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
+
+        helper = LayerHelper('all', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_all',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out


def any(x, axis=None, keepdim=False, name=None):
...
@@ -4049,29 +3852,27 @@ def any(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.any(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_any(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    attrs = {
-        'dim': axis,
-        'keep_dim': keepdim,
-        'reduce_all': reduce_all,
-    }
-    check_variable_and_dtype(x, 'x', ['bool'], 'any')
-    check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
-
-    helper = LayerHelper('any', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reduce_any', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        attrs = {
+            'dim': axis,
+            'keep_dim': keepdim,
+            'reduce_all': reduce_all,
+        }
+        check_variable_and_dtype(x, 'x', ['bool'], 'any')
+        check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
+
+        helper = LayerHelper('any', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_any',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
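Both reductions still require a boolean input on the static-graph branch (`check_variable_and_dtype(x, 'x', ['bool'], ...)`); eager usage for reference:

    import paddle

    x = paddle.to_tensor([[True, False], [True, True]])
    print(paddle.all(x))          # False
    print(paddle.any(x, axis=0))  # [True, True]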
def broadcast_shape(x_shape, y_shape):
...
@@ -4137,22 +3938,21 @@ def conj(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.conj(x)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.conj(x)
-
-    check_variable_and_dtype(
-        x, "x",
-        ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
-        'conj',
-    )
-
-    helper = LayerHelper('conj', **locals())
-    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
-
-    helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, "x",
+            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
+            'conj',
+        )
+
+        helper = LayerHelper('conj', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+
+        helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
+        return out
def digamma(x, name=None):
...
@@ -4184,14 +3984,11 @@ def digamma(x, name=None):
     if in_dygraph_mode():
         return _C_ops.digamma(x)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.digamma(x)
-
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
         helper = LayerHelper('digamma', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
         return out


def lgamma(x, name=None):
...
@@ -4221,14 +4018,12 @@ def lgamma(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.lgamma(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.lgamma(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
-    helper = LayerHelper('lgamma', **locals())
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
+        helper = LayerHelper('lgamma', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out})
+        return out
def neg(x, name=None):
...
@@ -4304,27 +4099,24 @@ def atan2(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.atan2(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.atan2(x, y)
-        else:
-            check_variable_and_dtype(
-                x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
-                'atan2',
-            )
-            check_variable_and_dtype(
-                y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'],
-                'atan2',
-            )
-
-            helper = LayerHelper('atan2', **locals())
-            inputs = {'X1': x, 'X2': y}
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
-            return out
+        check_variable_and_dtype(
+            x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2',
+        )
+        check_variable_and_dtype(
+            y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2',
+        )
+
+        helper = LayerHelper('atan2', **locals())
+        inputs = {'X1': x, 'X2': y}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
+        return out
def logit(x, eps=None, name=None):
...
@@ -4367,20 +4159,23 @@ def logit(x, eps=None, name=None):
           # [-1.0277, -4.5365, -0.9544, -1.3269,  1.4468]

     """
     if eps is None:
         eps = 0.0
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logit(x, 'eps', eps)
     if in_dygraph_mode():
         return _C_ops.logit(x, eps)
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
-    helper = LayerHelper("logit", **locals())
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='logit', inputs={'X': x}, outputs={'Out': out}, attrs={'eps': eps}
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'logit'
+        )
+        helper = LayerHelper("logit", **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='logit',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'eps': eps},
+        )
+        return out
def lerp(x, y, weight, name=None):
...
@@ -4419,23 +4214,21 @@ def lerp(x, y, weight, name=None):
             weight = paddle.to_tensor(weight, dtype=x.dtype)

         return _C_ops.lerp(x, y, weight)
-    if _in_legacy_dygraph():
-        if isinstance(weight, float):
-            weight = paddle.to_tensor(weight, dtype=x.dtype)
-        return _legacy_C_ops.lerp(x, y, weight)
-
-    if isinstance(weight, float):
-        weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
-    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
-    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
-
-    helper = LayerHelper('lerp', **locals())
-    inputs = {'X': x, 'Y': y, 'Weight': weight}
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
-    return out
+    else:
+        if isinstance(weight, float):
+            weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
+
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(
+            weight, 'weight', ['float32', 'float64'], 'lerp'
+        )
+
+        helper = LayerHelper('lerp', **locals())
+        inputs = {'X': x, 'Y': y, 'Weight': weight}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
+        return out


@inplace_apis_in_dygraph_only
...
@@ -4456,9 +4249,7 @@ def lerp_(x, y, weight, name=None):
                 out_shape, x.shape
             )
         )
-    if in_dygraph_mode():
-        return _C_ops.lerp_(x, y, weight)
-    return _legacy_C_ops.lerp_(x, y, weight)
+    return _C_ops.lerp_(x, y, weight)
def erfinv(x, name=None):
...
@@ -4488,16 +4279,12 @@ def erfinv(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.erfinv(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.erfinv(x)
-
-    helper = LayerHelper('erfinv', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
+        helper = LayerHelper('erfinv', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
+        return out


@inplace_apis_in_dygraph_only
...
@@ -4507,9 +4294,7 @@ def erfinv_(x, name=None):
     Please refer to :ref:`api_tensor_erfinv`.
     """
     check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
-    if in_dygraph_mode():
-        return _C_ops.erfinv_(x)
-    return _legacy_C_ops.erfinv_(x)
+    return _C_ops.erfinv_(x)
def rad2deg(x, name=None):
...
@@ -4558,10 +4343,6 @@ def rad2deg(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, rad2deg_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', rad2deg_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg'
...
@@ -4626,10 +4407,6 @@ def deg2rad(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, deg2rad_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', deg2rad_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad'
...
@@ -4729,7 +4506,7 @@ def gcd(x, y, name=None):
         )
         return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         while _gcd_cond_fn(x, y):
             x, y = _gcd_body_fn(x, y)
...
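`gcd` is built from `paddle.where`-based cond/body helpers, so in eager mode the Euclidean loop above simply runs in Python until `_gcd_cond_fn` fails; for example:

    import paddle

    print(paddle.gcd(paddle.to_tensor(12), paddle.to_tensor(20)))  # 4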
@@ -4907,68 +4684,6 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
             return _C_ops.logical_xor(input_back, input_front)
         else:
             return _C_ops.subtract(input_back, input_front)
-    elif _in_legacy_dygraph():
-        has_pend = False
-        input_list = []
-        if prepend is not None and append is not None:
-            input_list = [prepend, x, append]
-            has_pend = True
-        elif prepend is not None:
-            input_list = [prepend, x]
-            has_pend = True
-        elif append is not None:
-            input_list = [x, append]
-            has_pend = True
-        if has_pend:
-            new_input = _varbase_creator()
-            _legacy_C_ops.concat(input_list, new_input, 'axis', axis)
-        else:
-            new_input = x
-
-        attrs_1 = ()
-        attrs_2 = ()
-        dim_len = new_input.shape[axis]
-
-        starts_1 = [0]
-        attrs_1 += ('starts', starts_1)
-        ends_1 = [dim_len - 1]
-        attrs_1 += ('ends', ends_1)
-        input_front = _legacy_C_ops.slice(
-            new_input, None, None, None, None,
-            'axes', axes, 'infer_flags', infer_flags, *attrs_1
-        )
-        starts_2 = [1]
-        attrs_2 += ('starts', starts_2)
-        ends_2 = [dim_len]
-        attrs_2 += ('ends', ends_2)
-        input_back = _legacy_C_ops.slice(
-            new_input, None, None, None, None,
-            'axes', axes, 'infer_flags', infer_flags, *attrs_2
-        )
-
-        if x.dtype == paddle.bool:
-            return _legacy_C_ops.logical_xor(input_back, input_front)
-        else:
-            return paddle.tensor.math._subtract_with_axis(
-                input_back, input_front, axis=axis
-            )
     else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff'
...
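With the legacy slice/concat implementation gone, the eager path of `diff` now ends in `_C_ops.subtract` (or `_C_ops.logical_xor` for bool tensors), as the retained lines show. Behavior is unchanged:

    import paddle

    x = paddle.to_tensor([1, 4, 9, 16])
    print(paddle.diff(x))  # [3, 5, 7]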
@@ -5082,21 +4797,19 @@ def angle(x, name=None):
     if in_dygraph_mode():
         return _C_ops.angle(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.angle(x)
-
-    check_variable_and_dtype(
-        x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle'
-    )
-    op_type = "angle"
-    helper = LayerHelper(op_type, **locals())
-    inputs = {"X": x}
-    out = helper.create_variable_for_type_inference(
-        dtype=_complex_to_real_dtype(x.dtype)
-    )
-    outputs = {"Out": out}
-    helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle'
+        )
+        op_type = "angle"
+        helper = LayerHelper(op_type, **locals())
+        inputs = {"X": x}
+        out = helper.create_variable_for_type_inference(
+            dtype=_complex_to_real_dtype(x.dtype)
+        )
+        outputs = {"Out": out}
+        helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
+        return out
def heaviside(x, y, name=None):
...
@@ -5143,11 +4856,12 @@ def heaviside(x, y, name=None):
     op_type = 'elementwise_heaviside'
     axis = -1
     act = None
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type
         )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def
frac
(
x
,
name
=
None
):
...
...
@@ -5192,24 +4906,18 @@ def frac(x, name=None):
         y = _C_ops.trunc(x)
         return _C_ops.subtract(x, y)
     else:
-        if _in_legacy_dygraph():
-            y = _legacy_C_ops.trunc(x)
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            inputs = {"X": x}
-            attrs = {}
-
-            helper = LayerHelper("trunc", **locals())
-            check_variable_and_dtype(
-                x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc'
-            )
-            y = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(
-                type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
-            )
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        inputs = {"X": x}
+        attrs = {}
+
+        helper = LayerHelper("trunc", **locals())
+        check_variable_and_dtype(
+            x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc'
+        )
+        y = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
+        )
+        return _elementwise_op(LayerHelper(op_type, **locals()))


 def sgn(x, name=None):
...
...
@@ -5334,7 +5042,7 @@ def take(x, index, mode='raise', name=None):
             )
         )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         if not isinstance(index, (paddle.Tensor, Variable)):
             raise TypeError(
                 "The type of 'index' must be Tensor, but got {}".format(
...
...
python/paddle/tensor/ops.py  View file @ 861fef52
...
...
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .. import _C_ops, _legacy_C_ops
+from .. import _C_ops
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper
 from .layer_function_generator import (
     add_sample_code,
...
...
@@ -218,14 +218,14 @@ def acos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acos')
-    helper = LayerHelper('acos', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acos')
+        helper = LayerHelper('acos', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='acos', inputs={"X": x}, outputs={"Out": out})
+        return out
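Every activation in this file is migrated to the same two-branch shape. A minimal sketch of the pattern (unary_op is a placeholder name, not a real operator):

    def unary_op(x, name=None):
        if in_dygraph_mode():
            # eager mode: dispatch straight to the C++ kernel
            return _C_ops.unary_op(x)
        else:
            # static graph: validate the dtype, then append an operator to the program
            check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'unary_op')
            helper = LayerHelper('unary_op', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(type='unary_op', inputs={"X": x}, outputs={"Out": out})
            return out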
 def acosh(x, name=None):
...
...
@@ -255,14 +255,14 @@ def acosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.acosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.acosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acosh')
-    helper = LayerHelper('acosh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'acosh')
+        helper = LayerHelper('acosh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='acosh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def asin(x, name=None):
...
...
@@ -292,14 +292,14 @@ def asin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asin')
-    helper = LayerHelper('asin', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asin')
+        helper = LayerHelper('asin', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='asin', inputs={"X": x}, outputs={"Out": out})
+        return out


 def asinh(x, name=None):
...
...
@@ -329,14 +329,14 @@ def asinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.asinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.asinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asinh')
-    helper = LayerHelper('asinh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'asinh')
+        helper = LayerHelper('asinh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='asinh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def atan(x, name=None):
...
...
@@ -366,14 +366,14 @@ def atan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atan')
-    helper = LayerHelper('atan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atan')
+        helper = LayerHelper('atan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
+        return out


 def atanh(x, name=None):
...
...
@@ -403,14 +403,14 @@ def atanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atanh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atanh')
-    helper = LayerHelper('atanh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'atanh')
+        helper = LayerHelper('atanh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def ceil(x, name=None):
...
...
@@ -441,14 +441,14 @@ def ceil(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.ceil(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.ceil(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
-    helper = LayerHelper('ceil', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
+        helper = LayerHelper('ceil', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
+        return out


 def cos(x, name=None):
...
...
@@ -480,14 +480,14 @@ def cos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cos(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cos')
-    helper = LayerHelper('cos', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cos')
+        helper = LayerHelper('cos', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
+        return out


 def cosh(x, name=None):
...
...
@@ -519,14 +519,14 @@ def cosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cosh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cosh')
-    helper = LayerHelper('cosh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'cosh')
+        helper = LayerHelper('cosh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def exp(x, name=None):
...
...
@@ -557,27 +557,25 @@ def exp(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exp(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.exp(x)
-
-    check_variable_and_dtype(
-        x, 'x',
-        ['int32', 'int64', 'float16', 'float32', 'float64', 'complex64', 'complex128'],
-        'exp',
-    )
-    helper = LayerHelper('exp', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x',
+            ['int32', 'int64', 'float16', 'float32', 'float64', 'complex64', 'complex128'],
+            'exp',
+        )
+        helper = LayerHelper('exp', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
+        return out


 def expm1(x, name=None):
...
...
@@ -608,14 +606,14 @@ def expm1(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expm1(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expm1(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
-    helper = LayerHelper('expm1', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
+        helper = LayerHelper('expm1', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
+        return out


 def floor(x, name=None):
...
...
@@ -646,14 +644,14 @@ def floor(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.floor(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.floor(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
-    helper = LayerHelper('floor', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
+        helper = LayerHelper('floor', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
+        return out


 def reciprocal(x, name=None):
...
...
@@ -684,16 +682,16 @@ def reciprocal(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.reciprocal(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reciprocal(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'reciprocal')
-    helper = LayerHelper('reciprocal', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reciprocal', inputs={"X": x}, outputs={"Out": out}
-    )
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'reciprocal')
+        helper = LayerHelper('reciprocal', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
+        )
+        return out


 def round(x, name=None):
...
...
@@ -731,14 +729,14 @@ def round(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.round(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.round(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
-    helper = LayerHelper('round', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
+        helper = LayerHelper('round', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
+        return out


 def rsqrt(x, name=None):
...
...
@@ -770,14 +768,14 @@ def rsqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.rsqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.rsqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'rsqrt')
-    helper = LayerHelper('rsqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'rsqrt')
+        helper = LayerHelper('rsqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sigmoid(x, name=None):
...
...
@@ -808,16 +806,14 @@ def sigmoid(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sigmoid(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sigmoid(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sigmoid')
-    helper = LayerHelper('sigmoid', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sigmoid')
+        helper = LayerHelper('sigmoid', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sin(x, name=None):
...
...
@@ -847,14 +843,14 @@ def sin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sin(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin')
-    helper = LayerHelper('sin', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sin')
+        helper = LayerHelper('sin', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sinh(x, name=None):
...
...
@@ -884,14 +880,14 @@ def sinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sinh(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sinh')
-    helper = LayerHelper('sinh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sinh')
+        helper = LayerHelper('sinh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sqrt(x, name=None):
...
...
@@ -920,14 +916,14 @@ def sqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sqrt(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sqrt')
-    helper = LayerHelper('sqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sqrt')
+        helper = LayerHelper('sqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


 def square(x, name=None):
...
...
@@ -956,27 +952,25 @@ def square(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.square(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.square(x)
-
-    check_variable_and_dtype(
-        x, 'x',
-        ['int32', 'int64', 'float16', 'float32', 'float64', 'complex64', 'complex128'],
-        'square',
-    )
-    helper = LayerHelper('square', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x',
+            ['int32', 'int64', 'float16', 'float32', 'float64', 'complex64', 'complex128'],
+            'square',
+        )
+        helper = LayerHelper('square', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
+        return out


 def tan(x, name=None):
...
...
@@ -1008,14 +1002,14 @@ def tan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tan(x)
-
-    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tan')
-    helper = LayerHelper('tan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tan')
+        helper = LayerHelper('tan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
+        return out


 _erf_ = generate_layer_fn('erf')
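Behavior is unchanged by the cleanup; for example exp, which per the hunk above also accepts int and complex dtypes (a usage sketch, printed values approximate):

    import paddle

    x = paddle.to_tensor([0.0, 1.0, 2.0])
    print(paddle.exp(x))  # approximately [1.0, 2.7182817, 7.389056]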
...
...
python/paddle/tensor/random.py  View file @ 861fef52
...
...
@@ -16,11 +16,7 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import (
-    _current_expected_place,
-    _in_legacy_dygraph,
-    in_dygraph_mode,
-)
+from paddle.fluid.framework import _current_expected_place, in_dygraph_mode
 from paddle.static import Variable

 from ..fluid.data_feeder import (
...
...
@@ -80,21 +76,18 @@ def bernoulli(x, name=None):
     if in_dygraph_mode():
         return _C_ops.bernoulli(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.bernoulli(x)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
-
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype
-    )  # maybe set out to int32 ?
-    helper.append_op(
-        type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={}
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
+
+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype
+        )  # maybe set out to int32 ?
+        helper.append_op(
+            type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={}
+        )
+        out.stop_gradient = True
+        return out
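A usage sketch of the eager path that remains (outputs are random draws; paddle.seed only fixes the sequence):

    import paddle

    paddle.seed(2022)
    p = paddle.to_tensor([0.2, 0.5, 0.9])
    print(paddle.bernoulli(p))  # each entry is 0. or 1., drawn with probability p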
 def poisson(x, name=None):
...
...
@@ -129,18 +122,15 @@ def poisson(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.poisson(x)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.poisson(x)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")
-
-    helper = LayerHelper("poisson", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='poisson', inputs={'X': x}, outputs={'Out': out}, attrs={}
-    )
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")
+
+        helper = LayerHelper("poisson", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='poisson', inputs={'X': x}, outputs={'Out': out}, attrs={}
+        )
+        return out


 def multinomial(x, num_samples=1, replacement=False, name=None):
...
...
@@ -197,26 +187,21 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     if in_dygraph_mode():
         return _C_ops.multinomial(x, num_samples, replacement)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multinomial(
-            x, 'num_samples', num_samples, 'replacement', replacement
-        )
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
-
-    helper = LayerHelper("multinomial", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=convert_np_dtype_to_dtype_('int64')
-    )
-    helper.append_op(
-        type='multinomial',
-        inputs={"X": x},
-        outputs={'Out': out},
-        attrs={'num_samples': num_samples, 'replacement': replacement},
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
+
+        helper = LayerHelper("multinomial", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=convert_np_dtype_to_dtype_('int64')
+        )
+        helper.append_op(
+            type='multinomial',
+            inputs={"X": x},
+            outputs={'Out': out},
+            attrs={'num_samples': num_samples, 'replacement': replacement},
+        )
+        out.stop_gradient = True
+        return out
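A usage sketch (the returned indices are random draws):

    import paddle

    probs = paddle.to_tensor([0.1, 0.3, 0.6])
    idx = paddle.multinomial(probs, num_samples=2, replacement=True)
    # idx holds two int64 indices into probs, e.g. Tensor([2, 1])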
 def uniform_random_batch_size_like(
...
...
@@ -356,44 +341,32 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         return _C_ops.gaussian(
             shape, float(mean), float(std), seed, dtype, place
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.gaussian_random(
-            'shape', shape, 'mean', float(mean), 'std', float(std),
-            'seed', seed, 'dtype', dtype,
-        )
-
-    check_shape(shape, op_type_for_check)
-    check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
-
-    inputs = {}
-    attrs = {
-        'mean': mean,
-        'std': std,
-        'seed': seed,
-        'dtype': dtype,
-        'use_mkldnn': False,
-    }
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check
-    )
-
-    helper = LayerHelper('gaussian', **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type='gaussian_random', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_shape(shape, op_type_for_check)
+        check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
+
+        inputs = {}
+        attrs = {
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': dtype,
+            'use_mkldnn': False,
+        }
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check
+        )
+
+        helper = LayerHelper('gaussian', **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type='gaussian_random',
+            inputs=inputs,
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        out.stop_gradient = True
+        return out


 def standard_normal(shape, dtype=None, name=None):
...
...
@@ -550,7 +523,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         # [1.00780561 3.78457445 5.81058198]  # random

     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_type(mean, 'mean', (int, float, Variable), 'normal')
         check_type(std, 'std', (int, float, Variable), 'normal')
         if isinstance(mean, Variable):
...
@@ -588,7 +561,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         return gaussian(shape=shape, mean=mean, std=std, name=name)

     out = out * std + mean
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         out.stop_grediant = True
     return out
...
...
@@ -680,40 +653,28 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
             seed,
             _current_expected_place(),
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.uniform_random(
-            'shape', shape, 'min', float(min), 'max', float(max),
-            'seed', seed, 'dtype', dtype,
-        )
-
-    check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
-    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
-    check_type(min, 'min', (float, int, Variable), 'uniform/rand')
-    check_type(max, 'max', (float, int, Variable), 'uniform/rand')
-
-    inputs = dict()
-    attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand'
-    )
-
-    helper = LayerHelper("uniform", **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
+        check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
+        check_type(min, 'min', (float, int, Variable), 'uniform/rand')
+        check_type(max, 'max', (float, int, Variable), 'uniform/rand')
+
+        inputs = dict()
+        attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand'
+        )
+
+        helper = LayerHelper("uniform", **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type="uniform_random",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={"Out": out},
+        )
+        out.stop_gradient = True
+        return out


 @dygraph_only
...
@@ -751,12 +712,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
         #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
         #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
     """
-    if in_dygraph_mode():
-        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
-    else:
-        return _legacy_C_ops.uniform_random_inplace_(
-            x, 'min', min, 'max', max, 'seed', seed
-        )
+    return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)


 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
...
...
@@ -841,33 +797,28 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
         shape = utils.convert_shape_to_list(shape)
         place = _current_expected_place()
         return _C_ops.randint(low, high, shape, dtype, place)
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.randint(
-            'shape', shape, 'low', low, 'high', high, 'seed', 0, 'dtype', dtype
-        )
-
-    check_shape(shape, 'randint')
-    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
-    if low >= high:
-        raise ValueError(
-            "randint's low must less then high, but received low = {0}, "
-            "high = {1}".format(low, high)
-        )
-
-    inputs = dict()
-    attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='randint'
-    )
-
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(dtype=dtype)
-    helper.append_op(
-        type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_shape(shape, 'randint')
+        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
+        if low >= high:
+            raise ValueError(
+                "randint's low must less then high, but received low = {0}, "
+                "high = {1}".format(low, high)
+            )
+
+        inputs = dict()
+        attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='randint'
+        )
+
+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(dtype=dtype)
+        helper.append_op(
+            type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out
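A usage sketch of the eager branch. Note that after this hunk the Python-side low >= high check runs only in the static-graph branch; presumably the C++ kernel performs its own validation in eager mode (that last point is an assumption, not stated in the diff):

    import paddle

    r = paddle.randint(low=0, high=10, shape=[2, 3], dtype='int64')  # values in [0, 10)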
 def randint_like(x, low=0, high=None, dtype=None, name=None):
...
...
@@ -1015,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
                 "high = {1}".format(low, high)
             )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _legacy_C_ops.randint(
             'shape',
...
@@ -1031,33 +982,33 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
         )
         out = paddle.cast(out, dtype)
         return out
-
-    check_shape(shape, 'randint_like')
-    check_dtype(
-        dtype,
-        'dtype',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'randint_like',
-    )
-
-    inputs = {"ShapeTensor": shape}
-    attrs = {
-        'low': low,
-        'high': high,
-        'seed': 0,
-        'dtype': core.VarDesc.VarType.INT64,
-    }
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.INT64
-    )
-    helper.append_op(
-        type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    out = paddle.cast(out, dtype)
-    return out
+    else:
+        check_shape(shape, 'randint_like')
+        check_dtype(
+            dtype,
+            'dtype',
+            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            'randint_like',
+        )
+
+        inputs = {"ShapeTensor": shape}
+        attrs = {
+            'low': low,
+            'high': high,
+            'seed': 0,
+            'dtype': core.VarDesc.VarType.INT64,
+        }
+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=core.VarDesc.VarType.INT64
+        )
+        helper.append_op(
+            type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        out = paddle.cast(out, dtype)
+        return out


 def randperm(n, dtype="int64", name=None):
...
...
@@ -1095,23 +1046,23 @@ def randperm(n, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.randperm(n, dtype, _current_expected_place())
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
-
-    if n < 1:
-        raise ValueError("The input n should be greater than 0 in randperm op.")
-    check_dtype(
-        dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
-    )
-
-    helper = LayerHelper("randperm", **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    attrs = {'n': n, 'dtype': dtype, 'seed': 0}
-    helper.append_op(
-        type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        if n < 1:
+            raise ValueError(
+                "The input n should be greater than 0 in randperm op."
+            )
+        check_dtype(
+            dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
+        )
+
+        helper = LayerHelper("randperm", **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        attrs = {'n': n, 'dtype': dtype, 'seed': 0}
+        helper.append_op(
+            type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def rand(shape, dtype=None, name=None):
...
...
@@ -1199,16 +1150,14 @@ def exponential_(x, lam=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exponential_(x, lam)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.exponential_(x, "lambda", lam)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
-
-    helper = LayerHelper("exponential", **locals())
-    helper.append_op(
-        type='exponential',
-        inputs={"X": x},
-        outputs={'Out': x},
-        attrs={"lambda": lam},
-    )
-    return x
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
+
+        helper = LayerHelper("exponential", **locals())
+        helper.append_op(
+            type='exponential',
+            inputs={"X": x},
+            outputs={'Out': x},
+            attrs={"lambda": lam},
+        )
+        return x
python/paddle/tensor/search.py  View file @ 861fef52
...
...
@@ -17,14 +17,12 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import VarDesc, Variable

 from ..fluid.data_feeder import check_dtype, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import (
     LayerHelper,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
...
@@ -99,33 +97,28 @@ def argsort(x, axis=-1, descending=False, name=None):
     if in_dygraph_mode():
         _, ids = _C_ops.argsort(x, axis, descending)
         return ids
-
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'argsort',
-    )
-
-    if _in_legacy_dygraph():
-        _, ids = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return ids
-
-    helper = LayerHelper("argsort", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    ids = helper.create_variable_for_type_inference(
-        VarDesc.VarType.INT64, stop_gradient=True
-    )
-    helper.append_op(
-        type='argsort',
-        inputs={'X': x},
-        outputs={'Out': out, 'Indices': ids},
-        attrs={'axis': axis, 'descending': descending},
-    )
-    return ids
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'argsort',
+        )
+        helper = LayerHelper("argsort", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        ids = helper.create_variable_for_type_inference(
+            VarDesc.VarType.INT64, stop_gradient=True
+        )
+        helper.append_op(
+            type='argsort',
+            inputs={'X': x},
+            outputs={'Out': out, 'Indices': ids},
+            attrs={'axis': axis, 'descending': descending},
+        )
+        return ids
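A usage sketch of the surviving dispatch:

    import paddle

    x = paddle.to_tensor([3.0, 1.0, 2.0])
    print(paddle.argsort(x))                   # Tensor([1, 2, 0])
    print(paddle.argsort(x, descending=True))  # Tensor([0, 2, 1])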
 def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
...
...
@@ -187,40 +180,27 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_max(
-            x, 'axis', axis, 'dtype', var_dtype,
-            'keepdims', keepdim, 'flatten', flatten,
-        )
-        return out
-
-    helper = LayerHelper("argmax", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'paddle.argmax',
-    )
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
-    attrs = {}
-    out = helper.create_variable_for_type_inference(var_dtype)
-    attrs['keepdims'] = keepdim
-    attrs['axis'] = axis
-    attrs['flatten'] = flatten
-    attrs['dtype'] = var_dtype
-    helper.append_op(
-        type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("argmax", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'paddle.argmax',
+        )
+        check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
+        attrs = {}
+        out = helper.create_variable_for_type_inference(var_dtype)
+        attrs['keepdims'] = keepdim
+        attrs['axis'] = axis
+        attrs['flatten'] = flatten
+        attrs['dtype'] = var_dtype
+        helper.append_op(
+            type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     """
...
...
@@ -281,40 +261,27 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_min(
-            x, 'axis', axis, 'dtype', var_dtype,
-            'keepdims', keepdim, 'flatten', flatten,
-        )
-        return out
-
-    helper = LayerHelper("argmin", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'paddle.argmin',
-    )
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
-    out = helper.create_variable_for_type_inference(var_dtype)
-    attrs = {}
-    attrs['keepdims'] = keepdim
-    attrs['axis'] = axis
-    attrs['flatten'] = flatten
-    attrs['dtype'] = var_dtype
-    helper.append_op(
-        type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("argmin", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'paddle.argmin',
+        )
+        check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
+        out = helper.create_variable_for_type_inference(var_dtype)
+        attrs = {}
+        attrs['keepdims'] = keepdim
+        attrs['axis'] = axis
+        attrs['flatten'] = flatten
+        attrs['dtype'] = var_dtype
+        helper.append_op(
+            type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def index_select(x, index, axis=0, name=None):
     """
...
...
@@ -354,30 +321,30 @@ def index_select(x, index, axis=0, name=None):
     if in_dygraph_mode():
         return _C_ops.index_select(x, index, axis)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.index_select(x, index, 'dim', axis)
-
-    helper = LayerHelper("index_select", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.tensor.search.index_select',
-    )
-    check_variable_and_dtype(
-        index,
-        'index',
-        ['int32', 'int64'],
-        'paddle.tensor.search.index_select',
-    )
-
-    out = helper.create_variable_for_type_inference(x.dtype)
-
-    helper.append_op(
-        type='index_select',
-        inputs={'X': x, 'Index': index},
-        outputs={'Out': out},
-        attrs={'dim': axis},
-    )
-    return out
+    else:
+        helper = LayerHelper("index_select", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.index_select',
+        )
+        check_variable_and_dtype(
+            index, 'index', ['int32', 'int64'], 'paddle.tensor.search.index_select'
+        )
+
+        out = helper.create_variable_for_type_inference(x.dtype)
+
+        helper.append_op(
+            type='index_select',
+            inputs={'X': x, 'Index': index},
+            outputs={'Out': out},
+            attrs={'dim': axis},
+        )
+        return out


 def nonzero(x, as_tuple=False):
...
...
@@ -438,8 +405,6 @@ def nonzero(x, as_tuple=False):
     if in_dygraph_mode():
         outs = _C_ops.nonzero(x)
-    elif paddle.in_dynamic_mode():
-        outs = _legacy_C_ops.where_index(x)
     else:
         helper = LayerHelper("where_index", **locals())
...
...
@@ -522,26 +487,21 @@ def sort(x, axis=-1, descending=False, name=None):
     if in_dygraph_mode():
         outs, _ = _C_ops.argsort(x, axis, descending)
         return outs
-
-    if _in_legacy_dygraph():
-        outs, _ = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return outs
-    helper = LayerHelper("sort", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=False
-    )
-    ids = helper.create_variable_for_type_inference(
-        VarDesc.VarType.INT64, stop_gradient=True
-    )
-    helper.append_op(
-        type='argsort',
-        inputs={'X': x},
-        outputs={'Out': out, 'Indices': ids},
-        attrs={'axis': axis, 'descending': descending},
-    )
-    return out
+    else:
+        helper = LayerHelper("sort", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=False
+        )
+        ids = helper.create_variable_for_type_inference(
+            VarDesc.VarType.INT64, stop_gradient=True
+        )
+        helper.append_op(
+            type='argsort',
+            inputs={'X': x},
+            outputs={'Out': out, 'Indices': ids},
+            attrs={'axis': axis, 'descending': descending},
+        )
+        return out


 def mode(x, axis=-1, keepdim=False, name=None):
...
...
@@ -577,26 +537,24 @@ def mode(x, axis=-1, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mode(x, axis, keepdim)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.mode(x, "axis", axis, "keepdim", keepdim)
-
-    helper = LayerHelper("mode", **locals())
-    inputs = {"X": [x]}
-    attrs = {}
-    attrs['axis'] = axis
-    attrs['keepdim'] = keepdim
-
-    values = helper.create_variable_for_type_inference(dtype=x.dtype)
-    indices = helper.create_variable_for_type_inference(dtype="int64")
-
-    helper.append_op(
-        type="mode",
-        inputs=inputs,
-        outputs={"Out": [values], "Indices": [indices]},
-        attrs=attrs,
-    )
-    indices.stop_gradient = True
-    return values, indices
+    else:
+        helper = LayerHelper("mode", **locals())
+        inputs = {"X": [x]}
+        attrs = {}
+        attrs['axis'] = axis
+        attrs['keepdim'] = keepdim
+
+        values = helper.create_variable_for_type_inference(dtype=x.dtype)
+        indices = helper.create_variable_for_type_inference(dtype="int64")
+
+        helper.append_op(
+            type="mode",
+            inputs=inputs,
+            outputs={"Out": [values], "Indices": [indices]},
+            attrs=attrs,
+        )
+        indices.stop_gradient = True
+        return values, indices


 def where(condition, x=None, y=None, name=None):
...
...
@@ -688,25 +646,20 @@ def where(condition, x=None, y=None, name=None):
     if in_dygraph_mode():
         return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.where(
-                broadcast_condition, broadcast_x, broadcast_y
-            )
-        else:
-            helper = LayerHelper("where", **locals())
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-            helper.append_op(
-                type='where',
-                inputs={
-                    'Condition': broadcast_condition,
-                    'X': broadcast_x,
-                    'Y': broadcast_y,
-                },
-                outputs={'Out': [out]},
-            )
-
-            return out
+        helper = LayerHelper("where", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type='where',
+            inputs={
+                'Condition': broadcast_condition,
+                'X': broadcast_x,
+                'Y': broadcast_y,
+            },
+            outputs={'Out': [out]},
+        )
+
+        return out
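A usage sketch of the two-branch dispatch above:

    import paddle

    x = paddle.to_tensor([0.2, 0.5, 0.9])
    y = paddle.ones([3])
    print(paddle.where(x > 0.5, x, y))  # Tensor([1. , 1. , 0.9]): picks x where the condition holds, else y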
 def index_sample(x, index):
...
...
@@ -785,30 +738,27 @@ def index_sample(x, index):
     if in_dygraph_mode():
         return _C_ops.index_sample(x, index)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.index_sample(x, index)
-        else:
-            helper = LayerHelper("index_sample", **locals())
-            check_variable_and_dtype(
-                x,
-                'x',
-                ['float16', 'float32', 'float64', 'int32', 'int64'],
-                'paddle.tensor.search.index_sample',
-            )
-            check_variable_and_dtype(
-                index,
-                'index',
-                ['int32', 'int64'],
-                'paddle.tensor.search.index_sample',
-            )
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-            helper.append_op(
-                type='index_sample',
-                inputs={'X': x, 'Index': index},
-                outputs={'Out': out},
-            )
-            return out
+        helper = LayerHelper("index_sample", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.index_sample',
+        )
+        check_variable_and_dtype(
+            index,
+            'index',
+            ['int32', 'int64'],
+            'paddle.tensor.search.index_sample',
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type='index_sample',
+            inputs={'X': x, 'Index': index},
+            outputs={'Out': out},
+        )
+        return out


 def masked_select(x, mask, name=None):
...
...
@@ -843,24 +793,24 @@ def masked_select(x, mask, name=None):
     if in_dygraph_mode():
         return _C_ops.masked_select(x, mask)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.masked_select(x, mask)
-
-    helper = LayerHelper("masked_select", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.tensor.search.mask_select',
-    )
-    check_variable_and_dtype(
-        mask, 'mask', ['bool'], 'paddle.tensor.search.masked_select'
-    )
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='masked_select', inputs={'X': x, 'Mask': mask}, outputs={'Y': out}
-    )
-    return out
+    else:
+        helper = LayerHelper("masked_select", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.mask_select',
+        )
+        check_variable_and_dtype(
+            mask, 'mask', ['bool'], 'paddle.tensor.search.masked_select'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='masked_select',
+            inputs={'X': x, 'Mask': mask},
+            outputs={'Y': out},
+        )
+        return out


 def topk(x, k, axis=None, largest=True, sorted=True, name=None):
...
...
@@ -916,49 +866,30 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
             axis = -1
         out, indices = _C_ops.topk(x, k, axis, largest, sorted)
         return out, indices
-
-    if _non_static_mode():
-        if axis is None:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x, 'k', int(k), 'largest', largest, 'sorted', sorted
-            )
-        else:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x, 'k', int(k), 'axis', axis,
-                'largest', largest, 'sorted', sorted,
-            )
-        return out, indices
-
-    helper = LayerHelper("top_k_v2", **locals())
-    inputs = {"X": [x]}
-    attrs = {}
-    if isinstance(k, Variable):
-        inputs['K'] = [k]
-    else:
-        attrs = {'k': k}
-    attrs['largest'] = largest
-    attrs['sorted'] = sorted
-    if axis is not None:
-        attrs['axis'] = axis
-
-    values = helper.create_variable_for_type_inference(dtype=x.dtype)
-    indices = helper.create_variable_for_type_inference(dtype="int64")
-
-    helper.append_op(
-        type="top_k_v2",
-        inputs=inputs,
-        outputs={"Out": [values], "Indices": [indices]},
-        attrs=attrs,
-    )
-    indices.stop_gradient = True
-    return values, indices
+    else:
+        helper = LayerHelper("top_k_v2", **locals())
+        inputs = {"X": [x]}
+        attrs = {}
+        if isinstance(k, Variable):
+            inputs['K'] = [k]
+        else:
+            attrs = {'k': k}
+        attrs['largest'] = largest
+        attrs['sorted'] = sorted
+        if axis is not None:
+            attrs['axis'] = axis
+
+        values = helper.create_variable_for_type_inference(dtype=x.dtype)
+        indices = helper.create_variable_for_type_inference(dtype="int64")
+
+        helper.append_op(
+            type="top_k_v2",
+            inputs=inputs,
+            outputs={"Out": [values], "Indices": [indices]},
+            attrs=attrs,
+        )
+        indices.stop_gradient = True
+        return values, indices
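A usage sketch:

    import paddle

    x = paddle.to_tensor([1.0, 4.0, 5.0, 7.0])
    values, indices = paddle.topk(x, k=2)  # values: [7., 5.], indices: [3, 2]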
 def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None):
...
...
@@ -1065,36 +996,31 @@ def searchsorted(
     """
     if in_dygraph_mode():
         return _C_ops.searchsorted(sorted_sequence, values, out_int32, right)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.searchsorted(
-            sorted_sequence, values, "out_int32", out_int32, "right", right
-        )
-
-    check_variable_and_dtype(
-        sorted_sequence,
-        'SortedSequence',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.searchsorted',
-    )
-    check_variable_and_dtype(
-        values,
-        'Values',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.searchsorted',
-    )
-
-    helper = LayerHelper('searchsorted', **locals())
-    out_type = 'int32' if out_int32 else 'int64'
-    out = helper.create_variable_for_type_inference(dtype=out_type)
-    helper.append_op(
-        type='searchsorted',
-        inputs={'SortedSequence': sorted_sequence, "Values": values},
-        outputs={'Out': out},
-        attrs={"out_int32": out_int32, "right": right},
-    )
-
-    return out
+    else:
+        check_variable_and_dtype(
+            sorted_sequence,
+            'SortedSequence',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.searchsorted',
+        )
+        check_variable_and_dtype(
+            values,
+            'Values',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.searchsorted',
+        )
+
+        helper = LayerHelper('searchsorted', **locals())
+        out_type = 'int32' if out_int32 else 'int64'
+        out = helper.create_variable_for_type_inference(dtype=out_type)
+        helper.append_op(
+            type='searchsorted',
+            inputs={'SortedSequence': sorted_sequence, "Values": values},
+            outputs={'Out': out},
+            attrs={"out_int32": out_int32, "right": right},
+        )
+
+        return out


 def kthvalue(x, k, axis=None, keepdim=False, name=None):
...
...
@@ -1135,16 +1061,10 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
            #  [[0, 2],
            #   [1, 2]]))
    """
-    if _non_static_mode():
+    if in_dygraph_mode():
         if axis is not None:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(
-                    x, 'k', k, "axis", axis, "keepdim", keepdim
-                )
             return _C_ops.kthvalue(x, k, axis, keepdim)
         else:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
             return _C_ops.kthvalue(x, k, -1, keepdim)

     helper = LayerHelper("kthvalue", **locals())
...
...
python/paddle/tensor/stat.py  View file @ 861fef52
...
...
@@ -16,7 +16,7 @@
 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..framework import LayerHelper, core
...
...
@@ -81,39 +81,37 @@ def mean(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mean(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_mean(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    check_variable_and_dtype(
-        x,
-        'x/input',
-        ['uint16', 'float16', 'float32', 'float64'],
-        'mean/reduce_mean',
-    )
-    check_type(axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean')
-    if isinstance(axis, (list, tuple)):
-        for item in axis:
-            check_type(
-                item,
-                'elements of axis/dim',
-                (int, Variable),
-                'mean/reduce_mean',
-            )
-
-    helper = LayerHelper('mean', **locals())
-
-    attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
+        check_variable_and_dtype(
+            x,
+            'x/input',
+            ['uint16', 'float16', 'float32', 'float64'],
+            'mean/reduce_mean',
+        )
+        check_type(
+            axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean'
+        )
+        if isinstance(axis, (list, tuple)):
+            for item in axis:
+                check_type(
+                    item,
+                    'elements of axis/dim',
+                    (int, Variable),
+                    'mean/reduce_mean',
+                )
+
+        helper = LayerHelper('mean', **locals())
+
+        attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='reduce_mean',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
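A usage sketch:

    import paddle

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    print(paddle.mean(x))          # Tensor(2.5)
    print(paddle.mean(x, axis=1))  # Tensor([1.5, 3.5])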
 def var(x, axis=None, unbiased=True, keepdim=False, name=None):
...
...
@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
             out2 = paddle.var(x, axis=1)
             # [1.         4.33333333]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')

     u = mean(x, axis, True, name)
...
...
@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
             # [1.       2.081666]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')

     out = var(**locals())
...
...
@@ -243,17 +241,15 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.numel(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.size(x)
-
-    if not isinstance(x, Variable):
-        raise TypeError("x must be a Tensor in numel")
-    helper = LayerHelper('numel', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.INT64
-    )
-    helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
-    return out
+    else:
+        if not isinstance(x, Variable):
+            raise TypeError("x must be a Tensor in numel")
+        helper = LayerHelper('numel', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=core.VarDesc.VarType.INT64
+        )
+        helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
+        return out
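A usage sketch (numel returns an int64 Tensor holding the element count):

    import paddle

    x = paddle.zeros([2, 3, 4])
    print(paddle.numel(x))  # 24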
 def nanmedian(x, axis=None, keepdim=True, name=None):
...
...
@@ -331,27 +327,30 @@ def nanmedian(x, axis=None, keepdim=True, name=None):
         if len(axis) != len(set(axis)):
             raise ValueError("Axis has duplicated elements.")

-    if _in_legacy_dygraph():
+    if in_dygraph_mode():
         median_index, out = _legacy_C_ops.nanmedian(
             x, 'axis', axis, 'keepdim', keepdim
         )
         return out
-
-    check_variable_and_dtype(
-        x,
-        'X',
-        ['int32', 'int64', 'float16', 'float32', 'float64'],
-        'nanmedian',
-    )
-
-    helper = LayerHelper('nanmedian', **locals())
-    attrs = {'axis': axis, 'keepdim': keepdim}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    medians = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='nanmedian',
-        inputs={'X': x},
-        outputs={'Out': out, 'MedianIndex': medians},
-        attrs=attrs,
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'X',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'nanmedian',
+        )
+
+        helper = LayerHelper('nanmedian', **locals())
+        attrs = {'axis': axis, 'keepdim': keepdim}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        medians = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='nanmedian',
+            inputs={'X': x},
+            outputs={'Out': out, 'MedianIndex': medians},
+            attrs=attrs,
+        )
+        return out


 def median(x, axis=None, keepdim=False, name=None):
...
...
@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False):
     for q_num in q:
         if q_num < 0 or q_num > 1:
             raise ValueError("q should be in range [0, 1]")
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             q_num = paddle.to_tensor(q_num, dtype='float64')
         if ignore_nan:
             indices.append(q_num * (valid_counts - 1))
...