Commit 861fef52 (unverified)

Authored on Dec 27, 2022 by wanghuancoder; committed by GitHub on Dec 27, 2022.

delete legacy dygraph code in python/paddle/tensor (#49286)

* delete _in_legacy_dygraph

Parent: ea741aff
Showing 15 changed files with 4,091 additions and 5,104 deletions (+4091 / -5104).
python/paddle/fluid/framework.py                     +1     -2
python/paddle/fluid/tests/unittests/test_unique.py   +13    -6
python/paddle/tensor/array.py                        +68    -66
python/paddle/tensor/attribute.py                    +43    -51
python/paddle/tensor/creation.py                     +368   -438
python/paddle/tensor/einsum.py                       +29    -32
python/paddle/tensor/layer_function_generator.py     +49    -45
python/paddle/tensor/linalg.py                       +685   -921
python/paddle/tensor/logic.py                        +243   -268
python/paddle/tensor/manipulation.py                 +1223  -1476
python/paddle/tensor/math.py                         +761   -1053
python/paddle/tensor/ops.py                          +194   -200
python/paddle/tensor/random.py                       +153   -204
python/paddle/tensor/search.py                       +201   -281
python/paddle/tensor/stat.py                         +60    -61
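The change repeated across all of these files is the removal of the intermediate `_in_legacy_dygraph()` dispatch branch: before this commit, many tensor APIs dispatched three ways (new eager dygraph via `_C_ops`, legacy dygraph via `_legacy_C_ops`, static graph via `LayerHelper`); after it, only two. A minimal, self-contained sketch of the pattern — `my_op` is a hypothetical operator name and the stub classes below only stand in for Paddle internals, they are not real Paddle code:

class _COps:                      # stands in for paddle._C_ops (eager kernels)
    @staticmethod
    def my_op(x):
        return x + 1

class _LegacyCOps:                # stands in for paddle._legacy_C_ops (the deleted path)
    @staticmethod
    def my_op(x):
        return x + 1

_C_ops, _legacy_C_ops = _COps(), _LegacyCOps()
in_dygraph_mode = lambda: True
_in_legacy_dygraph = lambda: False

# Before this commit: three-way dispatch
def my_op_old(x):
    if in_dygraph_mode():          # new eager dygraph
        return _C_ops.my_op(x)
    if _in_legacy_dygraph():       # legacy dygraph -- the branch this commit deletes
        return _legacy_C_ops.my_op(x)
    raise NotImplementedError("static-graph path (LayerHelper) elided in this sketch")

# After this commit: two-way dispatch, static graph under an explicit else
def my_op_new(x):
    if in_dygraph_mode():
        return _C_ops.my_op(x)
    else:
        raise NotImplementedError("static-graph path (LayerHelper) elided in this sketch")

print(my_op_old(1), my_op_new(1))  # 2 2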
python/paddle/fluid/framework.py (view file @ 861fef52)

...
@@ -255,8 +255,7 @@ def _test_eager_guard(place=None):
     try:
         yield
     finally:
-        if not already_fallback:
-            _enable_legacy_dygraph()
+        pass
         global_ipu_index = -1
...
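The try/finally-around-yield shape above is the standard context-manager pattern: cleanup in `finally` runs even if the guarded block raises, which is why only the legacy-mode restore call is deleted while the structure stays. A minimal self-contained sketch of that shape (names are illustrative, not Paddle APIs):

from contextlib import contextmanager

_mode = "default"

@contextmanager
def mode_guard(new_mode):
    global _mode
    old = _mode
    _mode = new_mode
    try:
        yield                 # body of the `with` block runs here
    finally:
        _mode = old           # restore runs even if the body raised

with mode_guard("eager"):
    print(_mode)              # eager
print(_mode)                  # default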
python/paddle/fluid/tests/unittests/test_unique.py (view file @ 861fef52)

...
@@ -28,7 +28,9 @@ class TestUniqueOp(OpTest):
         self.init_config()

     def test_check_output(self):
+        paddle.enable_static()
         self.check_output()
+        paddle.disable_static()

     def init_config(self):
         self.inputs = {
...
@@ -72,6 +74,8 @@ class TestRandom(TestUniqueOp):
 class TestUniqueRaiseError(unittest.TestCase):
     def test_errors(self):
+        paddle.enable_static()
+
         def test_type():
             paddle.unique([10])
...
@@ -82,6 +86,7 @@ class TestUniqueRaiseError(unittest.TestCase):
             paddle.unique(data)

         self.assertRaises(TypeError, test_dtype)
+        paddle.disable_static()

 @unittest.skipIf(
...
@@ -100,8 +105,10 @@ class TestOneGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()

 @unittest.skipIf(
...
@@ -125,8 +132,10 @@ class TestRandomGPU(TestUniqueOp):
     def test_check_output(self):
         if core.is_compiled_with_cuda():
+            paddle.enable_static()
             place = core.CUDAPlace(0)
             self.check_output_with_place(place, atol=1e-5)
+            paddle.disable_static()

 class TestSortedUniqueOp(TestUniqueOp):
...
@@ -209,16 +218,13 @@ class TestUniqueOpAxis1(TestUniqueOp):
 class TestUniqueAPI(unittest.TestCase):
     def test_dygraph_api_out(self):
-        paddle.disable_static()
         x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out = paddle.unique(x)
         expected_out = np.unique(x_data)
         self.assertTrue((out.numpy() == expected_out).all(), True)
-        paddle.enable_static()

     def test_dygraph_api_attr(self):
-        paddle.disable_static()
         x_data = np.random.random((3, 5, 5)).astype("float32")
         x = paddle.to_tensor(x_data)
         out, index, inverse, counts = paddle.unique(
...
@@ -239,10 +245,8 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((index.numpy() == np_index).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_dygraph_attr_dtype(self):
-        paddle.disable_static()
         x_data = np.random.randint(0, 10, (120))
         x = paddle.to_tensor(x_data)
         out, indices, inverse, counts = paddle.unique(
...
@@ -259,9 +263,9 @@ class TestUniqueAPI(unittest.TestCase):
         self.assertTrue((indices.numpy() == np_indices).all(), True)
         self.assertTrue((inverse.numpy() == np_inverse).all(), True)
         self.assertTrue((counts.numpy() == np_counts).all(), True)
-        paddle.enable_static()

     def test_static_graph(self):
+        paddle.enable_static()
         with paddle.static.program_guard(
             paddle.static.Program(), paddle.static.Program()
         ):
...
@@ -281,6 +285,7 @@ class TestUniqueAPI(unittest.TestCase):
         np.testing.assert_allclose(result[0], np_unique, rtol=1e-05)
         np.testing.assert_allclose(result[1], np_inverse, rtol=1e-05)
         np.testing.assert_allclose(result[2], np_counts, rtol=1e-05)
+        paddle.disable_static()

 class TestUniqueError(unittest.TestCase):
...
@@ -295,6 +300,7 @@ class TestUniqueError(unittest.TestCase):
         self.assertRaises(TypeError, test_x_dtype)

     def test_attr(self):
+        paddle.enable_static()
         x = paddle.fluid.data(name='x', shape=[10, 10], dtype='float64')

         def test_return_index():
...
@@ -319,6 +325,7 @@ class TestUniqueError(unittest.TestCase):
             result = paddle.unique(x, dtype='float64')

         self.assertRaises(TypeError, test_axis)
+        paddle.disable_static()

 if __name__ == "__main__":
...
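The reworked tests assume dygraph is the default and only toggle static mode explicitly where an op-level test needs it. A short usage sketch mirroring `test_dygraph_api_attr` (shapes and values are arbitrary):

import numpy as np
import paddle

x = paddle.to_tensor(np.random.randint(0, 10, (120,)))
out, indices, inverse, counts = paddle.unique(
    x, return_index=True, return_inverse=True, return_counts=True
)
# out matches np.unique on the same data
assert (out.numpy() == np.unique(x.numpy())).all()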
python/paddle/tensor/array.py (view file @ 861fef52)

...
@@ -15,7 +15,7 @@
 # Define functions about array.

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..framework import LayerHelper, _non_static_mode, core
+from ..framework import LayerHelper, core, in_dygraph_mode
 from ..static import Variable

 __all__ = []
...

In array_length, array_read, array_write and create_array the dispatch check `_non_static_mode()` becomes `in_dygraph_mode()`, and the pre-existing static-graph code is re-indented under an explicit `else:` branch. The hunks below show the new side of the diff:

@@ -45,27 +45,29 @@ def array_length(array):
            arr_len = paddle.tensor.array_length(arr)
            print(arr_len) # 1
    """
    if in_dygraph_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_write must be a list in dygraph mode"
        return len(array)
    else:
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError(
                "array should be tensor array vairable in array_length Op"
            )
        helper = LayerHelper('array_length', **locals())
        tmp = helper.create_variable_for_type_inference(dtype='int64')
        tmp.stop_gradient = True
        helper.append_op(
            type='lod_array_length',
            inputs={'X': [array]},
            outputs={'Out': [tmp]},
        )
        return tmp

@@ -107,7 +109,7 @@ def array_read(array, i):
            item = paddle.tensor.array_read(arr, i)
            print(item) # [[5., 5., 5.]]
    """
    if in_dygraph_mode():
        assert isinstance(
            array, list
        ), "The 'array' in array_read must be list in dygraph mode"
...
@@ -119,21 +121,21 @@ def array_read(array, i):
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        return array[i]
    else:
        check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
        helper = LayerHelper('array_read', **locals())
        if (
            not isinstance(array, Variable)
            or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
        ):
            raise TypeError("array should be tensor array vairable")
        out = helper.create_variable_for_type_inference(dtype=array.dtype)
        helper.append_op(
            type='read_from_array',
            inputs={'X': [array], 'I': [i]},
            outputs={'Out': [out]},
        )
        return out

@@ -167,7 +169,7 @@ def array_write(x, i, array=None):
            item = paddle.tensor.array_read(arr, i)
            print(item) # [[5., 5., 5.]]
    """
    if in_dygraph_mode():
        assert isinstance(
            x, Variable
        ), "The input data 'x' in array_write must be Variable in dygraph mode"
...
@@ -191,30 +193,30 @@ def array_write(x, i, array=None):
        else:
            array.append(x)
        return array
    else:
        check_variable_and_dtype(i, 'i', ['int64'], 'array_write')
        check_type(x, 'x', (Variable), 'array_write')
        helper = LayerHelper('array_write', **locals())
        if array is not None:
            if (
                not isinstance(array, Variable)
                or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY
            ):
                raise TypeError(
                    "array should be tensor array vairable in array_write Op"
                )
        if array is None:
            array = helper.create_variable(
                name="{0}.out".format(helper.name),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=x.dtype,
            )
        helper.append_op(
            type='write_to_array',
            inputs={'X': [x], 'I': [i]},
            outputs={'Out': [array]},
        )
        return array

@@ -265,17 +267,17 @@ def create_array(dtype, initialized_list=None):
            )
        )
    if in_dygraph_mode():
        return array
    else:
        helper = LayerHelper("array", **locals())
        tensor_array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=dtype,
        )
        for val in array:
            array_write(x=val, i=array_length(tensor_array), array=tensor_array)
        return tensor_array
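Adapted from the docstring examples visible in these hunks, a short dygraph usage sketch of the array helpers (in dygraph the "array" is a plain Python list, per the asserts above):

import paddle

arr = paddle.tensor.create_array(dtype="float32")
x = paddle.full(shape=[1, 3], fill_value=5, dtype="float32")
i = paddle.zeros(shape=[1], dtype="int64")

arr = paddle.tensor.array_write(x, i, array=arr)
item = paddle.tensor.array_read(arr, i)        # [[5., 5., 5.]]
print(paddle.tensor.array_length(arr))         # 1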
python/paddle/tensor/attribute.py (view file @ 861fef52)

...
@@ -17,10 +17,10 @@
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper, core
 from ..static import Variable
 from .creation import _complex_to_real_dtype, assign
...
@@ -107,36 +107,32 @@ def shape(input):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.shape` is deleted; static path re-indented under `else:`, new side:)
        out = _C_ops.shape(input)
        out.stop_gradient = True
        return out
    else:
        check_variable_and_dtype(
            input,
            'input',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'shape',
        )
        helper = LayerHelper('shape', **locals())
        out = helper.create_variable_for_type_inference(dtype='int32')
        helper.append_op(
            type='shape',
            inputs={'Input': input},
            outputs={'Out': out},
            stop_gradient=True,
        )
        return out
...
@@ -289,16 +285,14 @@ def real(x, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.real` is deleted; new side:)
    """
    if in_dygraph_mode():
        return _C_ops.real(x)
    else:
        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
        helper = LayerHelper('real', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(helper.input_dtype())
        )
        helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out})
        return out
...
@@ -336,13 +330,11 @@ def imag(x, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.imag` is deleted; new side:)
    """
    if in_dygraph_mode():
        return _C_ops.imag(x)
    else:
        check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
        helper = LayerHelper('imag', **locals())
        out = helper.create_variable_for_type_inference(
            dtype=_complex_to_real_dtype(helper.input_dtype())
        )
        helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out})
        return out
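A quick dygraph usage sketch of the three APIs touched here; per the dtype checks above, `real` and `imag` expect complex inputs:

import paddle

z = paddle.to_tensor([[1 + 6j, 2 + 5j], [3 + 4j, 4 + 3j]])
print(paddle.shape(z))   # int32 tensor holding [2, 2]
print(paddle.real(z))    # [[1., 2.], [3., 4.]]
print(paddle.imag(z))    # [[6., 5.], [4., 3.]]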
python/paddle/tensor/creation.py (view file @ 861fef52)

...
@@ -33,7 +33,6 @@ from ..fluid.data_feeder import (
 from ..fluid.framework import (
     Variable,
     _in_eager_without_dygraph_check,
-    _in_legacy_dygraph,
     device_guard,
 )
 from ..fluid.initializer import Constant, Initializer
...
@@ -43,7 +42,6 @@ from ..framework import (
     LayerHelper,
     _current_expected_place,
     _get_paddle_place,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
@@ -324,65 +322,65 @@ def linspace(start, stop, num, dtype=None, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype', dtype)` is deleted; static path re-indented under `else:`, new side:)
            dtype,
            _current_expected_place(),
        )
    else:
        helper = LayerHelper("linspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype,
                'start',
                ['float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(start, 'start', (int, float), 'linspace')

        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype,
                'stop',
                ['float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'linspace')
        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'linspace')
        check_dtype(
            dtype,
            'dtype',
            ['int32', 'int64', 'float32', 'float64'],
            'linspace',
        )
        if (
            (stop_dtype == "float64" or start_dtype == "float64")
            and out_dtype in ["float32", "int32"]
        ) or (
            (stop_dtype == "int64" or start_dtype == "int64")
            and out_dtype == "int32"
        ):
            raise ValueError(
                "The dtype of start/stop is {}/{} but the attr(dtype) of linspace is {}, "
                "which may cause data type overflows. Please reset attr(dtype) of linspace.".format(
                    start_dtype, stop_dtype, dtype
                )
            )

        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='linspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out
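Note that the dtype-overflow guard retained above runs only on the static-graph path; in dygraph the call goes straight to the C++ op. A sketch of both paths (values arbitrary):

import paddle

# Dygraph: dispatches straight to _C_ops.linspace
print(paddle.linspace(0, 10, 5, dtype='float32'))  # [0., 2.5, 5., 7.5, 10.]

# Static graph: the overflow check applies
paddle.enable_static()
start = paddle.static.data(name='start', shape=[1], dtype='float64')
stop = paddle.static.data(name='stop', shape=[1], dtype='float64')
try:
    paddle.linspace(start, stop, 5, dtype='int32')  # float64 endpoints, int32 out
except ValueError as e:
    print(e)                                        # the "data type overflows" message
paddle.disable_static()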
def logspace(start, stop, num, base=10.0, dtype=None, name=None):
...
@@ -446,91 +444,91 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
(the dispatch check `_non_static_mode()` becomes `in_dygraph_mode()` — the dygraph body still calls `_legacy_C_ops.logspace` — and the static path is re-indented under `else:`; new side:)
    if not isinstance(base, Variable):
        with device_guard("cpu"):
            tensor_base = fill_constant([1], dtype, base)
    if in_dygraph_mode():
        return _legacy_C_ops.logspace(
            tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype', dtype
        )
    else:
        helper = LayerHelper("logspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        base_dtype = convert_dtype(tensor_base.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype,
                'start',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(start, 'start', (int, float), 'logspace')

        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype,
                'stop',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'logspace')

        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'logspace')

        if isinstance(base, Variable):
            check_dtype(
                base.dtype,
                'base',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(base, 'base', (int, float), 'logspace')

        check_dtype(
            dtype,
            'dtype',
            ['int32', 'int64', 'float32', 'float64'],
            'logspace',
        )
        if (
            (
                stop_dtype == "float64"
                or start_dtype == "float64"
                or base_dtype == "float64"
            )
            and out_dtype in ["float32", "int32"]
        ) or (
            (
                stop_dtype == "int64"
                or start_dtype == "int64"
                or base_dtype == "int64"
            )
            and out_dtype == "int32"
        ):
            raise ValueError(
                "The dtype of start/stop/base is {}/{}/{} but the attr(dtype) of logspace is {}, "
                "which may cause data type overflows. Please reset attr(dtype) of logspace.".format(
                    start_dtype, stop_dtype, base_dtype, dtype
                )
            )

        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='logspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
                'Base': tensor_base,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out

def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True):
...
@@ -746,7 +744,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True):
     if place is None:
         place = _current_expected_place()
-    if _non_static_mode():
+    if paddle.fluid.framework._non_static_mode():
         return _to_tensor_non_static(data, dtype, place, stop_gradient)

     # call assign for static graph
...
@@ -785,44 +783,53 @@ def full_like(x, fill_value, dtype=None, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)` is deleted; new side:)
            # [[2. 2. 2.]
            #  [2. 2. 2.]]
    """
    if dtype is None:
        dtype = x.dtype
    else:
        if not isinstance(dtype, core.VarDesc.VarType):
            dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        return _C_ops.full_like(x, fill_value, dtype, x.place)
    else:
        helper = LayerHelper("full_like", **locals())
        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
            'full_like',
        )
        check_dtype(
            dtype,
            'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64'],
            'full_like/zeros_like/ones_like',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='fill_any_like',
            inputs={'X': [x]},
            attrs={'value': fill_value, "dtype": dtype},
            outputs={'Out': [out]},
        )
        out.stop_gradient = True
        return out

def ones(shape, dtype=None, name=None):
...
@@ -1011,7 +1018,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
    """
     def _check_attr(attr, message):
-        if isinstance(attr, ((Variable, core.VarBase, core.eager.Tensor))):
+        if isinstance(attr, ((Variable, core.eager.Tensor))):
             assert len(attr.shape) == 1 and attr.shape[0] in [1, -1]
         elif not isinstance(attr, int) or attr < 0:
             raise TypeError("{} should be a non-negative int.".format(message))
...
@@ -1027,16 +1034,10 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
     else:
         num_columns = num_rows

-    if _non_static_mode():
-        if in_dygraph_mode():
-            out = _C_ops.eye(
-                num_rows, num_columns, dtype, _current_expected_place()
-            )
-        elif _in_legacy_dygraph():
-            out = _legacy_C_ops.eye(
-                'dtype', dtype, 'num_rows', num_rows, 'num_columns', num_columns
-            )
+    if in_dygraph_mode():
+        out = _C_ops.eye(
+            num_rows, num_columns, dtype, _current_expected_place()
+        )
     else:
         helper = LayerHelper("eye", **locals())
         check_dtype(
...
@@ -1211,27 +1212,25 @@ def arange(start=0, end=None, step=1, dtype=None, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.range(start, end, step)` is deleted; static path re-indented under `else:`, new side:)
    if in_dygraph_mode():
        return _C_ops.arange(start, end, step, dtype, _current_expected_place())
    else:
        check_dtype(
            dtype,
            'dtype',
            ['float32', 'float64', 'int32', 'int64'],
            'range/arange',
        )
        helper = LayerHelper('range', **locals())
        out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
        helper.append_op(
            type='range',
            inputs={'Start': start, 'End': end, 'Step': step},
            outputs={'Out': out},
        )
        out.stop_gradient = True
        if out_shape is not None:
            out.desc.set_shape(out_shape)
        return out
def _tril_triu_op(helper):
    """Base op of tril_op and triu_op"""
...
@@ -1328,12 +1327,8 @@ def tril(x, diagonal=0, name=None):
    """
     if in_dygraph_mode():
         return _C_ops.tril(x, diagonal, True)
-
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", True)
-
-    return _tril_triu_op(LayerHelper('tril', **locals()))
+    else:
+        return _tril_triu_op(LayerHelper('tril', **locals()))

def triu(x, diagonal=0, name=None):
...
@@ -1394,12 +1389,8 @@ def triu(x, diagonal=0, name=None):
    """
     if in_dygraph_mode():
         return _C_ops.triu(x, diagonal, False)
-
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, 'tril_triu')
-        return op(x, 'diagonal', diagonal, "lower", False)
-
-    return _tril_triu_op(LayerHelper('triu', **locals()))
+    else:
+        return _tril_triu_op(LayerHelper('triu', **locals()))
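Both functions share the `tril_triu` kernel, selected by the boolean "lower" flag visible above (True for tril, False for triu). A quick usage sketch:

import paddle

x = paddle.arange(1, 10, dtype='float32').reshape([3, 3])
print(paddle.tril(x))               # zeros above the main diagonal
print(paddle.triu(x))               # zeros below the main diagonal
print(paddle.tril(x, diagonal=-1))  # strictly-lower part only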
def meshgrid(*args, **kwargs):
...
@@ -1437,37 +1428,35 @@ def meshgrid(*args, **kwargs):
     if len(args) == 1 and isinstance(args[0], (list, tuple)):
         args = args[0]
-    if _in_legacy_dygraph():
-        num = len(args)
-        out = _legacy_C_ops.meshgrid(list(args), num)
-        return out
     if in_dygraph_mode():
         return _C_ops.meshgrid(list(args))
(static path re-indented under `else:`, new side:)
    else:
        name = kwargs.get("name", None)
        helper = LayerHelper('meshgrid', **locals())

        if not isinstance(args, (list, tuple)):
            raise TypeError("The type of input args in meshgrid should be list.")

        for id, input_ in enumerate(args):
            check_dtype(
                input_.dtype,
                'create data type',
                ['float16', 'float32', 'float64', 'int32', 'int64'],
                'meshgrid',
            )

        num = len(args)
        out = [
            helper.create_variable_for_type_inference(dtype=args[i].dtype)
            for i in range(num)
        ]
        helper.append_op(
            type='meshgrid', inputs={'X': list(args)}, outputs={'Out': out}
        )
        return out

def diagflat(x, offset=0, name=None):
...
@@ -1555,62 +1544,49 @@ def diagflat(x, offset=0, name=None):
(the legacy branches calling `_legacy_C_ops.diag_v2` and `_legacy_C_ops.flatten_contiguous_range` are deleted; `padding_value = 0` moves into the static branch and the dygraph path passes the literal 0; new side:)
        # [0, 0, 3, 0, 0],
        # [0, 0, 0, 4, 0]])
    """
    if in_dygraph_mode():
        if len(x.shape) <= 1:
            return _C_ops.diag(x, offset, 0)
        else:
            y = _C_ops.flatten(x, 0, -1)
            return _C_ops.diag(y, offset, 0)
    else:
        padding_value = 0
        check_type(x, 'x', (Variable), 'diagflat')
        check_dtype(
            x.dtype, 'x', ['float32', 'float64', 'int32', 'int64'], 'diagflat'
        )
        check_type(offset, 'offset', (int), 'diagflat')

        helper = LayerHelper("diagflat", **locals())
        out1 = helper.create_variable_for_type_inference(dtype=x.dtype)
        out1_shape = helper.create_variable_for_type_inference(x.dtype)
        out2 = helper.create_variable_for_type_inference(dtype=x.dtype)

        if len(x.shape) <= 1:
            helper.append_op(
                type='diag_v2',
                inputs={'X': x},
                outputs={'Out': out2},
                attrs={'offset': offset, 'padding_value': padding_value},
            )
        else:
            helper.append_op(
                type='flatten_contiguous_range',
                inputs={'X': x},
                outputs={'Out': out1, 'XShape': out1_shape},
                attrs={'start_axis': 0, 'stop_axis': -1},
            )
            out1.stop_gradient = True
            helper.append_op(
                type='diag_v2',
                inputs={'X': out1},
                outputs={'Out': out2},
                attrs={'offset': offset, 'padding_value': padding_value},
            )
        out2.stop_gradient = True
        return out2

def diag(x, offset=0, padding_value=0, name=None):
...
@@ -1691,40 +1667,35 @@ def diag(x, offset=0, padding_value=0, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.diag_v2(x, "offset", offset, "padding_value", padding_value)` is deleted; static path re-indented under `else:`, new side:)
    if in_dygraph_mode():
        return _C_ops.diag(x, offset, padding_value)
    else:
        check_type(x, 'x', (Variable), 'diag_v2')
        check_dtype(
            x.dtype,
            'x',
            ['float32', 'float64', 'int32', 'int64'],
            'diag_v2',
        )
        check_type(offset, 'offset', (int), 'diag_v2')
        check_type(padding_value, 'padding_value', (int, float), 'diag_v2')
        if len(x.shape) != 1 and len(x.shape) != 2:
            raise ValueError(
                "The dimension of input x must be either 1 or 2, but received {}".format(
                    len(x.shape)
                )
            )

        helper = LayerHelper("diag_v2", **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='diag_v2',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'offset': offset, 'padding_value': padding_value},
        )
        out.stop_gradient = True
        return out
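As the dygraph branch of `diagflat` shows, it flattens its input and then reuses the same diag kernel. A quick usage sketch of both:

import paddle

x = paddle.to_tensor([1, 2, 3])
print(paddle.diag(x))            # 3x3 matrix with [1, 2, 3] on the main diagonal
print(paddle.diag(x, offset=1))  # diagonal shifted one above the main one
print(paddle.diagflat(paddle.to_tensor([[1, 2], [3, 4]])))  # flattened, then a 4x4 diagonal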
def empty(shape, dtype=None, name=None):
...
@@ -1782,45 +1753,37 @@ def empty(shape, dtype=None, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.empty('shape', utils.convert_shape_to_list(shape), 'dtype', convert_np_dtype_to_dtype_(dtype))` is deleted; static path re-indented under `else:`, new side:)
        )
        out.stop_gradient = True
        return out
    else:
        helper = LayerHelper("empty", **locals())
        inputs = {}

        check_dtype(
            dtype,
            'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty',
        )
        check_type(shape, 'shape', (Variable, list, tuple), 'empty')

        if isinstance(shape, Variable):
            check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty')

        attrs = {}
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='empty'
        )

        out = helper.create_variable_for_type_inference(dtype=dtype)
        attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
        helper.append_op(
            type='empty',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out

def empty_like(x, dtype=None, name=None):
...
@@ -1863,47 +1826,40 @@ def empty_like(x, dtype=None, name=None):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.empty('shape', x.shape, 'dtype', convert_np_dtype_to_dtype_(dtype))` is deleted; new side:)
        )
        out.stop_gradient = True
        return out
    else:
        helper = LayerHelper("empty_like", **locals())
        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty_like',
        )
        check_dtype(
            dtype,
            'dtype',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'empty_like',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)

        inputs = {}
        attrs = {}
        attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
        shape = paddle.shape(x)
        utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='empty_like'
        )

        helper.append_op(
            type='empty',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out

def assign(x, output=None):
    """
...
@@ -1958,10 +1914,6 @@ def assign(x, output=None):
             output = _C_ops.assign(input)
         else:
             _C_ops.assign_out_(input, output)
-    elif _in_legacy_dygraph():
-        if output is None:
-            output = core.VarBase()
-        _legacy_C_ops.assign(input, output)
     else:
         check_dtype(
             input.dtype,
...
@@ -2060,18 +2012,6 @@ def assign(x, output=None):
             values,
             _current_expected_place(),
         )
-    elif _in_legacy_dygraph():
-        if output is None:
-            output = core.VarBase()
-        _legacy_C_ops.assign_value(
-            output,
-            'shape',
-            list(input.shape),
-            'dtype',
-            dtype,
-            value_name,
-            values,
-        )
     else:
         if output is None:
             output = helper.create_variable_for_type_inference(
...
@@ -2087,9 +2027,6 @@ def assign(x, output=None):
             },
         )

-    if is_inplace and _in_legacy_dygraph():
-        output._bump_inplace_version()
-
     return output
...
@@ -2227,23 +2164,26 @@ def complex(real, imag, name=None):
(the `if paddle.in_dynamic_mode(): return paddle._legacy_C_ops.complex(real, imag)` fallback is deleted; static path re-indented under `else:`, new side:)
    """
    if in_dygraph_mode():
        return _C_ops.complex(real, imag)
    else:
        check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
        check_variable_and_dtype(imag, 'imag', ['float32', 'float64'], 'complex')

        op_type = "complex"
        helper = LayerHelper(op_type, **locals())
        inputs = {"X": real, "Y": imag}
        out = helper.create_variable_for_type_inference(
            dtype=_real_to_complex_dtype(real.dtype)
        )
        outputs = {"Out": out}
        attrs = {}
        helper.append_op(type=op_type, inputs=inputs, attrs=attrs, outputs=outputs)
        return out

def tril_indices(row, col, offset=0, dtype='int64'):
...
@@ -2291,34 +2231,29 @@ def tril_indices(row, col, offset=0, dtype='int64'):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.tril_indices('rows', row, 'cols', col, 'offset', offset, "dtype", dtype)` is deleted; the argument validation moves into the static-graph `else:` branch; new side:)
        # [[ 1, 2, 2, 3, 3, 3],
        #  [ 0, 0, 1, 0, 1, 2]]
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        if col is None:
            col = row
        out = _C_ops.tril_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
    else:
        if not isinstance(row, int) or row < 0:
            raise TypeError("row should be a non-negative int")

        if col is not None:
            if not isinstance(col, int) or col < 0:
                raise TypeError("col should be a non-negative int")
        else:
            col = row

        if not isinstance(offset, int):
            raise TypeError("offset should be a int")

        helper = LayerHelper("tril_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)
...
@@ -2375,34 +2310,29 @@ def triu_indices(row, col=None, offset=0, dtype='int64'):
(same restructuring as tril_indices; the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.triu_indices('row', row, 'col', col, 'offset', offset, "dtype", dtype)` is deleted; new side:)
        # [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3],
        #  [0, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 2, 3]]
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        if col is None:
            col = row
        out = _C_ops.triu_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
    else:
        if not isinstance(row, int) or row < 0:
            raise TypeError("row should be a non-negative int")

        if col is not None:
            if not isinstance(col, int) or col < 0:
                raise TypeError("col should be a non-negative int")
        else:
            col = row

        if not isinstance(offset, int):
            raise TypeError("offset should be a int")

        helper = LayerHelper("triu_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)
...
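A short usage sketch matching the docstring output visible in the tril_indices hunk (the shown pair of rows corresponds to the strictly-lower part of a 4x4 matrix):

import paddle

print(paddle.tril_indices(4, 4, offset=-1))
# [[1, 2, 2, 3, 3, 3],
#  [0, 0, 1, 0, 1, 2]]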
python/paddle/tensor/einsum.py (view file @ 861fef52)

...
@@ -20,10 +20,10 @@ import string
 import numpy as np
 import opt_einsum

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from .linalg import matmul, transpose
 from .manipulation import reshape, squeeze, unsqueeze
...
@@ -829,38 +829,35 @@ def gen_einsum_op(equation, *operands):
(the `_in_legacy_dygraph()` branch calling `_legacy_C_ops.einsum(operands, len(operands), len(operands), 'equation', equation)[0]` is deleted; static path re-indented under `else:`, new side:)
    """
    EinsumOp Python Interface:
    """
    assert len(operands) <= 2, "Only support two operands in EinsumOp."
    if in_dygraph_mode():
        return _C_ops.einsum(operands, equation)[0]
    else:
        for inp in operands:
            check_variable_and_dtype(inp, 'dtype', ['float32', 'float64'], 'einsum')
        check_type(equation, 'equation', str, 'einsum')
        helper = LayerHelper('einsum', **locals())
        out = helper.create_variable_for_type_inference(dtype=operands[0].dtype)
        attrs = dict()
        attrs['equation'] = equation
        caches = [
            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
            for i in range(len(operands))
        ]
        xshape = [
            helper.create_variable_for_type_inference(dtype=operands[0].dtype)
            for i in range(len(operands))
        ]
        helper.append_op(
            type='einsum',
            inputs={'Operands': operands},
            outputs={'Out': out, "InnerCache": caches, "XShape": xshape},
            attrs=attrs,
        )
        return out

def einsum(equation, *operands):
...
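`gen_einsum_op` is the internal kernel hook (at most two operands per call, per the assert); the public entry point is `paddle.einsum`. A quick usage sketch:

import paddle

a = paddle.rand([3, 4])
b = paddle.rand([4, 5])
print(paddle.einsum('ij,jk->ik', a, b).shape)  # [3, 5], matrix product
print(paddle.einsum('ij->ji', a).shape)        # [4, 3], transpose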
python/paddle/tensor/layer_function_generator.py  Browse file @ 861fef52

...
@@ -24,7 +24,6 @@ from ..fluid.proto import framework_pb2
 from ..framework import (
     LayerHelper,
     OpProtoHolder,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
@@ -274,41 +273,44 @@ def generate_activation_fn(op_type):
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, op_type):
-            op = getattr(_C_ops, op_type)
-            return op(x)
-        # TODO(dev): Because some ops' yaml has not been migrated.
-        # Replace it with _in_legacy_dygraph while all yaml work is done.
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, op_type)
-            return op(x)
-
-        if op_type not in ["abs", "exp", "square"]:
-            check_variable_and_dtype(
-                x, 'x', ['float16', 'float32', 'float64'], op_type
-            )
+        if in_dygraph_mode():
+            if hasattr(_C_ops, op_type):
+                op = getattr(_C_ops, op_type)
+                return op(x)
+            else:
+                # TODO(dev): Because some ops' yaml has not been migrated.
+                # Replace it with _C_ops while all yaml work is done.
+                op = getattr(_legacy_C_ops, op_type)
+                return op(x)
         else:
-            # abs exp square ops support dtype(int32, int64, float16, float32, float64)
-            check_variable_and_dtype(
-                x,
-                'x',
-                ['int32', 'int64', 'float16', 'float32', 'float64',
-                 'complex64', 'complex128'],
-                op_type,
-            )
-
-        helper = LayerHelper(op_type, **locals())
+            if op_type not in ["abs", "exp", "square"]:
+                check_variable_and_dtype(
+                    x, 'x', ['float16', 'float32', 'float64'], op_type
+                )
+            else:
+                # abs exp square ops support dtype(int32, int64, float16, float32, float64)
+                check_variable_and_dtype(
+                    x,
+                    'x',
+                    ['int32', 'int64', 'float16', 'float32', 'float64',
+                     'complex64', 'complex128'],
+                    op_type,
+                )

-        output = helper.create_variable_for_type_inference(dtype=x.dtype)
-        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
-        return output
+            helper = LayerHelper(op_type, **locals())
+            output = helper.create_variable_for_type_inference(dtype=x.dtype)
+            helper.append_op(
+                type=op_type, inputs={"X": x}, outputs={"Out": output}
+            )
+            return output

     func.__name__ = op_type
     func.__doc__ = _generate_doc_string_(
...
@@ -332,18 +334,20 @@ def generate_inplace_fn(inplace_op_type):
     origin_op_type = inplace_op_type[:-1]

     def func(x, name=None):
-        if in_dygraph_mode() and hasattr(_C_ops, inplace_op_type):
-            op = getattr(_C_ops, inplace_op_type)
-            return op(x)
-        if _non_static_mode():
-            op = getattr(_legacy_C_ops, inplace_op_type)
-            return op(x)
-        warnings.warn(
-            "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
-                inplace_op_type, origin_op_type
+        if in_dygraph_mode():
+            if hasattr(_C_ops, inplace_op_type):
+                op = getattr(_C_ops, inplace_op_type)
+                return op(x)
+            else:
+                op = getattr(_legacy_C_ops, inplace_op_type)
+                return op(x)
+        else:
+            warnings.warn(
+                "In static mode, {}() is the same as {}() and does not perform inplace operation.".format(
+                    inplace_op_type, origin_op_type
+                )
             )
-        )
-        return generate_activation_fn(origin_op_type)(x, name)
+            return generate_activation_fn(origin_op_type)(x, name)

     func.__name__ = inplace_op_type
     func.__doc__ = """
...
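The generated functions now make one mode check, keeping the legacy kernel only as an in-branch fallback for ops whose yaml has not been migrated. A self-contained sketch of that dispatch shape; the namespaces and the mode query below are stand-ins for illustration, not Paddle's real modules:

from types import SimpleNamespace

_C_ops = SimpleNamespace(exp=lambda x: ('new-kernel', x))      # migrated op
_legacy_C_ops = SimpleNamespace(tanh=lambda x: ('legacy', x))  # not migrated yet

def in_dygraph_mode():
    return True  # stand-in for the real framework query

def generate_activation_fn(op_type):
    def func(x, name=None):
        if in_dygraph_mode():
            if hasattr(_C_ops, op_type):
                return getattr(_C_ops, op_type)(x)
            # yaml not migrated yet: fall back to the legacy kernel
            return getattr(_legacy_C_ops, op_type)(x)
        raise NotImplementedError('static branch elided in this sketch')
    func.__name__ = op_type
    return func

print(generate_activation_fn('exp')(1.0))   # ('new-kernel', 1.0)
print(generate_activation_fn('tanh')(1.0))  # ('legacy', 1.0)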
python/paddle/tensor/linalg.py  Browse file @ 861fef52

...
@@ -15,7 +15,7 @@
 import numpy as np
 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import VarDesc
 from ..fluid.data_feeder import (
...
@@ -23,8 +23,7 @@ from ..fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
 )
-from ..fluid.framework import _in_legacy_dygraph
-from ..framework import LayerHelper, _non_static_mode, in_dygraph_mode
+from ..framework import LayerHelper, in_dygraph_mode
 from ..static import Variable
 from .creation import full
 from .logic import logical_not
...
@@ -90,53 +89,49 @@ def transpose(x, perm, name=None):
     if in_dygraph_mode():
         return _C_ops.transpose(x, perm)
     else:
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-            return out
-
         check_variable_and_dtype(
             x,
             'x',
             ['bool', 'float16', 'float32', 'float64', 'int32', 'int64',
              'complex64', 'complex128'],
             'transpose',
         )
         check_type(perm, 'perm', (list, tuple), 'transpose')
         if isinstance(perm, tuple):
             perm = list(perm)
         if len(perm) != len(x.shape):
             raise ValueError(
                 "Input(perm) is the permutation of dimensions of Input(x), "
                 "its length should be equal to dimensions of Input(x), "
                 "but received dimension of Input(x) is %s, "
                 "the length of Input(perm) is %s." % (len(x.shape), len(perm))
             )
         for idx, dim in enumerate(perm):
             if dim >= len(x.shape):
                 raise ValueError(
                     "Each element in Input(perm) should be less than Input(x)'s dimension, "
                     "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                     "dimension %d." % (idx, perm[idx], len(x.shape))
                 )

         helper = LayerHelper('transpose', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         x_shape = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
             type='transpose2',
             inputs={'X': [x]},
             outputs={'Out': [out], 'XShape': [x_shape]},
             attrs={'axis': perm},
         )
         return out
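Caller-visible behavior is unchanged: in eager mode paddle.transpose now always goes straight to _C_ops.transpose. A minimal sketch (shapes illustrative):

import paddle

x = paddle.rand([2, 3, 4])
# perm reorders the axes; here axis 2 becomes the leading axis
print(paddle.transpose(x, perm=[2, 0, 1]).shape)  # [4, 2, 3]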
...
@@ -235,38 +230,39 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matmul(x, y, transpose_x, transpose_y)
-
-    if _in_legacy_dygraph():
-        op_type = 'matmul_v2'
-        op = getattr(_legacy_C_ops, op_type)
-        return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
-
-    attrs = {
-        'trans_x': transpose_x,
-        'trans_y': transpose_y,
-    }
-
-    def __check_input(x, y):
-        ...
-    return out
+    else:
+        attrs = {
+            'trans_x': transpose_x,
+            'trans_y': transpose_y,
+        }

+        def __check_input(x, y):
+            var_names = {'x': x, 'y': y}
+            for name, val in var_names.items():
+                check_variable_and_dtype(
+                    val,
+                    name,
+                    ['float16', 'float32', 'float64', 'complex64',
+                     'complex128'],
+                    'matmul',
+                )

+        __check_input(x, y)

+        helper = LayerHelper('matmul_v2', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='matmul_v2',
+            inputs={'X': x, 'Y': y},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
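The transpose_x/transpose_y arguments map onto the trans_x/trans_y attributes kept above. A small usage sketch (shapes illustrative):

import paddle

x = paddle.rand([2, 3])
y = paddle.rand([2, 4])
# transpose_x=True contracts over x's leading axis: (3, 2) @ (2, 4) -> (3, 4)
print(paddle.matmul(x, y, transpose_x=True).shape)  # [3, 4]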
...
@@ -373,33 +369,26 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             if dim is None:
                 return _C_ops.frobenius_norm(input, [], keepdim, True)
             return _C_ops.frobenius_norm(input, dim, keepdim, False)
-        if _in_legacy_dygraph():
-            if dim is None:
-                return _legacy_C_ops.frobenius_norm(
-                    input, 'keep_dim', keepdim, 'reduce_all', True
-                )
-            return _legacy_C_ops.frobenius_norm(
-                input, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', False
-            )
-        attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
-        ...
-        return out
+        else:
+            attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
+            if dim is None:
+                attrs['reduce_all'] = True
+            check_variable_and_dtype(
+                input, 'input', ['float32', 'float64'], 'frobenius_norm'
+            )
+            helper = LayerHelper('frobenius_norm', **locals())
+            out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            helper.append_op(
+                type='frobenius_norm',
+                inputs={'X': input},
+                outputs={'Out': out},
+                attrs=attrs,
+            )
+            return out
...
@@ -416,49 +405,34 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         if in_dygraph_mode():
             if axis is None:
                 axis = -1
             return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector)
-
-        if _in_legacy_dygraph():
-            if axis is None:
-                axis = -1
-            return _legacy_C_ops.p_norm(
-                input, 'porder', porder, 'axis', axis,
-                'keepdim', keepdim, 'asvector', asvector,
-            )
-
-        if porder is not None:
-            check_type(porder, 'porder', (float, int), 'p_norm')
-        ...
-        return out
+        else:
+            if porder is not None:
+                check_type(porder, 'porder', (float, int), 'p_norm')
+            if axis is not None:
+                check_type(axis, 'axis', (int), 'p_norm')
+            check_variable_and_dtype(
+                input, 'input', ['float32', 'float64'], 'p_norm'
+            )
+            attrs = {
+                'axis': axis if axis is not None else -1,
+                'porder': float(porder) if porder is not None else 2.0,
+                'keepdim': keepdim,
+                'asvector': asvector,
+                'epsilon': 1e-12,
+            }
+            helper = LayerHelper('p_norm', **locals())
+            out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            helper.append_op(
+                type='p_norm',
+                inputs={'X': input},
+                outputs={'Out': out},
+                attrs=attrs,
+            )
+            return out
...
@@ -469,30 +443,38 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 return _C_ops.max(out, axis, keepdim)
             else:
                 return _C_ops.min(out, axis, keepdim)
-
-        helper = LayerHelper('inf_norm', **locals())
-        ...
-        return reduce_out
+        else:
+            helper = LayerHelper('inf_norm', **locals())
+            out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            helper.append_op(
+                type='abs', inputs={'X': input}, outputs={'Out': out}
+            )
+            reduce_out = helper.create_variable_for_type_inference(
+                dtype=helper.input_dtype()
+            )
+            reduce_all = (
+                True if axis is None or axis == [] or asvector else False
+            )
+            axis = axis if axis is not None and axis != [] else [0]
+            reduce_type = (
+                'reduce_max' if porder == np.float64('inf') else 'reduce_min'
+            )
+            helper.append_op(
+                type=reduce_type,
+                inputs={'X': out},
+                outputs={'Out': reduce_out},
+                attrs={
+                    'dim': axis,
+                    'keep_dim': keepdim,
+                    'reduce_all': reduce_all,
+                },
+            )
+            return reduce_out
...
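The three helpers above back the single public entry point paddle.linalg.norm; which one runs depends on p and axis. A brief usage sketch, assuming current paddle.linalg.norm semantics (printed values rounded):

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
print(paddle.linalg.norm(x))                  # frobenius_norm path: sqrt(30) ~ 5.477
print(paddle.linalg.norm(x, p=2, axis=1))     # vector_norm (p_norm) path, per row
print(paddle.linalg.norm(x, p=float('inf')))  # inf_norm path: max |x_ij| = 4.0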
@@ -846,40 +828,6 @@ def cond(x, p=None, name=None):
                 return _C_ops.max(sum_out, [-1], False)
             if porder == -1 or porder == -np.inf:
                 return _C_ops.min(sum_out, [-1], False)
-        elif _in_legacy_dygraph():
-            reduce_all = True if axis is None or axis == [] else False
-            axis = axis if axis is not None and axis != [] else [0]
-            abs_out = _legacy_C_ops.abs(input)
-            sum_out = _legacy_C_ops.reduce_sum(
-                abs_out, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-            )
-            if porder == 1 or porder == np.inf:
-                return _legacy_C_ops.reduce_max(
-                    sum_out, 'dim', [-1], 'keepdim', False,
-                    'reduce_all', reduce_all,
-                )
-            if porder == -1 or porder == -np.inf:
-                return _legacy_C_ops.reduce_min(
-                    sum_out, 'dim', [-1], 'keepdim', False,
-                    'reduce_all', reduce_all,
-                )
         else:
             reduce_all = True if axis is None or axis == [] else False
             axis = axis if axis is not None and axis != [] else [0]
...
@@ -940,68 +888,54 @@ def cond(x, p=None, name=None):
             sum_out_1 = _C_ops.sum(pow_out, axis, None, False)
             sum_out_2 = _C_ops.sum(sum_out_1, axis, None, False)
             return _C_ops.pow(sum_out_2, float(1.0 / porder))
-        elif paddle.in_dynamic_mode():
+        else:
             reduce_all = True if axis is None or axis == [] else False
-            pow_out = _legacy_C_ops.pow(input, 'factor', porder)
-            sum_out_1 = _legacy_C_ops.reduce_sum(
-                pow_out, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-            )
-            sum_out_2 = _legacy_C_ops.reduce_sum(
-                sum_out_1, 'dim', axis, 'keepdim', False,
-                'reduce_all', reduce_all,
-            )
-            return _legacy_C_ops.pow(sum_out_2, 'factor', float(1.0 / porder))
-        else:
-            reduce_all = True if axis is None or axis == [] else False
             block = LayerHelper('norm', **locals())
             pow_out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             sum_out_1 = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             sum_out_2 = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             out = block.create_variable_for_type_inference(
                 dtype=block.input_dtype()
             )
             block.append_op(
                 type='pow',
                 inputs={'X': input},
                 outputs={'Out': pow_out},
                 attrs={'factor': porder},
             )
             block.append_op(
                 type='reduce_sum',
                 inputs={'X': pow_out},
                 outputs={'Out': sum_out_1},
                 attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
             )
             block.append_op(
                 type='reduce_sum',
                 inputs={'X': sum_out_1},
                 outputs={'Out': sum_out_2},
                 attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
             )
             block.append_op(
                 type='pow',
                 inputs={'X': sum_out_2},
                 outputs={'Out': out},
                 attrs={'factor': float(1.0 / porder)},
             )
             return out
...
@@ -1009,101 +943,80 @@ def cond(x, p=None, name=None):
        Calculate the matrix norm, which is related to singular values, of a matrix
        or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.
        """
-        reduce_all = True if axis is None or axis == [] else False
+        if not in_dygraph_mode():
+            reduce_all = True if axis is None or axis == [] else False
         u, s, vh = svd(input, full_matrices=False)

-        if _non_static_mode():
-            if porder == "nuc":
-                if in_dygraph_mode():
-                    return _C_ops.sum(s, axis, None, False)
-                return _legacy_C_ops.reduce_sum(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-            if in_dygraph_mode():
-                max_out = _C_ops.max(s, axis, False)
-                min_out = _C_ops.min(s, axis, False)
-                if porder == 2:
-                    return _C_ops.divide(max_out, min_out)
-                if porder == -2:
-                    return _C_ops.divide(min_out, max_out)
-            else:
-                max_out = _legacy_C_ops.reduce_max(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                min_out = _legacy_C_ops.reduce_min(
-                    s, 'dim', axis, 'keepdim', False, 'reduce_all', reduce_all
-                )
-                if porder == 2:
-                    return _legacy_C_ops.elementwise_div(
-                        max_out, min_out, 'aixs', axis, 'use_mkldnn', False
-                    )
-                if porder == -2:
-                    return _legacy_C_ops.elementwise_div(
-                        min_out, max_out, 'aixs', axis, 'use_mkldnn', False
-                    )
-
-        block = LayerHelper('norm', **locals())
-        ...
-        return out
+        if in_dygraph_mode():
+            if porder == "nuc":
+                return _C_ops.sum(s, axis, None, False)
+            max_out = _C_ops.max(s, axis, False)
+            min_out = _C_ops.min(s, axis, False)
+            if porder == 2:
+                return _C_ops.divide(max_out, min_out)
+            if porder == -2:
+                return _C_ops.divide(min_out, max_out)
+        else:
+            block = LayerHelper('norm', **locals())
+            out = block.create_variable_for_type_inference(
+                dtype=block.input_dtype()
+            )
+            if porder == "nuc":
+                block.append_op(
+                    type='reduce_sum',
+                    inputs={'X': s},
+                    outputs={'Out': out},
+                    attrs={
+                        'dim': axis,
+                        'keep_dim': False,
+                        'reduce_all': reduce_all,
+                    },
+                )
+                return out
+            max_out = block.create_variable_for_type_inference(
+                dtype=block.input_dtype()
+            )
+            min_out = block.create_variable_for_type_inference(
+                dtype=block.input_dtype()
+            )
+            block.append_op(
+                type='reduce_max',
+                inputs={'X': s},
+                outputs={'Out': max_out},
+                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+            )
+            block.append_op(
+                type='reduce_min',
+                inputs={'X': s},
+                outputs={'Out': min_out},
+                attrs={'dim': axis, 'keep_dim': False, 'reduce_all': reduce_all},
+            )
+            if porder == 2:
+                block.append_op(
+                    type='elementwise_div',
+                    inputs={'X': max_out, 'Y': min_out},
+                    outputs={'Out': out},
+                    attrs={'aixs': axis, 'use_mkldnn': False},
+                )
+                return out
+            if porder == -2:
+                block.append_op(
+                    type='elementwise_div',
+                    inputs={'X': min_out, 'Y': max_out},
+                    outputs={'Out': out},
+                    attrs={'aixs': axis, 'use_mkldnn': False},
+                )
+                return out

     def empty_tensor(input, shape):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             return input.reshape(shape)
         raise ValueError("only support x is nonempty tensor in static mode")
...
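p_matrix_norm and svd_norm above are internal helpers of paddle.linalg.cond. A usage sketch, assuming current paddle.linalg.cond semantics; for this diagonal matrix the 2-norm condition number is sigma_max / sigma_min:

import paddle

x = paddle.to_tensor([[1.0, 0.0], [0.0, 2.0]])
print(paddle.linalg.cond(x))           # default p=None: 2-norm path via svd_norm, 2.0
print(paddle.linalg.cond(x, p='fro'))  # Frobenius path via p_matrix_norm
print(paddle.linalg.cond(x, p=1))      # column-sum path via p_matrix_norm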
@@ -1186,32 +1099,30 @@ def dot(x, y, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.dot(x, y)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.dot(x, y)
-
-    op_type = 'dot'
-    ...
-    return out
+    else:
+        op_type = 'dot'
+        assert x is not None, 'x cannot be None in {}'.format(op_type)
+        assert y is not None, 'y cannot be None in {}'.format(op_type)

+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], op_type
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float32', 'float64', 'int32', 'int64'], op_type
+        )

+        helper = LayerHelper(op_type, **locals())
+        if name is None:
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        else:
+            out = helper.create_variable(
+                name=name, dtype=x.dtype, persistable=False
+            )
+        helper.append_op(
+            type="dot", inputs={'X': x, 'Y': y}, attrs={}, outputs={"Out": out}
+        )
+        return out
...
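paddle.dot keeps its public contract; eager calls now go straight to _C_ops.dot. A minimal sketch:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([4.0, 5.0, 6.0])
print(paddle.dot(x, y))  # 1*4 + 2*5 + 3*6 = 32.0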
@@ -1389,36 +1300,28 @@ def t(input, name=None):
         perm = [1, 0]
         out = _C_ops.transpose(input, perm)
         return out
-
-    check_variable_and_dtype(
-        input,
-        'input',
-        ['float16', 'float32', 'float64', 'int32', 'int64'],
-        'transpose',
-    )
-    if _in_legacy_dygraph():
-        if len(input.shape) == 1:
-            return input
-        # 2-D tensor
-        perm = [1, 0]
-        out, _ = _legacy_C_ops.transpose2(input, 'axis', perm)
-        return out
-
-    helper = LayerHelper('t', **locals())
-    ...
-    return out
+    else:
+        check_variable_and_dtype(
+            input,
+            'input',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'transpose',
+        )
+        helper = LayerHelper('t', **locals())
+        out = helper.create_variable_for_type_inference(input.dtype)
+        input_shape = helper.create_variable_for_type_inference(input.dtype)
+        if len(input.shape) == 1:
+            out = input
+        else:
+            helper.append_op(
+                type='transpose2',
+                inputs={'X': [input]},
+                outputs={'Out': [out], 'XShape': [input_shape]},
+                attrs={'axis': [1, 0]},
+            )
+        return out
...
@@ -1462,24 +1365,18 @@ def cross(x, y, axis=9, name=None):
         axis = K_DEFAULT_DIM if axis is None else axis
         return _C_ops.cross(x, y, axis)
     else:
-        if _in_legacy_dygraph():
-            if axis is not None:
-                return _legacy_C_ops.cross(x, y, 'dim', axis)
-            else:
-                return _legacy_C_ops.cross(x, y)
-        else:
-            helper = LayerHelper("cross", **locals())
-            ...
+        helper = LayerHelper("cross", **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        attrs = dict()
+        attrs['dim'] = axis
+        helper.append_op(
+            type='cross',
+            inputs={'X': x, 'Y': y},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
...
@@ -1520,21 +1417,18 @@ def cholesky(x, upper=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cholesky(x, upper)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cholesky(x, "upper", upper)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
-    ...
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
+        check_type(upper, 'upper', bool, 'cholesky')
+        helper = LayerHelper('cholesky', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='cholesky',
+            inputs={'X': [x]},
+            outputs={'Out': out},
+            attrs={'upper': upper},
+        )
+        return out
...
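A usage sketch for the rewritten cholesky; the input must be symmetric positive-definite, so one is constructed here (values illustrative):

import paddle

a = paddle.rand([3, 3])
spd = paddle.matmul(a, a, transpose_y=True) + 3.0 * paddle.eye(3)
l = paddle.linalg.cholesky(spd)               # lower-triangular factor (upper=False)
print(paddle.matmul(l, l, transpose_y=True))  # reconstructs spd up to fp error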
@@ -1594,59 +1488,32 @@ def matrix_rank(x, tol=None, hermitian=False, name=None):
             tol_attr = float(tol)
             use_default_tol = False
         return _C_ops.matrix_rank(x, tol_attr, hermitian, use_default_tol)
-
-    if _in_legacy_dygraph():
-        if tol is None:
-            tol_tensor = None
-            tol_attr = 0.0
-            use_default_tol = True
-        elif isinstance(tol, Variable):
-            if tol.dtype != x.dtype:
-                tol_tensor = cast(tol, x.dtype)
-            else:
-                tol_tensor = tol
-            tol_attr = 0.0
-            use_default_tol = False
-        else:
-            tol_tensor = None
-            tol_attr = float(tol)
-            use_default_tol = False
-        return _legacy_C_ops.matrix_rank(
-            x, tol_tensor, "tol", tol_attr,
-            'hermitian', hermitian, 'use_default_tol', use_default_tol,
-        )
-
-    inputs = {}
-    ...
-    return out
+    else:
+        inputs = {}
+        attrs = {}
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
+        inputs['X'] = x
+        if tol is None:
+            attrs['use_default_tol'] = True
+        elif isinstance(tol, Variable):
+            attrs['use_default_tol'] = False
+            if tol.dtype != x.dtype:
+                inputs['TolTensor'] = cast(tol, x.dtype)
+            else:
+                inputs['TolTensor'] = tol
+        else:
+            check_type(tol, 'tol', float, 'matrix_rank')
+            attrs['use_default_tol'] = False
+            attrs['tol'] = tol
+        check_type(hermitian, 'hermitian', bool, 'matrix_rank')
+        attrs['hermitian'] = hermitian
+        helper = LayerHelper('matrix_rank', **locals())
+        out = helper.create_variable_for_type_inference(dtype='int32')
+        helper.append_op(
+            type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs
+        )
+        return out
...
@@ -1711,14 +1578,13 @@ def bmm(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.bmm(x, y)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.bmm(x, y)
-
-    helper = LayerHelper('bmm', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
-    return out
+    else:
+        helper = LayerHelper('bmm', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out}
+        )
+        return out
...
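bmm multiplies batches of matrices; both inputs must be 3-D with matching batch size. A minimal sketch:

import paddle

x = paddle.rand([10, 3, 4])
y = paddle.rand([10, 4, 5])
print(paddle.bmm(x, y).shape)  # [10, 3, 5]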
@@ -1748,24 +1614,19 @@ def histogram(input, bins=100, min=0, max=0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.histogram(input, bins, min, max)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.histogram(
-            input, "bins", bins, "min", min, "max", max
-        )
-
-    helper = LayerHelper('histogram', **locals())
-    ...
+    else:
+        helper = LayerHelper('histogram', **locals())
+        check_variable_and_dtype(
+            input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram'
+        )
+        out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
+        helper.append_op(
+            type='histogram',
+            inputs={'X': input},
+            outputs={'Out': out},
+            attrs={'bins': bins, 'min': min, 'max': max},
+        )
+        return out
...
@@ -1800,30 +1661,28 @@ def bincount(x, weights=None, minlength=0, name=None):
     if in_dygraph_mode():
         return _C_ops.bincount(x, weights, minlength)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.bincount(x, weights, "minlength", minlength)
-
-    helper = LayerHelper('bincount', **locals())
-    ...
-    return out
+    else:
+        helper = LayerHelper('bincount', **locals())
+        check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
+        if weights is not None:
+            check_variable_and_dtype(
+                weights,
+                'Weights',
+                ['int32', 'int64', 'float32', 'float64'],
+                'bincount',
+            )
+            out = helper.create_variable_for_type_inference(dtype=weights.dtype)
+        else:
+            out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='bincount',
+            inputs={'X': x, 'Weights': weights},
+            outputs={'Out': out},
+            attrs={'minlength': minlength},
+        )
+        return out
...
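A minimal sketch of bincount, whose output dtype follows weights when given (as the branch above shows):

import paddle

x = paddle.to_tensor([0, 1, 1, 3])
print(paddle.bincount(x))               # [1, 2, 0, 1]
print(paddle.bincount(x, minlength=6))  # [1, 2, 0, 1, 0, 0]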
@@ -1859,40 +1718,36 @@ def mv(x, vec, name=None):
     if in_dygraph_mode():
         return _C_ops.mv(x, vec)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.mv(x, vec)
-            return out
-        else:
-            def __check_input(x, vec):
-                ...
+        def __check_input(x, vec):
+            var_names = {'x': x, 'vec': vec}
+            for name, val in var_names.items():
+                check_variable_and_dtype(
+                    val, name, ['float32', 'float64'], 'mv'
+                )
+            x_shape = list(x.shape)
+            vec_shape = list(vec.shape)
+            if len(x_shape) != 2:
+                raise ValueError(
+                    "x should be 2-dimensional. But received x's dimention: {}".format(
+                        x_shape
+                    )
+                )
+            if len(vec_shape) != 1:
+                raise ValueError(
+                    "vec should be 1-dimensional. But received vec's dimention: {}".format(
+                        vec_shape
+                    )
+                )

+        __check_input(x, vec)

+        helper = LayerHelper('mv', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='mv', inputs={'X': x, 'Vec': vec}, outputs={'Out': out}
+        )
+        return out
...
@@ -1927,31 +1782,28 @@ def det(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.det(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.determinant(x)
-
-    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
-    ...
+    else:
+        check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
+        input_shape = list(x.shape)
+        assert len(input_shape) >= 2, (
+            "The x must be at least 2-dimensional, "
+            "but received Input x's dimensional: %s.\n" % len(input_shape)
+        )
+        assert (
+            input_shape[-1] == input_shape[-2]
+        ), "Expect squared input," "but received %s by %s matrix.\n" % (
+            input_shape[-2],
+            input_shape[-1],
+        )
+        helper = LayerHelper('determinant', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]}
+        )
+        return out
...
@@ -1989,31 +1841,30 @@ def slogdet(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.slogdet(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.slogdeterminant(x)
-
-    check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
-    ...
+    else:
+        check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
+        input_shape = list(x.shape)
+        assert len(input_shape) >= 2, (
+            "The x must be at least 2-dimensional, "
+            "but received Input x's dimensional: %s.\n" % len(input_shape)
+        )
+        assert (
+            input_shape[-1] == input_shape[-2]
+        ), "Expect squared input," "but received %s by %s matrix.\n" % (
+            input_shape[-2],
+            input_shape[-1],
+        )
+        helper = LayerHelper('slogdeterminant', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='slogdeterminant',
+            inputs={'Input': [x]},
+            outputs={'Out': [out]},
+        )
+        return out
...
@@ -2071,23 +1922,22 @@ def svd(x, full_matrices=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.svd(x, full_matrices)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.svd(x, 'full_matrices', full_matrices)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
-    ...
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
+        check_type(full_matrices, 'full_matrices', bool, 'svd')
+        helper = LayerHelper('svd', **locals())
+        u = helper.create_variable_for_type_inference(dtype=x.dtype)
+        vh = helper.create_variable_for_type_inference(dtype=x.dtype)
+        s = helper.create_variable_for_type_inference(dtype=x.dtype)
+        attrs = dict()
+        attrs['full_matrices'] = full_matrices
+        helper.append_op(
+            type='svd',
+            inputs={'X': [x]},
+            outputs={'U': u, 'VH': vh, 'S': s},
+            attrs=attrs,
+        )
+        return u, s, vh
...
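A minimal sketch of svd; with full_matrices=False the factors take the reduced shapes:

import paddle

x = paddle.rand([4, 3])
u, s, vh = paddle.linalg.svd(x, full_matrices=False)
print(u.shape, s.shape, vh.shape)  # [4, 3] [3] [3, 3]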
@@ -2146,21 +1996,20 @@ def matrix_power(x, n, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matrix_power(x, n)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.matrix_power(x, "n", n)
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
-    ...
+    else:
+        check_variable_and_dtype(
+            x, 'dtype', ['float32', 'float64'], 'matrix_power'
+        )
+        check_type(n, 'n', int, 'matrix_power')
+        helper = LayerHelper('matrix_power', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='matrix_power',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'n': n},
+        )
+        return out
...
@@ -2211,26 +2060,21 @@ def qr(x, mode="reduced", name=None):
             return r
         else:
             return q, r
-
-    if _in_legacy_dygraph():
-        q, r = _legacy_C_ops.qr(x, 'mode', mode)
-        if mode == "r":
-            return r
-        else:
-            return q, r
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
-    ...
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
+        check_type(mode, 'mode', str, 'qr')
+        helper = LayerHelper('qr', **locals())
+        q = helper.create_variable_for_type_inference(dtype=x.dtype)
+        r = helper.create_variable_for_type_inference(dtype=x.dtype)
+        attrs = dict()
+        attrs['mode'] = mode
+        helper.append_op(
+            type='qr', inputs={'X': [x]}, outputs={'Q': q, 'R': r}, attrs=attrs
+        )
+        if mode == "r":
+            return r
+        else:
+            return q, r
...
@@ -2315,8 +2159,6 @@ def lu(x, pivot=True, get_infos=False, name=None):
     if in_dygraph_mode():
         lu, p, info = _C_ops.lu(x, pivot)
-    elif paddle.in_dynamic_mode():
-        lu, p, info = _legacy_C_ops.lu(x, 'pivot', pivot)
     else:
         check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
         helper = LayerHelper('lu', **locals())
...
@@ -2413,29 +2255,25 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
     if in_dygraph_mode():
         P, L, U = _C_ops.lu_unpack(x, y, unpack_ludata, unpack_pivots)
         return P, L, U
-
-    if paddle.in_dynamic_mode():
-        P, L, U = _legacy_C_ops.lu_unpack(
-            x, y, 'unpack_ludata', unpack_ludata, 'unpack_pivots', unpack_pivots
-        )
-        return P, L, U
-
-    check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
-    ...
+    else:
+        check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
+        helper = LayerHelper('lu_unpack', **locals())
+        p = helper.create_variable_for_type_inference(dtype=x.dtype)
+        l = helper.create_variable_for_type_inference(dtype=x.dtype)
+        u = helper.create_variable_for_type_inference(dtype=x.dtype)
+        attrs = dict()
+        attrs['unpack_ludata'] = unpack_ludata
+        attrs['unpack_pivots'] = unpack_pivots
+        helper.append_op(
+            type='lu_unpack',
+            inputs={'X': x, 'Pivots': y},
+            outputs={'Pmat': p, 'L': l, 'U': u},
+            attrs=attrs,
+        )
+        return p, l, u
...
@@ -2486,23 +2324,20 @@ def eig(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.eig(x)
-    elif paddle.in_dynamic_mode():
-        w, v = _legacy_C_ops.eig(x)
-        return w, v
-
-    check_variable_and_dtype(
-        x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig'
-    )
-    ...
+    else:
+        check_variable_and_dtype(
+            x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig'
+        )
+        helper = LayerHelper('eig', **locals())
+        w = helper.create_variable_for_type_inference(x.dtype)
+        v = helper.create_variable_for_type_inference(x.dtype)
+        inputs = {'X': x}
+        outputs = {'Eigenvalues': w, 'Eigenvectors': v}
+        helper.append_op(type='eig', inputs=inputs, outputs=outputs)
+        return w, v
...
@@ -2562,13 +2397,11 @@ def eigvals(x, name=None):
     if in_dygraph_mode():
         return _C_ops.eigvals(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.eigvals(x)
-
-    helper = LayerHelper('eigvals', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
-    return out
+    else:
+        helper = LayerHelper('eigvals', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
+        return out
...
@@ -2627,29 +2460,29 @@ def multi_dot(x, name=None):
     # [10, 7]
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multi_dot(x)
     if in_dygraph_mode():
         return _C_ops.multi_dot(x)
-
-    check_type(x, 'x', (list, tuple), 'multi_dot')
-    ...
-    return out
+    else:
+        check_type(x, 'x', (list, tuple), 'multi_dot')
+        for id, item in enumerate(x):
+            check_variable_and_dtype(
+                item,
+                'x[' + str(id) + ']',
+                ['float16', 'float32', 'float64'],
+                'multi_dot',
+            )
+            if item.dtype != x[0].dtype:
+                raise TypeError(
+                    "All the Tensors in the input must have the same data type."
+                )

+        helper = LayerHelper('multi_dot', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type='multi_dot', inputs={"X": x}, outputs={"Out": out}
+        )
+        return out
...
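A minimal sketch of multi_dot, which chooses the cheapest association order for a chain of matrix products:

import paddle

a = paddle.rand([10, 100])
b = paddle.rand([100, 50])
c = paddle.rand([50, 5])
print(paddle.linalg.multi_dot([a, b, c]).shape)  # [10, 5]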
@@ -2687,45 +2520,46 @@ def eigh(x, UPLO='L', name=None):
     """
     if in_dygraph_mode():
         return _C_ops.eigh(x, UPLO)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.eigh(x, 'UPLO', UPLO)
-
         def __check_input(x, UPLO):
             x_shape = list(x.shape)
             if len(x.shape) < 2:
                 raise ValueError(
                     "Input(input) only support >=2 tensor, but received "
                     "length of Input(input) is %s." % len(x.shape)
                 )
             if x_shape[-1] != x_shape[-2]:
                 raise ValueError(
                     "The input matrix must be batches of square matrices. But received x's dimention: {}".format(
                         x_shape
                     )
                 )
             if UPLO != 'L' and UPLO != 'U':
                 raise ValueError(
                     "UPLO must be L or U. But received UPLO is: {}".format(UPLO)
                 )

         __check_input(x, UPLO)

         helper = LayerHelper('eigh', **locals())
         check_variable_and_dtype(
             x,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'eigh',
         )
         out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
         out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='eigh',
             inputs={'X': x},
             outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector},
             attrs={'UPLO': UPLO},
         )
         return out_value, out_vector
...
@@ -2838,68 +2672,6 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
...
@@ -2838,68 +2672,6 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None):
             u_conj = _C_ops.conj(u)
             out_2 = _C_ops.matmul(out_1, u_conj, False, True)
             return out_2
-    if _in_legacy_dygraph():
-        if not hermitian:
-            # combine svd and matmul op
-            u, s, vt = _legacy_C_ops.svd(x, 'full_matrices', False)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=x.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=x.dtype)
-
-            condition = s > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            dims = list(range(len(vt.shape)))
-            perm = dims[:-2] + [dims[-1]] + [dims[-2]]
-            v, _ = _legacy_C_ops.transpose2(vt, 'axis', perm)
-
-            out_1 = v * st
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
-        else:
-            # combine eigh and matmul op
-            s, u = _legacy_C_ops.eigh(x, 'UPLO', 'L')
-            s_abs = paddle.abs(s)
-            max_singular_val = _legacy_C_ops.reduce_max(
-                s_abs, 'dim', [-1], 'keep_dim', True, 'reduce_all', False
-            )
-            rcond = paddle.to_tensor(rcond, dtype=s.dtype)
-            cutoff = rcond * max_singular_val
-            y = float('inf')
-            y = paddle.to_tensor(y, dtype=s.dtype)
-
-            condition = s_abs > cutoff
-            cond_int = cast(condition, s.dtype)
-            cond_not_int = cast(logical_not(condition), s.dtype)
-            out1 = multiply(1 / s, cond_int)
-            out2 = multiply(1 / y, cond_not_int)
-            singular = add(out1, out2)
-            st, _ = _legacy_C_ops.unsqueeze2(singular, 'axes', [-2])
-
-            out_1 = u * st
-            u_conj = _legacy_C_ops.conj(u)
-            if in_dygraph_mode():
-                out_2 = _C_ops.matmul(out_1, u_conj, False, True)
-            else:
-                out_2 = _legacy_C_ops.matmul_v2(
-                    out_1, u_conj, 'trans_x', False, 'trans_y', True
-                )
-            return out_2
     else:
         if not hermitian:
             helper = LayerHelper('pinv', **locals())
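Note: a minimal usage sketch of the public paddle.linalg.pinv API whose legacy path is deleted here (values illustrative, not from the commit):

    import paddle

    x = paddle.arange(15, dtype='float32').reshape((3, 5))
    x_pinv = paddle.linalg.pinv(x, rcond=1e-15)  # SVD-based pseudo-inverse, shape [5, 3]
    # paddle.matmul(x, x_pinv) should be close to a 3x3 identity matrix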
...
@@ -3098,20 +2870,17 @@ def solve(x, y, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
solve
(
x
,
y
)
return
_C_ops
.
solve
(
x
,
y
)
else
:
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
=
LayerHelper
(
"solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
if
_in_legacy_dygraph
():
helper
.
append_op
(
return
_legacy_C_ops
.
solve
(
x
,
y
)
type
=
"solve"
,
inputs
=
{
"X"
:
x
,
"Y"
:
y
},
outputs
=
{
"Out"
:
out
}
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
return
out
helper
=
LayerHelper
(
"solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
"solve"
,
inputs
=
{
"X"
:
x
,
"Y"
:
y
},
outputs
=
{
"Out"
:
out
}
)
return
out
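Note: a minimal usage sketch of paddle.linalg.solve, the API restructured above (sample values are illustrative):

    import paddle

    a = paddle.to_tensor([[3.0, 1.0], [1.0, 2.0]])
    b = paddle.to_tensor([9.0, 8.0])
    x = paddle.linalg.solve(a, b)  # solves a @ x = b, giving [2., 3.]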
def triangular_solve(
...
@@ -3170,36 +2939,28 @@ def triangular_solve(
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
triangular_solve
(
x
,
y
,
upper
,
transpose
,
unitriangular
)
return
_C_ops
.
triangular_solve
(
x
,
y
,
upper
,
transpose
,
unitriangular
)
else
:
if
paddle
.
in_dynamic_mode
():
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
return
_legacy_C_ops
.
triangular_solve
(
helper
=
LayerHelper
(
"triangular_solve"
,
**
locals
())
x
,
check_variable_and_dtype
(
y
,
x
,
'x'
,
[
'float32'
,
'float64'
],
'triangular_solve'
'upper'
,
upper
,
'transpose'
,
transpose
,
'unitriangular'
,
unitriangular
,
)
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
inputs
=
{
"X"
:
[
x
],
"Y"
:
[
y
]}
helper
.
append_op
(
helper
=
LayerHelper
(
"triangular_solve"
,
**
locals
())
type
=
'triangular_solve'
,
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'triangular_solve'
)
outputs
=
{
'Out'
:
out
},
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
attrs
=
{
'upper'
:
upper
,
helper
.
append_op
(
'transpose'
:
transpose
,
type
=
'triangular_solve'
,
'unitriangular'
:
unitriangular
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
},
outputs
=
{
'Out'
:
out
},
)
attrs
=
{
return
out
'upper'
:
upper
,
'transpose'
:
transpose
,
'unitriangular'
:
unitriangular
,
},
)
return
out
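Note: a minimal usage sketch of paddle.linalg.triangular_solve (the worked values follow by back-substitution and are illustrative):

    import paddle

    a = paddle.to_tensor([[1.0, 1.0, 1.0],
                          [0.0, 2.0, 1.0],
                          [0.0, 0.0, -1.0]])
    b = paddle.to_tensor([[0.0], [-9.0], [5.0]])
    x = paddle.linalg.triangular_solve(a, b, upper=True)  # [[7.], [-2.], [-5.]]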
def cholesky_solve(x, y, upper=False, name=None):
...
@@ -3237,22 +2998,23 @@ def cholesky_solve(x, y, upper=False, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
cholesky_solve
(
x
,
y
,
upper
)
return
_C_ops
.
cholesky_solve
(
x
,
y
,
upper
)
else
:
helper
=
LayerHelper
(
"cholesky_solve"
,
**
locals
())
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
if
_in_legacy_dygraph
():
helper
.
append_op
(
return
_legacy_C_ops
.
cholesky_solve
(
x
,
y
,
'upper'
,
upper
)
type
=
'cholesky_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
helper
=
LayerHelper
(
"cholesky_solve"
,
**
locals
())
outputs
=
{
'Out'
:
out
},
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
attrs
=
{
'upper'
:
upper
},
check_variable_and_dtype
(
y
,
'y'
,
[
'float32'
,
'float64'
],
'cholesky_solve'
)
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
return
out
helper
.
append_op
(
type
=
'cholesky_solve'
,
inputs
=
{
'X'
:
x
,
'Y'
:
y
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'upper'
:
upper
},
)
return
out
def eigvalsh(x, UPLO='L', name=None):
...
@@ -3284,51 +3046,47 @@ def eigvalsh(x, UPLO='L', name=None):
     if in_dygraph_mode():
         values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient)
         return values
-    elif paddle.in_dynamic_mode():
-        is_test = x.stop_gradient
-        values, _ = _legacy_C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
-        return values
+    else:

         def __check_input(x, UPLO):
             x_shape = list(x.shape)
             if len(x.shape) < 2:
                 raise ValueError(
                     "Input(input) only support >=2 tensor, but received "
                     "length of Input(input) is %s." % len(x.shape)
                 )
             if x_shape[-1] != x_shape[-2]:
                 raise ValueError(
                     "The input matrix must be batches of square matrices. But received x's dimention: {}".format(
                         x_shape
                     )
                 )
             if UPLO != 'L' and UPLO != 'U':
                 raise ValueError(
                     "UPLO must be L or U. But received UPLO is: {}".format(UPLO)
                 )

         __check_input(x, UPLO)

         helper = LayerHelper('eigvalsh', **locals())
         check_variable_and_dtype(
             x,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'eigvalsh',
         )

         out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
         out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)

         is_test = x.stop_gradient
         helper.append_op(
             type='eigvalsh',
             inputs={'X': x},
             outputs={'Eigenvalues': out_value, 'Eigenvectors': out_vector},
             attrs={'UPLO': UPLO, 'is_test': is_test},
         )
         return out_value
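Note: a minimal usage sketch of paddle.linalg.eigvalsh, whose legacy branch is removed above (values illustrative):

    import paddle

    x = paddle.to_tensor([[1.0, -2.0], [-2.0, 3.0]])
    w = paddle.linalg.eigvalsh(x, UPLO='L')  # eigenvalues only; the is_test attr tracks x.stop_gradient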
def lstsq(x, y, rcond=None, driver=None, name=None):
...
@@ -3423,16 +3181,10 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
     elif x.dtype == paddle.float64:
         rcond = 1e-15 * max(x.shape[-2], x.shape[-1])

-    if _non_static_mode():
-        if in_dygraph_mode():
-            solution, residuals, rank, singular_values = _C_ops.lstsq(
-                x, y, rcond, driver
-            )
-        else:
-            solution, residuals, rank, singular_values = _legacy_C_ops.lstsq(
-                x, y, 'rcond', rcond, 'driver', driver
-            )
+    if in_dygraph_mode():
+        solution, residuals, rank, singular_values = _C_ops.lstsq(
+            x, y, rcond, driver
+        )
         if driver == "gels":
             rank = paddle.empty(shape=[0], dtype=paddle.int32)
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)
...
@@ -3440,39 +3192,51 @@ def lstsq(x, y, rcond=None, driver=None, name=None):
             singular_values = paddle.empty(shape=[0], dtype=x.dtype)

         return solution, residuals, rank, singular_values
+    else:
+        helper = LayerHelper('lstsq', **locals())
         check_variable_and_dtype(
             x,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'lstsq',
         )
         check_variable_and_dtype(
             y,
             'dtype',
             ['float32', 'float64', 'complex64', 'complex128'],
             'lstsq',
         )

         solution = helper.create_variable_for_type_inference(dtype=x.dtype)
         residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
         rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
         singular_values = helper.create_variable_for_type_inference(
             dtype=x.dtype
         )
         helper.append_op(
             type='lstsq',
             inputs={'X': x, 'Y': y},
             outputs={
                 'Solution': solution,
                 'Residuals': residuals,
                 'Rank': rank,
                 'SingularValues': singular_values,
             },
             attrs={'rcond': rcond, 'driver': driver},
         )

         if driver == "gels":
             rank = paddle.static.data(name='rank', shape=[0])
             singular_values = paddle.static.data(
                 name='singular_values', shape=[0]
             )
         elif driver == "gelsy":
             singular_values = paddle.static.data(
                 name='singular_values', shape=[0]
             )

         return solution, residuals, rank, singular_values
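Note: a minimal usage sketch of paddle.linalg.lstsq, restructured above (driver choice and values are illustrative):

    import paddle

    x = paddle.to_tensor([[1.0, 3.0], [3.0, 2.0], [5.0, 6.0]])
    y = paddle.to_tensor([[3.0, 4.0], [5.0, 3.0], [7.0, 3.0]])
    solution, residuals, rank, singular_values = paddle.linalg.lstsq(
        x, y, driver='gelsd'
    )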
def corrcoef(x, rowvar=True, name=None):
...
python/paddle/tensor/logic.py
Browse file @ 861fef52
...
@@ -26,10 +26,9 @@ if _in_eager_mode_:
 else:
     from ..framework import VarBase as Tensor

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.tensor.creation import full
-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import LayerHelper, in_dygraph_mode

 __all__ = []
...
@@ -42,47 +41,52 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
         if binary_op:
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
+    else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
             op_name,
         )
         if y is not None:
             check_variable_and_dtype(
                 y,
                 "y",
                 ["bool", "int8", "int16", "int32", "int64", "float32", "float64"],
                 op_name,
             )
         if out is not None:
             check_type(out, "out", Variable, op_name)

         helper = LayerHelper(op_name, **locals())

         if binary_op and x.dtype != y.dtype:
             raise ValueError(
                 "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
                 % (op_name, x.dtype, y.dtype)
             )

         if out is None:
             out = helper.create_variable_for_type_inference(dtype=x.dtype)

         if binary_op:
             helper.append_op(
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
             helper.append_op(
                 type=op_name, inputs={"X": x}, outputs={"Out": out}
             )

         return out
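Note: _logical_op is only a shared dispatcher; the public wrappers built on it behave as in this minimal sketch (values illustrative):

    import paddle

    x = paddle.to_tensor([True, False, True])
    y = paddle.to_tensor([True, True, False])
    print(paddle.logical_and(x, y))  # [True, False, False]
    print(paddle.logical_not(x))     # [False, True, False]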
def logical_and(x, y, out=None, name=None):
...
@@ -288,21 +292,19 @@ def is_empty(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
is_empty
(
x
)
return
_C_ops
.
is_empty
(
x
)
if
_in_legacy_dygraph
():
else
:
return
_legacy_C_ops
.
is_empty
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'is_empty'
check_variable_and_dtype
(
)
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'is_empty'
check_type
(
name
,
"name"
,
(
str
,
type
(
None
)),
"is_empty"
)
)
check_type
(
name
,
"name"
,
(
str
,
type
(
None
)),
"is_empty"
)
helper
=
LayerHelper
(
"is_empty"
,
**
locals
())
helper
=
LayerHelper
(
"is_empty"
,
**
locals
())
cond
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
cond
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
cond
.
stop_gradient
=
True
cond
.
stop_gradient
=
True
helper
.
append_op
(
helper
.
append_op
(
type
=
'is_empty'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
[
cond
]}
type
=
'is_empty'
,
inputs
=
{
'X'
:
[
x
]},
outputs
=
{
'Out'
:
[
cond
]}
)
)
return
cond
return
cond
def equal_all(x, y, name=None):
...
@@ -336,16 +338,15 @@ def equal_all(x, y, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
equal_all
(
x
,
y
)
return
_C_ops
.
equal_all
(
x
,
y
)
else
:
if
paddle
.
in_dynamic_mode
():
helper
=
LayerHelper
(
"equal_all"
,
**
locals
())
return
_legacy_C_ops
.
equal_all
(
x
,
y
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
helper
.
append_op
(
helper
=
LayerHelper
(
"equal_all"
,
**
locals
())
type
=
'equal_all'
,
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
'bool'
)
inputs
=
{
'X'
:
[
x
],
'Y'
:
[
y
]},
helper
.
append_op
(
outputs
=
{
'Out'
:
[
out
]},
type
=
'equal_all'
,
inputs
=
{
'X'
:
[
x
],
'Y'
:
[
y
]},
outputs
=
{
'Out'
:
[
out
]}
)
)
return
out
return
out
@templatedoc()
...
@@ -393,27 +394,24 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.allclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.allclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
-
+    else:
         check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
         check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')
         check_type(rtol, 'rtol', float, 'allclose')
         check_type(atol, 'atol', float, 'allclose')
         check_type(equal_nan, 'equal_nan', bool, 'allclose')

         helper = LayerHelper("allclose", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')

         inputs = {'Input': x, 'Other': y}
         outputs = {'Out': out}
         attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
         helper.append_op(
             type='allclose', inputs=inputs, outputs=outputs, attrs=attrs
         )

         return out
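Note: a minimal usage sketch of paddle.allclose, simplified above (values illustrative):

    import paddle

    x = paddle.to_tensor([10000.0, 1e-07])
    y = paddle.to_tensor([10000.1, 1e-08])
    print(paddle.allclose(x, y, rtol=1e-05, atol=1e-08))  # False: second pair differs too much

    a = paddle.to_tensor([1.0, float('nan')])
    b = paddle.to_tensor([1.0, float('nan')])
    print(paddle.allclose(a, b, equal_nan=True))          # True: NaNs compare equal here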
@templatedoc()
...
@@ -457,31 +455,28 @@ def equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "equal",
         )
         helper = LayerHelper("equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
@templatedoc()
...
@@ -513,31 +508,28 @@ def greater_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.greater_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_equal",
         )
         helper = LayerHelper("greater_equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='greater_equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
@templatedoc()
...
@@ -569,31 +561,28 @@ def greater_than(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.greater_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.greater_than(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_than",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "greater_than",
         )
         helper = LayerHelper("greater_than", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='greater_than',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
@templatedoc()
...
@@ -626,31 +615,28 @@ def less_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.less_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "less_equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "less_equal",
         )
         helper = LayerHelper("less_equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='less_equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
@templatedoc()
...
@@ -683,31 +669,28 @@ def less_than(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.less_than(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.less_than(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "less_than",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "less_than",
         )
         helper = LayerHelper("less_than", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='less_than',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
@templatedoc()
...
@@ -740,31 +723,28 @@ def not_equal(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.not_equal(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.not_equal(x, y)
-        else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "float32", "float64", "int32", "int64"],
             "not_equal",
         )
         check_variable_and_dtype(
             y,
             "y",
             ["bool", "float32", "float64", "int32", "int64"],
             "not_equal",
         )
         helper = LayerHelper("not_equal", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')
         out.stop_gradient = True
         helper.append_op(
             type='not_equal',
             inputs={'X': [x], 'Y': [y]},
             outputs={'Out': [out]},
         )
         return out
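Note: the six comparison ops above share the same shape of change; a minimal usage sketch (values illustrative):

    import paddle

    x = paddle.to_tensor([1, 2, 3])
    y = paddle.to_tensor([1, 3, 2])
    print(paddle.equal(x, y))         # [True, False, False]
    print(paddle.greater_than(x, y))  # [False, False, True]
    print(paddle.less_equal(x, y))    # [True, True, False]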
def is_tensor(x):
...
@@ -802,41 +782,40 @@ def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
         if binary_op:
             return op(x, y)
         else:
             return op(x)
-    elif _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        if binary_op:
-            return op(x, y)
-        else:
-            return op(x)
+    else:
         check_variable_and_dtype(
             x,
             "x",
             ["bool", "uint8", "int8", "int16", "int32", "int64"],
             op_name,
         )
         if y is not None:
             check_variable_and_dtype(
                 y,
                 "y",
                 ["bool", "uint8", "int8", "int16", "int32", "int64"],
                 op_name,
             )
         if out is not None:
             check_type(out, "out", Variable, op_name)

         helper = LayerHelper(op_name, **locals())
         if binary_op:
             assert x.dtype == y.dtype

         if out is None:
             out = helper.create_variable_for_type_inference(dtype=x.dtype)

         if binary_op:
             helper.append_op(
                 type=op_name, inputs={"X": x, "Y": y}, outputs={"Out": out}
             )
         else:
             helper.append_op(
                 type=op_name, inputs={"X": x}, outputs={"Out": out}
             )

         return out
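Note: _bitwise_op backs paddle.bitwise_and / bitwise_or / bitwise_xor / bitwise_not; a minimal sketch with two's-complement results (values illustrative):

    import paddle

    x = paddle.to_tensor([-5, -1, 1])
    y = paddle.to_tensor([4, 2, -3])
    print(paddle.bitwise_and(x, y))  # [0, 2, 1]
    print(paddle.bitwise_not(x))     # [4, 0, -2]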
@templatedoc()
...
@@ -998,24 +977,20 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     if in_dygraph_mode():
         return _C_ops.isclose(x, y, rtol, atol, equal_nan)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isclose(
-            x, y, 'rtol', str(rtol), 'atol', str(atol), 'equal_nan', equal_nan
-        )
-
+    else:
         check_variable_and_dtype(x, "input", ['float32', 'float64'], 'isclose')
         check_variable_and_dtype(y, "input", ['float32', 'float64'], 'isclose')
         check_type(rtol, 'rtol', float, 'isclose')
         check_type(atol, 'atol', float, 'isclose')
         check_type(equal_nan, 'equal_nan', bool, 'isclose')

         helper = LayerHelper("isclose", **locals())
         out = helper.create_variable_for_type_inference(dtype='bool')

         inputs = {'Input': x, 'Other': y}
         outputs = {'Out': out}
         attrs = {'rtol': str(rtol), 'atol': str(atol), 'equal_nan': equal_nan}
         helper.append_op(
             type='isclose', inputs=inputs, outputs=outputs, attrs=attrs
         )
         return out
python/paddle/tensor/manipulation.py
Browse file @ 861fef52
...
@@ -19,17 +19,16 @@ from collections import Counter
 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only

-from ..common_ops_import import _varbase_creator, fill_constant
+from ..common_ops_import import fill_constant
 from ..fluid.data_feeder import (
     check_dtype,
     check_type,
     check_variable_and_dtype,
     convert_dtype,
 )
-from ..fluid.framework import _in_legacy_dygraph, _non_static_mode
 from ..fluid.layers import utils
 from ..framework import (
     LayerHelper,
...
@@ -124,7 +123,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             paddle.tensor.array.array_write(x1, i + 1, array)
             output, output_index = paddle.tensor.manipulation.tensor_array_to_tensor(input=array)
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         assert isinstance(
             input, list
         ), "The 'input' in tensor_array_to_tensor must be list"
...
@@ -136,26 +135,28 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None):
             np.array(list(map(lambda x: int(x.shape[axis]), input)))
         )
         return res, sizes
+    else:
         check_type(input, 'input', (list, Variable), 'tensor_array_to_tensor')
         if isinstance(input, list):
             for i, input_x in enumerate(input):
                 check_type(
                     input_x,
                     'input[' + str(i) + ']',
                     Variable,
                     'tensor_array_to_tensor',
                 )
         helper = LayerHelper('tensor_array_to_tensor', **locals())
         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype()
         )
         out_index = helper.create_variable_for_type_inference(dtype="int32")
         helper.append_op(
             type='tensor_array_to_tensor',
             inputs={'X': input},
             outputs={'Out': [out], 'OutIndex': [out_index]},
             attrs={'axis': axis, 'use_stack': use_stack},
         )
         return out, out_index
def cast(x, dtype):
...
@@ -186,59 +187,53 @@ def cast(x, dtype):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
         return _C_ops.cast(x, dtype)
-
-    if _non_static_mode():
-        if not isinstance(dtype, core.VarDesc.VarType):
-            dtype = convert_np_dtype_to_dtype_(dtype)
-        out = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return out
+    else:
         check_variable_and_dtype(
             x,
             'x',
             [
                 'bool',
                 'float16',
                 'float32',
                 'float64',
                 'int16',
                 'int32',
                 'int64',
                 'uint8',
                 'uint16',
             ],
             'cast',
         )
         check_dtype(
             dtype,
             'dtype',
             [
                 'bool',
                 'float16',
                 'float32',
                 'float64',
                 'int8',
                 'int16',
                 'int32',
                 'int64',
                 'uint8',
                 'uint16',
             ],
             'cast',
         )

         helper = LayerHelper('cast', **locals())
         out = helper.create_variable_for_type_inference(
             dtype=dtype, stop_gradient=x.stop_gradient
         )
         helper.append_op(
             type='cast',
             inputs={'X': [x]},
             outputs={'Out': [out]},
             attrs={'in_dtype': x.dtype, 'out_dtype': out.dtype},
         )
         return out
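Note: a minimal usage sketch of paddle.cast after the cleanup above (values illustrative):

    import paddle

    x = paddle.to_tensor([1.2, 2.5, -3.9], dtype='float32')
    y = paddle.cast(x, 'int32')  # float-to-int cast truncates toward zero: [1, 2, -3]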
def slice(input, axes, starts, ends):
    """
...
@@ -362,134 +357,69 @@ def slice(input, axes, starts, ends):
         return _C_ops.slice(input, axes, starts, ends, infer_flags, [])
     else:
-        if _in_legacy_dygraph():
-            attrs = ()
-            starts_tensor = None
-            ends_tensor = None
-
-            if isinstance(axes, (list, tuple)):
-                axes = list(axes)
-                if len(axes) == 0:
-                    raise ValueError(
-                        "Input axes should not be an empty list/tuple."
-                    )
-                for i in range(len(axes)):
-                    if axes[i] < 0:
-                        axes[i] = max(0, axes[i] + len(input.shape))
-                    else:
-                        axes[i] = min(len(input.shape) - 1, axes[i])
-            else:
-                raise ValueError(
-                    "Input axes must be a python list or tuple, but reveived {}".format(
-                        type(axes)
-                    )
-                )
-
-            infer_flags = list(1 for i in range(len(axes)))
-            tmp_tensor_type = Variable
-
-            if isinstance(starts, (list, tuple)):
-                starts = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in starts
-                ]
-                attrs += ('starts', starts)
-            elif isinstance(starts, tmp_tensor_type):
-                starts_tensor = starts
-                starts.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-
-            if isinstance(ends, (list, tuple)):
-                ends = [
-                    item.numpy().item(0)
-                    if isinstance(item, tmp_tensor_type)
-                    else item
-                    for item in ends
-                ]
-                attrs += ('ends', ends)
-            elif isinstance(ends, tmp_tensor_type):
-                ends_tensor = ends
-                ends_tensor.stop_gradient = True
-                infer_flags = list(-1 for i in range(len(axes)))
-
-            return _legacy_C_ops.slice(
-                input,
-                starts_tensor,
-                ends_tensor,
-                None,
-                None,
-                'axes',
-                axes,
-                'infer_flags',
-                infer_flags,
-                *attrs,
-            )
         if not isinstance(starts, (list, tuple, Variable)):
             raise ValueError(
                 "Input starts must be an Variable, python list or tuple."
             )
         if not isinstance(ends, (list, tuple, Variable)):
             raise ValueError(
                 "Input ends must be an Variable, python list or tuple."
             )

         helper = LayerHelper('slice', **locals())
         inputs = {'Input': input}
         attrs = {'axes': axes}
         infer_flags = list(1 for i in range(len(axes)))

         # starts
         if isinstance(starts, Variable):
             starts.stop_gradient = True
             inputs['StartsTensor'] = starts
             infer_flags = list(-1 for i in range(len(axes)))
         elif isinstance(starts, (list, tuple)):
             attrs['starts'] = []
             if utils._contain_var(starts):
                 inputs['StartsTensorList'] = utils._convert_to_tensor_list(
                     starts
                 )
                 for i, dim in enumerate(starts):
                     if isinstance(dim, Variable):
                         attrs['starts'].append(-1)
                         infer_flags[i] = -1
                     else:
                         attrs['starts'].append(dim)
             else:
                 attrs['starts'] = starts

         # ends
         if isinstance(ends, Variable):
             ends.stop_gradient = True
             inputs['EndsTensor'] = ends
             infer_flags = list(-1 for i in range(len(axes)))
         elif isinstance(ends, (list, tuple)):
             attrs['ends'] = []
             if utils._contain_var(ends):
                 inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
                 for i, dim in enumerate(ends):
                     if isinstance(dim, Variable):
                         attrs['ends'].append(-1)
                         infer_flags[i] = -1
                     else:
                         attrs['ends'].append(dim)
             else:
                 attrs['ends'] = ends

         # infer_flags
         attrs['infer_flags'] = infer_flags
         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype('input')
         )
         helper.append_op(
             type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out}
         )
         return out
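Note: a minimal usage sketch of paddle.slice, whose legacy branch is deleted above (shapes illustrative):

    import paddle

    x = paddle.rand(shape=[4, 5, 6], dtype='float32')
    # keep rows 0..1 on axis 0 and elements 2..3 on axis 1 -> shape [2, 2, 6]
    out = paddle.slice(x, axes=[0, 1], starts=[0, 2], ends=[2, 4])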
def transpose(x, perm, name=None):
...
@@ -545,53 +475,49 @@ def transpose(x, perm, name=None):
     if in_dygraph_mode():
         return _C_ops.transpose(x, perm)
     else:
-        if _in_legacy_dygraph():
-            out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-            return out
         check_variable_and_dtype(
             x,
             'x',
             [
                 'bool',
                 'float16',
                 'float32',
                 'float64',
                 'int32',
                 'int64',
                 'complex64',
                 'complex128',
             ],
             'transpose',
         )
         check_type(perm, 'perm', (list, tuple), 'transpose')
         if isinstance(perm, tuple):
             perm = list(perm)
         if len(perm) != len(x.shape):
             raise ValueError(
                 "Input(perm) is the permutation of dimensions of Input(x), "
                 "its length should be equal to dimensions of Input(x), "
                 "but received dimension of Input(x) is %s, "
                 "the length of Input(perm) is %s." % (len(x.shape), len(perm))
             )
         for idx, dim in enumerate(perm):
             if dim >= len(x.shape):
                 raise ValueError(
                     "Each element in Input(perm) should be less than Input(x)'s dimension, "
                     "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                     "dimension %d." % (idx, perm[idx], len(x.shape))
                 )

         helper = LayerHelper('transpose', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         x_shape = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
             type='transpose2',
             inputs={'X': [x]},
             outputs={'Out': [out], 'XShape': [x_shape]},
             attrs={'axis': perm},
         )
         return out
def unstack(x, axis=0, num=None):
...
@@ -625,32 +551,25 @@ def unstack(x, axis=0, num=None):
         if num == 0:
             return []
         return _C_ops.unstack(x, axis, num)
-
-    if _non_static_mode():
-        if num is None:
-            num = x.shape[axis]
-        if num == 0:
-            return []
-        return _legacy_C_ops.unstack(x, num, 'axis', int(axis), 'num', num)
+    else:
         helper = LayerHelper('unstack', **locals())
         if num is None:
             if axis is None or x.shape[axis] <= 0:
                 raise ValueError('unknown unstack number')
             else:
                 num = x.shape[axis]

         outs = []
         for _ in range(num):
             outs.append(helper.create_variable_for_type_inference(x.dtype))

         helper.append_op(
             type='unstack',
             inputs={'X': [x]},
             outputs={'Y': outs},
             attrs={'axis': axis, 'num': num},
         )
         return outs
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
...
@@ -959,12 +878,7 @@ def fill_(x, value):
"The type of 'value' must be int or float, but received %s."
"The type of 'value' must be int or float, but received %s."
%
(
type
(
value
))
%
(
type
(
value
))
)
)
if
in_dygraph_mode
():
return
_C_ops
.
fill_
(
x
,
value
)
return
_C_ops
.
fill_
(
x
,
value
)
else
:
return
_legacy_C_ops
.
fill_any_
(
x
,
"value_float"
,
float
(
value
),
"value_int"
,
int
(
value
)
)
@dygraph_only
...
@@ -992,12 +906,7 @@ def zero_(x):
             print(tensor.tolist())   #[0, 0, 0, 0, 0]

     """
-    if in_dygraph_mode():
-        return _C_ops.fill_(x, 0.0)
-    else:
-        return _legacy_C_ops.fill_any_(
-            x, "value_float", 0.0, "value_int", int(0)
-        )
+    return _C_ops.fill_(x, 0.0)
@dygraph_only
...
@@ -1025,39 +934,11 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
             x.fill_diagonal_(1.0)
             print(x.tolist())   #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]]
     """
-    helper = LayerHelper("fill_diagonal_", **locals())
-    check_type(x, 'X', (Variable), 'fill_diagonal_')
-    dtype = helper.input_dtype('x')
-    check_dtype(
-        dtype,
-        'X',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'fill_diagonal_',
-    )
-    check_type(value, 'value', (bool, int, float), 'fill_diagonal_')
-    check_type(wrap, 'wrap', (bool), 'fill_diagonal_')
-
-    inshape = x.shape
-    inshapeset = set(inshape)
-    assert len(inshape) >= 2, 'Tensor dims should >= 2 in fill_diagonal_ API'
-    if len(inshape) > 2:
-        assert (
-            len(inshapeset) == 1
-        ), 'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
     if in_dygraph_mode():
-        if len(inshape) == 2:
+        if len(x.shape) == 2:
             return _C_ops.fill_diagonal_(x, value, offset, wrap)
         return _C_ops.fill_diagonal_(x, value, offset, True)
-    if len(inshape) == 2:
-        return _legacy_C_ops.fill_diagonal_(
-            x, 'value', value, 'offset', offset, 'wrap', wrap
-        )
-    return _legacy_C_ops.fill_diagonal_(
-        x, 'value', value, 'offset', offset, 'wrap', True
-    )
def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
    inshape = x.shape
...
@@ -1087,18 +968,8 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
         y = y.reshape([1, -1])

     if inplace:
-        if in_dygraph_mode():
-            return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
-        else:
-            return _legacy_C_ops.fill_diagonal_tensor_(
-                x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-            )
-    if in_dygraph_mode():
-        return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
-    else:
-        return _legacy_C_ops.fill_diagonal_tensor(
-            x, y, 'offset', offset, 'dim1', dim1, 'dim2', dim2
-        )
+        return _C_ops.fill_diagonal_tensor_(x, y, offset, dim1, dim2)
+    return _C_ops.fill_diagonal_tensor(x, y, offset, dim1, dim2)
def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
...
@@ -1248,84 +1119,80 @@ def concat(x, axis=0, name=None):
         if not isinstance(input, Variable):
             input = [t for t in input if t.shape.count(0) == 0]
         return _C_ops.concat(input, axis)
-
-    if _in_legacy_dygraph():
-        if isinstance(axis, Variable):
-            axis = axis.numpy()
-            axis = axis.item(0)
-        if not isinstance(input, Variable):
-            input = [t for t in input if t.shape.count(0) == 0]
-        out = _varbase_creator()
-        _legacy_C_ops.concat(input, out, 'axis', axis)
-        return out
+    else:
         check_type(input, 'input', (list, tuple, Variable), 'concat')
         if not isinstance(input, Variable):
             for id, x in enumerate(input):
                 check_variable_and_dtype(
                     x,
                     'input[' + str(id) + ']',
                     [
                         'bool',
                         'float16',
                         'float32',
                         'float64',
                         'int32',
                         'int64',
                         'int8',
                         'unit8',
                     ],
                     'concat',
                 )
                 if x.dtype != input[0].dtype:
                     raise TypeError(
                         "All the Tensors in the input must have the same data type."
                     )
         else:
             input = [input]
         check_type(axis, 'axis', (int, Variable), 'concat')

         if isinstance(axis, Variable):
             check_dtype(
                 axis.dtype,
                 'axis',
                 ['int32', 'int64'],
                 'concat',
                 "The data type of axis must be int32 or int64 when axis is a Tensor",
             )

         helper = LayerHelper('concat', **locals())
         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype()
         )

         if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
             # NOTE(liym27): Don't remove this if branch!
             # This feature is supported for Dynamic-to-Static, because after transformed, the type of inputs[0]
             # is LOD_TENSOR_ARRAY in some scenarios. And this feature can be used in static mode.
             assert len(input) == 1, (
                 "If the elements of 'input' in concat are Variable(LoDTensorArray), "
                 "number of the elements must be 1, but received %s."
                 % len(input)
             )
             out_index = helper.create_variable_for_type_inference(dtype="int32")
             helper.append_op(
                 type='tensor_array_to_tensor',
                 inputs={'X': input[0]},
                 outputs={'Out': [out], 'OutIndex': [out_index]},
                 attrs={'axis': axis, 'use_stack': False},
             )
         else:
             inputs = {'X': input}
             attrs = {}
             if isinstance(axis, Variable):
                 axis.stop_gradient = True
                 inputs['AxisTensor'] = axis
             else:
                 attrs['axis'] = axis

             helper.append_op(
                 type='concat',
                 inputs=inputs,
                 outputs={'Out': [out]},
                 attrs=attrs,
             )
         return out
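Note: a minimal usage sketch of paddle.concat, restructured above (values illustrative):

    import paddle

    x1 = paddle.to_tensor([[1, 2], [3, 4]])
    x2 = paddle.to_tensor([[5, 6], [7, 8]])
    print(paddle.concat([x1, x2], axis=0).shape)  # [4, 2]
    print(paddle.concat([x1, x2], axis=1).shape)  # [2, 4]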
def broadcast_tensors(input, name=None):
...
@@ -1358,80 +1225,81 @@ def broadcast_tensors(input, name=None):
    """
num_inputs
=
len
(
input
)
num_inputs
=
len
(
input
)
if
paddle
.
framework
.
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
broadcast_tensors
(
input
)
return
_C_ops
.
broadcast_tensors
(
input
)
if
paddle
.
framework
.
_non_static_mode
():
else
:
return
_legacy_C_ops
.
broadcast_tensors
(
input
,
num_inputs
)
check_type
(
input
,
'input'
,
(
list
,
tuple
),
'broadcast_tensors'
)
if
num_inputs
<
1
:
check_type
(
input
,
'input'
,
(
list
,
tuple
),
'broadcast_tensors'
)
if
num_inputs
<
1
:
raise
TypeError
(
"At least 1 tensor is needed to perform broadcast_tensors"
)
# Check input types
for
id
,
x
in
enumerate
(
input
):
check_variable_and_dtype
(
x
,
'input['
+
str
(
id
)
+
']'
,
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'broadcast_tensors'
,
)
if
x
.
dtype
!=
input
[
0
].
dtype
:
raise
TypeError
(
raise
TypeError
(
"A
ll the Tensors in the input must have the same data type.
"
"A
t least 1 tensor is needed to perform broadcast_tensors
"
)
)
# Check bcast semantics
# Check input types
output_shape_r_last_tensor_index
=
[]
for
id
,
x
in
enumerate
(
input
):
output_shape_r
=
[]
check_variable_and_dtype
(
x
,
# Use while loop due to weird behaviour of "range()"
'input['
+
str
(
id
)
+
']'
,
j
=
0
[
'bool'
,
'float32'
,
'float64'
,
'int32'
,
'int64'
],
while
j
<
len
(
input
):
'broadcast_tensors'
,
tensor
=
input
[
j
]
)
shape
=
list
(
reversed
(
tensor
.
shape
))
if
x
.
dtype
!=
input
[
0
].
dtype
:
raise
TypeError
(
"All the Tensors in the input must have the same data type."
)
# Check bcast semantics
output_shape_r_last_tensor_index
=
[]
output_shape_r
=
[]
# Use while loop due to weird behaviour of "range()"
j
=
0
while
j
<
len
(
input
):
tensor
=
input
[
j
]
shape
=
list
(
reversed
(
tensor
.
shape
))
i
=
0
while
i
<
len
(
shape
):
if
len
(
output_shape_r
)
<=
i
:
output_shape_r
.
append
(
shape
[
i
])
output_shape_r_last_tensor_index
.
append
(
j
)
else
:
invalid
=
(
output_shape_r
[
i
]
!=
shape
[
i
]
and
output_shape_r
[
i
]
!=
1
and
shape
[
i
]
!=
1
)
if
invalid
:
last_index
=
output_shape_r_last_tensor_index
[
i
]
raise
TypeError
(
"Input tensors to broadcast_tensors does not follow bcast semantics"
"Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
)
if
output_shape_r
[
i
]
<=
shape
[
i
]:
output_shape_r
[
i
]
=
shape
[
i
]
output_shape_r_last_tensor_index
[
i
]
=
j
i
+=
1
# while i < len(shape)
j
+=
1
# while j < len(input)
helper
=
LayerHelper
(
'broadcast_tensors'
,
**
locals
())
i
=
0
i
=
0
while
i
<
len
(
shape
):
out
=
[]
if
len
(
output_shape_r
)
<=
i
:
while
i
<
num_inputs
:
output_shape_r
.
append
(
shape
[
i
])
out
.
append
(
output_shape_r_last_tensor_index
.
append
(
j
)
helper
.
create_variable_for_type_inference
(
else
:
dtype
=
helper
.
input_dtype
()
invalid
=
(
output_shape_r
[
i
]
!=
shape
[
i
]
and
output_shape_r
[
i
]
!=
1
and
shape
[
i
]
!=
1
)
)
if
invalid
:
last_index
=
output_shape_r_last_tensor_index
[
i
]
raise
TypeError
(
"Input tensors to broadcast_tensors does not follow bcast semantics"
"Tensor {last_index} conflicts with Tensor {j} in reversed dimension {i}"
)
if
output_shape_r
[
i
]
<=
shape
[
i
]:
output_shape_r
[
i
]
=
shape
[
i
]
output_shape_r_last_tensor_index
[
i
]
=
j
i
+=
1
# while i < len(shape)
j
+=
1
# while j < len(input)
helper
=
LayerHelper
(
'broadcast_tensors'
,
**
locals
())
i
=
0
out
=
[]
while
i
<
num_inputs
:
out
.
append
(
helper
.
create_variable_for_type_inference
(
dtype
=
helper
.
input_dtype
()
)
)
)
i
+=
1
i
+=
1
inputs
=
{
'X'
:
input
}
inputs
=
{
'X'
:
input
}
helper
.
append_op
(
helper
.
append_op
(
type
=
'broadcast_tensors'
,
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
{}
type
=
'broadcast_tensors'
,
)
inputs
=
inputs
,
outputs
=
{
'Out'
:
out
},
attrs
=
{},
)
return
out
return
out
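A minimal eager-mode usage sketch for the broadcast_tensors path above; the tensors and shapes are my own illustration, not taken from this commit:

    import paddle
    x1 = paddle.rand([1, 4])
    x2 = paddle.rand([3, 1])
    # each output is broadcast to the common shape [3, 4]
    out1, out2 = paddle.broadcast_tensors([x1, x2])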
def flip(x, axis, name=None):
    ...
@@ -1465,29 +1333,31 @@ def flip(x, axis, name=None):
    if in_dygraph_mode():
        return _C_ops.flip(x, axis)
    else:
        helper = LayerHelper("flip", **locals())
        check_type(x, 'X', (Variable), 'flip')
        dtype = helper.input_dtype('x')
        check_dtype(
            dtype,
            'X',
            ['float16', 'float32', 'float64', 'int32', 'int64', 'bool'],
            'flip',
        )
        check_type(axis, 'axis', (list, tuple), 'flip')
        if name is None:
            out = helper.create_variable_for_type_inference(dtype)
        else:
            out = helper.create_variable(
                name=name, dtype=dtype, persistable=False
            )

        helper.append_op(
            type="flip",
            inputs={"X": x},
            outputs={"Out": out},
            attrs={"axis": axis},
        )
        return out
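A quick usage sketch of flip after this change (input values are illustrative):

    import paddle
    x = paddle.arange(6).reshape([2, 3])
    y = paddle.flip(x, axis=[0])  # reverses the rows: [[3, 4, 5], [0, 1, 2]]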
def rot90(x, k=1, axes=[0, 1], name=None):
    ...
@@ -1705,23 +1575,17 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
    if in_dygraph_mode():
        return _C_ops.flatten(x, start_axis, stop_axis)
    else:
        helper = LayerHelper('flatten', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        x_shape = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='flatten_contiguous_range',
            inputs={"X": x},
            outputs={'Out': out, 'XShape': x_shape},
            attrs={"start_axis": start_axis, "stop_axis": stop_axis},
        )
        return out
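A minimal sketch of flatten as dispatched above (shapes are illustrative):

    import paddle
    x = paddle.rand([2, 3, 4])
    y = paddle.flatten(x, start_axis=1, stop_axis=2)  # shape becomes [2, 12]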
@inplace_apis_in_dygraph_only
...
@@ -1760,12 +1624,6 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None):
    if in_dygraph_mode():
        return _C_ops.flatten_(x, start_axis, stop_axis)
def roll(x, shifts, axis=None, name=None):
    """
    ...
@@ -1830,31 +1688,28 @@ def roll(x, shifts, axis=None, name=None):
    if in_dygraph_mode():
        return _C_ops.roll(x, shifts, axis)
    else:
        helper = LayerHelper("roll", **locals())
        check_type(axis, 'axis', (list, tuple), 'roll')

        out = helper.create_variable_for_type_inference(x.dtype)

        if isinstance(shifts, Variable):
            helper.append_op(
                type='roll',
                inputs={'X': x, "ShiftsTensor": shifts},
                outputs={'Out': out},
                attrs={'axis': axis},
            )
        else:
            check_type(shifts, 'shifts', (list, tuple), 'roll')
            helper.append_op(
                type='roll',
                inputs={'X': x},
                outputs={'Out': out},
                attrs={'axis': axis, 'shifts': shifts},
            )
        return out
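A quick roll sketch under the eager branch above (values are illustrative; with axis=None the tensor is flattened, rolled, then reshaped):

    import paddle
    x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
    y = paddle.roll(x, shifts=1)  # [4.0, 1.0, 2.0, 3.0]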
def stack(x, axis=0, name=None):
    ...
@@ -1947,62 +1802,59 @@ def stack(x, axis=0, name=None):
    if in_dygraph_mode():
        return _C_ops.stack(x, axis)
    else:
        if not isinstance(x, list) and not isinstance(x, tuple):
            # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
            # In that case, Variable is array of tensors indeed.
            if (
                isinstance(x, Variable)
                and x.desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY
            ):
                x = [x]
            else:
                raise TypeError(
                    "The type of '%s' in %s must be %s, but received %s"
                    % (
                        'x',
                        'stack',
                        'list[Tensor], tuple[Tensor] or TensorArray',
                        type(x),
                    )
                )

        helper = LayerHelper('stack', **locals())

        out = helper.create_variable_for_type_inference(x[0].dtype)
        if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            assert len(x) == 1, (
                "If the elements of 'x' in stack are Variable(LoDTensorArray), "
                "number of the elements must be 1, but received %s." % len(x)
            )
            out_index = helper.create_variable_for_type_inference(dtype="int32")

            for i in x:
                check_variable_and_dtype(
                    i,
                    'x',
                    ['float16', 'float32', 'float64', 'int32', 'int64'],
                    'stack',
                )

            helper.append_op(
                type='tensor_array_to_tensor',
                inputs={'X': x[0]},
                outputs={'Out': [out], 'OutIndex': [out_index]},
                attrs={'axis': axis, 'use_stack': True},
            )
        else:
            helper.append_op(
                type='stack',
                inputs={'X': x},
                outputs={'Y': out},
                attrs={'axis': axis},
            )

        return out
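A minimal stack sketch for the eager branch above (inputs are illustrative):

    import paddle
    x1 = paddle.to_tensor([1.0, 2.0])
    x2 = paddle.to_tensor([3.0, 4.0])
    y = paddle.stack([x1, x2], axis=0)  # shape [2, 2]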
def split(x, num_or_sections, axis=0, name=None):
    ...
@@ -2055,7 +1907,7 @@ def split(x, num_or_sections, axis=0, name=None):
    """
    input = x
    dim = axis
    if in_dygraph_mode():
        num = None
        attrs = ()
...
@@ -2085,108 +1937,111 @@ def split(x, num_or_sections, axis=0, name=None):
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                "received %s." % (type(num_or_sections))
            )
        if isinstance(num_or_sections, int):
            return _C_ops.split_with_num(input, num_or_sections, dim)
        else:
            return _C_ops.split(input, num_or_sections, dim)
    else:
        check_variable_and_dtype(
            input,
            'input',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int32',
                'int64',
                'uint8',
                'int8',
            ],
            'split',
        )
        check_type(
            num_or_sections, 'num_or_sections', (list, int, tuple), 'split'
        )
        check_type(dim, 'dim', (int, Variable), 'split')
        if isinstance(dim, Variable):
            check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

        helper = LayerHelper('split', **locals())
        input_shape = input.shape
        inputs = {'X': input}
        attrs = {
            'num': num_or_sections if isinstance(num_or_sections, int) else 0
        }

        def _get_SectionsTensorList(one_list):
            tensor_list = []
            unk_dim_idx = -1
            for idx, dim_size in enumerate(one_list):
                if isinstance(dim_size, Variable):
                    dim_size.stop_gradient = True
                    tensor_list.append(dim_size)
                else:
                    assert isinstance(dim_size, int)
                    if dim_size == -1:
                        assert unk_dim_idx == -1, (
                            "Only one value of 'num_or_section' in split can "
                            "be -1. But received num_or_section[%d] is also -1."
                            % idx
                        )
                        unk_dim_idx = idx
                    temp_out = helper.create_variable_for_type_inference(
                        'int32'
                    )
                    fill_constant(
                        [1], 'int32', dim_size, force_cpu=True, out=temp_out
                    )
                    tensor_list.append(temp_out)
            return tensor_list

        if isinstance(dim, Variable):
            dim.stop_gradient = True
            inputs['AxisTensor'] = dim
        else:
            assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
            dim = (len(input_shape) + dim) if dim < 0 else dim
            attrs['axis'] = dim

        if isinstance(num_or_sections, int):
            assert num_or_sections > 1, 'num_or_sections must be more than 1.'
            if isinstance(dim, int) and input_shape[dim] > 0:
                assert input_shape[dim] % num_or_sections == 0, (
                    "The input's size along the split dimension "
                    "must be evenly divisible by Attr(num_or_sections). "
                    "But %d is not evenly divisible by %d. "
                    % (num_or_sections, input_shape[dim])
                )
            num = num_or_sections
        else:
            if isinstance(dim, int) and input_shape[dim] > 0:
                assert (
                    len(num_or_sections) <= input_shape[dim]
                ), 'len(num_or_sections) must not be more than input.shape[dim].'
            num = len(num_or_sections)
            attrs['sections'] = list(
                map(
                    lambda ele: -1 if isinstance(ele, Variable) else ele,
                    num_or_sections,
                )
            )
            if utils._contain_var(num_or_sections):
                inputs['SectionsTensorList'] = _get_SectionsTensorList(
                    num_or_sections
                )

        outs = [
            helper.create_variable_for_type_inference(
                dtype=helper.input_dtype()
            )
            for i in range(num)
        ]
        helper.append_op(
            type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs
        )
        return outs
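A minimal split sketch covering both forms of num_or_sections dispatched above (shapes are illustrative; at most one section may be -1):

    import paddle
    x = paddle.rand([3, 9])
    a, b, c = paddle.split(x, num_or_sections=3, axis=1)        # three [3, 3] slices
    d, e, f = paddle.split(x, num_or_sections=[2, 3, -1], axis=1)  # [3, 2], [3, 3], [3, 4]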
def vsplit(x, num_or_sections, name=None):
    ...
@@ -2317,49 +2172,46 @@ def squeeze(x, axis=None, name=None):
    axes = axis
    if in_dygraph_mode():
        return _C_ops.squeeze(input, axes)
    else:
        helper = LayerHelper("squeeze", **locals())
        check_variable_and_dtype(
            input,
            'input',
            [
                'float16',
                'float32',
                'float64',
                'bool',
                'int8',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'squeeze',
        )

        check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'squeeze')
        attrs = {}
        if isinstance(axes, Variable):
            axes.stop_gradient = True
            attrs["axes"] = axes
        elif isinstance(axes, (list, tuple)):
            if utils._contain_var(axes):
                attrs["axes"] = utils._convert_to_tensor_list(axes)
            else:
                attrs["axes"] = axes

        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type="squeeze2",
            inputs={"X": input},
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )

        return out
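A quick squeeze sketch for the dispatch above (shapes are illustrative):

    import paddle
    x = paddle.rand([1, 3, 1, 5])
    y = paddle.squeeze(x, axis=[0, 2])  # shape [3, 5]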
@inplace_apis_in_dygraph_only
...
@@ -2379,9 +2231,6 @@ def squeeze_(x, axis=None, name=None):
    axes = axis
    if in_dygraph_mode():
        return _C_ops.squeeze_(input, axes)
def unique_consecutive(
    ...
@@ -2473,65 +2322,49 @@ def unique_consecutive(
        if len(outs) == 1:
            return outs[0]

        return tuple(outs)
    else:
        check_variable_and_dtype(
            x,
            "input",
            ['float32', 'float64', 'int32', 'int64'],
            'unique_consecutive',
        )
        check_type(
            return_inverse, 'return_inverse', bool, 'unique_consecutive'
        )
        check_type(
            return_counts, 'return_counts', bool, 'unique_consecutive'
        )
        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique_consecutive')
        if len(axis) != 0:
            check_type(axis[0], 'axis', int, 'unique_consecutive')

        helper = LayerHelper('unique_consecutive', **locals())
        attrs = {
            'dtype': attr_dtype,
            "return_inverse": return_inverse,
            "return_counts": return_counts,
            "axis": axis,
        }
        out = helper.create_variable_for_type_inference(
            dtype=x.dtype, stop_gradient=True
        )
        inverse = helper.create_variable_for_type_inference(
            dtype=attr_dtype, stop_gradient=True
        )
        counts = helper.create_variable_for_type_inference(
            dtype=attr_dtype, stop_gradient=True
        )
        outputs = {"Out": out, "Index": inverse, "Counts": counts}
        outs = [out]
        if return_inverse:
            outs.append(inverse)
        if return_counts:
            outs.append(counts)
        helper.append_op(
            type="unique_consecutive",
            inputs={"X": x},
            attrs=attrs,
            outputs=outputs,
        )
        if len(outs) == 1:
            return outs[0]
        return tuple(outs)
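A minimal unique_consecutive sketch following the branch above (input values are illustrative):

    import paddle
    x = paddle.to_tensor([1, 1, 2, 2, 3, 1, 1, 2])
    out, inverse, counts = paddle.unique_consecutive(
        x, return_inverse=True, return_counts=True
    )  # out is [1, 2, 3, 1, 2]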
def unique(
    ...
@@ -2604,27 +2437,10 @@ def unique(
    else:
        axis = [axis]
    attr_dtype = convert_np_dtype_to_dtype_(dtype)
    if in_dygraph_mode():
        out, indices, inverse, counts = _C_ops.unique(
            x, return_index, return_inverse, return_counts, axis, attr_dtype
        )
        outs = [out]
        if return_index:
            outs.append(indices)
...
@@ -2637,60 +2453,60 @@ def unique(
        if len(outs) == 1:
            return outs[0]

        return tuple(outs)
    else:
        check_variable_and_dtype(
            x, "input", ['float32', 'float64', 'int32', 'int64'], 'unique'
        )
        check_type(return_index, 'return_index', bool, 'unique')
        check_type(return_inverse, 'return_inverse', bool, 'unique')
        check_type(return_counts, 'return_counts', bool, 'unique')
        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'unique')
        if len(axis) != 0:
            check_type(axis[0], 'axis', int, 'unique')

        helper = LayerHelper('unique', **locals())
        attrs = {
            'dtype': attr_dtype,
            "return_index": return_index,
            "return_inverse": return_inverse,
            "return_counts": return_counts,
            "axis": axis,
            "is_sorted": True,
        }
        out = helper.create_variable_for_type_inference(
            dtype=x.dtype, stop_gradient=True
        )
        indices = helper.create_variable_for_type_inference(
            dtype=attr_dtype, stop_gradient=True
        )
        inverse = helper.create_variable_for_type_inference(
            dtype=attr_dtype, stop_gradient=True
        )
        counts = helper.create_variable_for_type_inference(
            dtype=attr_dtype, stop_gradient=True
        )
        outputs = {
            "Out": out,
            "Indices": indices,
            "Index": inverse,
            "Counts": counts,
        }
        outs = [out]
        if return_index:
            outs.append(indices)
        if return_inverse:
            outs.append(inverse)
        if return_counts:
            outs.append(counts)

        helper.append_op(
            type="unique", inputs={"X": x}, attrs=attrs, outputs=outputs
        )

        if len(outs) == 1:
            return outs[0]

        return tuple(outs)
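A quick unique sketch matching the eager signature above (input values are illustrative):

    import paddle
    x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
    out, index, inverse, counts = paddle.unique(
        x, return_index=True, return_inverse=True, return_counts=True
    )  # out is the sorted distinct values [1, 2, 3, 5]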
def unsqueeze(x, axis, name=None):
    ...
@@ -2741,7 +2557,7 @@ def unsqueeze(x, axis, name=None):
    """
    input = x
    axes = axis
    if in_dygraph_mode():
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
...
@@ -2751,54 +2567,51 @@ def unsqueeze(x, axis, name=None):
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        return _C_ops.unsqueeze(input, axes)
    else:
        check_type(
            axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze'
        )
        check_variable_and_dtype(
            input,
            'input',
            [
                'float16',
                'float32',
                'float64',
                'bool',
                'int8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'unsqueeze',
        )
        helper = LayerHelper("unsqueeze2", **locals())
        inputs = {"X": input}
        attrs = {}

        if isinstance(axes, int):
            axes = [axes]
        if isinstance(axes, Variable):
            axes.stop_gradient = True
            inputs["AxesTensor"] = axes
        elif isinstance(axes, (list, tuple)):
            if utils._contain_var(axes):
                inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
            else:
                attrs["axes"] = axes

        out = helper.create_variable_for_type_inference(dtype=input.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
        helper.append_op(
            type="unsqueeze2",
            inputs=inputs,
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )

        return out
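A minimal unsqueeze sketch for the branch above (shapes are illustrative; axes are inserted in order):

    import paddle
    x = paddle.rand([3, 4])
    y = paddle.unsqueeze(x, axis=[0, 2])  # shape [1, 3, 1, 4]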
@inplace_apis_in_dygraph_only
...
@@ -2818,10 +2631,7 @@ def unsqueeze_(x, axis, name=None):
            item.numpy().item(0) if isinstance(item, Variable) else item
            for item in axes
        ]
    return _C_ops.unsqueeze_(input, axes)
def gather(x, index, axis=None, name=None):
    ...
@@ -2874,42 +2684,45 @@ def gather(x, index, axis=None, name=None):
    if in_dygraph_mode():
        return _C_ops.gather(x, index, axis)
    else:
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'uint8',
            ],
            'gather',
        )
        check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')

        if isinstance(axis, Variable):
            check_variable_and_dtype(axis, 'axis', ['int32', 'int64'], 'gather')

        helper = LayerHelper('gather', **locals())
        dtype = helper.input_dtype('x')
        out = helper.create_variable_for_type_inference(dtype)
        if not isinstance(axis, Variable):
            helper.append_op(
                type="gather",
                inputs={"X": x, "Index": index},
                attrs={'axis': axis, 'overwrite': False},
                outputs={"Out": out},
            )
        else:
            helper.append_op(
                type="gather",
                inputs={"X": x, "Index": index, "Axis": axis},
                attrs={"overwrite": False},
                outputs={"Out": out},
            )

        return out
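A quick gather sketch matching the eager call above (values are illustrative):

    import paddle
    x = paddle.to_tensor([[1, 2], [3, 4], [5, 6]])
    index = paddle.to_tensor([0, 2])
    y = paddle.gather(x, index, axis=0)  # [[1, 2], [5, 6]]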
def unbind(input, axis=0):
    ...
@@ -2945,36 +2758,36 @@ def unbind(input, axis=0):
    """
    if in_dygraph_mode():
        return _C_ops.unbind(input, axis)
    else:
        if not isinstance(axis, (int)):
            raise TypeError(
                "The type of 'axis' must be int, but received %s."
                % (type(axis))
            )
        if isinstance(axis, np.generic):
            axis = np.asscalar(axis)
        input_shape = input.shape
        axis_ = axis if axis >= 0 else len(input_shape) + axis
        num = input_shape[axis_]
        helper = LayerHelper("unbind", **locals())
        check_type(input, 'input', (Variable), 'unbind')
        dtype = helper.input_dtype()
        check_dtype(
            dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'], 'unbind'
        )
        outs = [
            helper.create_variable_for_type_inference(
                dtype=helper.input_dtype()
            )
            for i in range(num)
        ]
        helper.append_op(
            type="unbind",
            inputs={"X": input},
            outputs={"Out": outs},
            attrs={"axis": axis},
        )
        return outs
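A minimal unbind sketch for the path above (shapes are illustrative):

    import paddle
    x = paddle.rand([3, 4, 5])
    a, b, c = paddle.unbind(x, axis=0)  # three tensors of shape [4, 5]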
def scatter(x, index, updates, overwrite=True, name=None):
    ...
@@ -3054,27 +2867,22 @@ def scatter(x, index, updates, overwrite=True, name=None):
    if in_dygraph_mode():
        return _C_ops.scatter(x, index, updates, overwrite)
    else:
        check_variable_and_dtype(
            x,
            'dtype',
            ['float32', 'float64', 'float16', 'int32', 'int64'],
            'scatter',
        )
        check_type(overwrite, 'overwrite', bool, 'scatter')
        helper = LayerHelper('scatter', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type="scatter",
            inputs={"X": x, "Ids": index, "Updates": updates},
            attrs={'overwrite': overwrite},
            outputs={"Out": out},
        )
        return out
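A quick scatter sketch for the eager call above (values are illustrative; with overwrite=True the indexed rows are replaced):

    import paddle
    x = paddle.zeros([3, 2])
    index = paddle.to_tensor([2, 0], dtype='int64')
    updates = paddle.to_tensor([[1.0, 1.0], [2.0, 2.0]])
    y = paddle.scatter(x, index, updates)  # rows 2 and 0 are overwritten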
@inplace_apis_in_dygraph_only
...
@@ -3083,9 +2891,7 @@ def scatter_(x, index, updates, overwrite=True, name=None):
    Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_tensor_scatter`.
    """
    return _C_ops.scatter_(x, index, updates, overwrite)
def scatter_nd_add(x, index, updates, name=None):
    ...
@@ -3160,22 +2966,18 @@ def scatter_nd_add(x, index, updates, name=None):
    if in_dygraph_mode():
        return _C_ops.scatter_nd_add(x, index, updates)
    else:
        if x.dtype != updates.dtype:
            raise ValueError("x and updates must have same data type.")

        helper = LayerHelper('scatter_nd_add', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        output = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="scatter_nd_add",
            inputs={"X": x, "Index": index, "Updates": updates},
            outputs={"Out": output},
        )
        return output
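A minimal scatter_nd_add sketch for the branch above (shapes are illustrative; updates.shape must equal index.shape[:-1] + x.shape[index.shape[-1]:]):

    import paddle
    x = paddle.rand([3, 5])
    index = paddle.to_tensor([[1], [2]], dtype='int64')
    updates = paddle.rand([2, 5])
    y = paddle.scatter_nd_add(x, index, updates)  # adds updates into rows 1 and 2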
def scatter_nd(index, updates, shape, name=None):
    ...
@@ -3307,71 +3109,70 @@ def tile(x, repeat_times, name=None):
            repeat_times = repeat_times.numpy().tolist()

        return _C_ops.tile(x, repeat_times)
    else:
        check_type(
            repeat_times, 'repeat_times', (list, tuple, Variable), 'tile'
        )
        if isinstance(repeat_times, Variable):
            assert (
                len(repeat_times.shape) == 1
            ), 'repeat_times must be an 1-D Tensor.'
        else:
            for elem in repeat_times:
                if isinstance(elem, Variable):
                    assert (
                        len(elem.shape) == 1
                    ), 'Elements in repeat_times must be 1-D Tensors or integers.'
                else:
                    type_tuple = (int, np.int32, np.int64)
                    assert isinstance(
                        elem, type_tuple
                    ), 'Elements in repeat_times must be 1-D Tensors or integers.'

        check_variable_and_dtype(
            x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile'
        )
        if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
            raise ValueError(
                "When the date type is bool for the input 'x' of tile op, you "
                "must set its stop_gradient to be True by "
                "some_var.stop_gradient == True supporting some_var is the input."
            )

        helper = LayerHelper('tile', **locals())

        inputs = {"X": [x]}
        attrs = {}

        def get_attr_repeat_times(list_repeat_times):
            attrs_repeat_times = []
            for idx, times in enumerate(list_repeat_times):
                if isinstance(times, Variable):
                    attrs_repeat_times.append(-1)
                else:
                    attrs_repeat_times.append(times)
                    assert (
                        times > 0
                    ), "All elements in repeat_times must be positive for tile."
            return attrs_repeat_times

        if isinstance(repeat_times, Variable):
            repeat_times.stop_gradient = True
            inputs['RepeatTimes'] = repeat_times
            attrs['repeat_times'] = [-1]
        elif isinstance(repeat_times, (list, tuple)):
            attrs['repeat_times'] = get_attr_repeat_times(repeat_times)
            if utils._contain_var(repeat_times):
                inputs['repeat_times_tensor'] = utils._convert_to_tensor_list(
                    repeat_times
                )

        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='tile', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return out
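A quick tile sketch for the eager branch above (values are illustrative; a shorter input rank is promoted with leading 1s):

    import paddle
    x = paddle.to_tensor([1, 2, 3])
    y = paddle.tile(x, repeat_times=[2, 2])  # shape [2, 6]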
def expand_as(x, y, name=None):
    ...
@@ -3404,34 +3205,34 @@ def expand_as(x, y, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.expand_as(x, None, y.shape)
    else:
        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float32', 'float64', 'int32', 'int64'],
            'expand_as',
        )
        check_type(y, 'y', Variable, 'expand_as')

        if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
            raise ValueError(
                "When the data type of input 'x' for expand_as is bool, "
                "you must set its stop_gradient to be False by "
                "some_var.stop_gradient = True, supporting "
                "some_var as the input 'x'."
            )
        inputs = {"X": [x], "Y": [y]}

        helper = LayerHelper('expand_as', **locals())
        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='expand_as_v2',
            inputs=inputs,
            attrs={'target_shape': y.shape},
            outputs={'Out': out},
        )
        return out
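A minimal expand_as sketch matching the call above (shapes are illustrative):

    import paddle
    x = paddle.to_tensor([[1.0], [2.0], [3.0]])
    y = paddle.rand([3, 4])
    z = paddle.expand_as(x, y)  # x broadcast to shape [3, 4]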
def broadcast_to(x, shape, name=None):
    ...
@@ -3463,68 +3264,69 @@ def broadcast_to(x, shape, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.expand(x, shape)
    else:
        if isinstance(shape, Variable):
            assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.'
        else:
            for elem in shape:
                if isinstance(elem, Variable):
                    assert (
                        len(elem.shape) == 1
                    ), 'Elements in shape must be 1-D Tensors or integers.'
                else:
                    type_tuple = (int, np.int32, np.int64)
                    assert isinstance(
                        elem, type_tuple
                    ), 'Elements in shape must be 1-D Tensors or integers.'

        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float32', 'float64', 'int32', 'int64'],
            'broadcast_to',
        )
        check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
        if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
            raise ValueError(
                "When the data type of input 'x' for broadcast_to is bool, "
                "you must set its stop_gradient to be False by "
                "some_var.stop_gradient = True, supporting "
                "some_var as the input."
            )

        inputs = {"X": [x]}
        attrs = {}

        helper = LayerHelper('expand', **locals())

        def get_attr_expand_shape(list_expand_shape):
            attrs_expand_shape = []
            for idx, shape in enumerate(list_expand_shape):
                if isinstance(shape, Variable):
                    attrs_expand_shape.append(-1)
                else:
                    attrs_expand_shape.append(shape)
                    assert (
                        shape > 0 or shape == -1
                    ), "All elements in shape of broadcast_to must be positive or -1."
            return attrs_expand_shape

        if isinstance(shape, Variable):
            shape.stop_gradient = True
            inputs['Shape'] = shape
        elif isinstance(shape, (list, tuple)):
            attrs['shape'] = get_attr_expand_shape(shape)
            if utils._contain_var(shape):
                inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                    shape
                )

        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return out
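A quick broadcast_to sketch for the eager path above (values are illustrative):

    import paddle
    x = paddle.to_tensor([1, 2, 3])
    y = paddle.broadcast_to(x, shape=[2, 3])  # [[1, 2, 3], [1, 2, 3]]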
def expand(x, shape, name=None):
    ...
@@ -3557,72 +3359,69 @@ def expand(x, shape, name=None):
    """
    if in_dygraph_mode():
        return _C_ops.expand(x, shape)
    else:
        if isinstance(shape, Variable):
            assert len(shape.shape) == 1, 'shape must be an 1-D Tensor.'
        else:
            for elem in shape:
                if isinstance(elem, Variable):
                    assert (
                        len(elem.shape) == 1
                    ), 'Elements in shape must be 1-D Tensors or integers.'
                else:
                    type_tuple = (int, np.int32, np.int64)
                    assert isinstance(
                        elem, type_tuple
                    ), 'Elements in shape must be 1-D Tensors or integers.'

        check_variable_and_dtype(
            x,
            'x',
            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
            'expand',
        )
        check_type(shape, 'shape', (list, tuple, Variable), 'expand')
        if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
            raise ValueError(
                "When the data type of input 'x' for expand is bool, "
                "you must set its stop_gradient to be False by "
                "some_var.stop_gradient = True, supporting "
                "some_var as the input."
            )

        inputs = {"X": [x]}
        attrs = {}

        helper = LayerHelper('expand', **locals())

        def get_attr_expand_shape(list_expand_shape):
            attrs_expand_shape = []
            for idx, shape in enumerate(list_expand_shape):
                if isinstance(shape, Variable):
                    attrs_expand_shape.append(-2)
                else:
                    attrs_expand_shape.append(shape)
                    assert (
                        shape > 0 or shape == -1
                    ), "All elements in shape of expand must be positive or -1."
            return attrs_expand_shape

        if isinstance(shape, Variable):
            shape.stop_gradient = True
            inputs['Shape'] = shape
        elif isinstance(shape, (list, tuple)):
            attrs['shape'] = get_attr_expand_shape(shape)
            if utils._contain_var(shape):
                inputs['expand_shapes_tensor'] = utils._convert_to_tensor_list(
                    shape
                )

        dtype = helper.input_dtype(input_param_name='x')
        out = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type='expand_v2', inputs=inputs, outputs={'Out': out}, attrs=attrs
        )
        return out
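A minimal expand sketch for the branch above (shapes are illustrative):

    import paddle
    x = paddle.to_tensor([[1], [2], [3]])
    y = paddle.expand(x, shape=[3, 4])  # each row repeated along the last axis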
def reshape(x, shape, name=None):
    ...
@@ -3710,109 +3509,92 @@ def reshape(x, shape, name=None):
        return out
    else:
        check_variable_and_dtype(
            x,
            'x',
            [
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'bool',
                'uint16',
            ],
            'reshape',
        )
        check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
        check_type(
            actual_shape, 'actual_shape', (Variable, type(None)), 'reshape'
        )

        helper = LayerHelper("reshape2", **locals())

        def get_attr_shape(list_shape):
            unk_dim_idx = -1
            attrs_shape = []
            for dim_idx, dim_size in enumerate(list_shape):
                if isinstance(dim_size, Variable):
                    attrs_shape.append(-1)
                else:
                    attrs_shape.append(dim_size)
                    if dim_size == -1:
                        assert unk_dim_idx == -1, (
                            "Only one dimension value of 'shape' in reshape can "
                            "be -1. But received shape[%d] is also -1.\n"
                            "\n\t# N = x.shape()[2]\t\t# N is an int. "
                            "(NOT recommend under @to_static)\n\tN = paddle.shape(x)[2]\t\t"
                            "# N is a Tensor. (Recommend)\n\tz = paddle.reshape([N, -1, 4])"
                            "\t# z.shape is [-1, -1, 4]\n\n"
                            " If your target shape in Reshape represents dynamic shape, "
                            "please turn it into a Tensor under @to_static. See above example for details."
                            % dim_idx
                        )
                        unk_dim_idx = dim_idx
                    elif dim_size == 0:
                        assert dim_idx < len(x.shape), (
                            "The index of 0 in `shape` must be less than "
                            "the input tensor X's dimensions. "
                            "But received shape[%d] = 0, X's dimensions = %d."
                            % (dim_idx, len(x.shape))
                        )
                    else:
                        assert dim_size > 0, (
                            "Each dimension value of 'shape' in reshape must not "
                            "be negative except one unknown dimension. "
                            "But received shape[%d] = %s."
                            % (dim_idx, str(dim_size))
                        )
            return attrs_shape

        inputs = {"X": x}
        attrs = {}
        if isinstance(shape, Variable):
            shape.stop_gradient = True
            inputs["Shape"] = shape
        elif isinstance(shape, (list, tuple)):
            assert len(shape) > 0, (
                "The size of 'shape' in reshape can't be zero, "
                "but received %s." % len(shape)
            )
            attrs["shape"] = get_attr_shape(shape)
            if utils._contain_var(shape):
                inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
            elif isinstance(actual_shape, Variable):
                actual_shape.stop_gradient = True
                inputs["Shape"] = actual_shape

        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type="reshape2",
            inputs=inputs,
            attrs=attrs,
            outputs={"Out": out, "XShape": x_shape},
        )

        return out
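A quick reshape sketch for the path above (shapes are illustrative; at most one -1 dimension is inferred):

    import paddle
    x = paddle.rand([2, 3, 4])
    y = paddle.reshape(x, [-1, 4])  # shape [6, 4]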
@inplace_apis_in_dygraph_only
...
@@ -3844,24 +3626,6 @@ def reshape_(x, shape, name=None):
        )
        return out
def
gather_nd
(
x
,
index
,
name
=
None
):
def
gather_nd
(
x
,
index
,
name
=
None
):
...
@@ -3939,24 +3703,24 @@ def gather_nd(x, index, name=None):
...
@@ -3939,24 +3703,24 @@ def gather_nd(x, index, name=None):
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
gather_nd
(
x
,
index
)
return
_C_ops
.
gather_nd
(
x
,
index
)
else
:
else
:
if
_in_legacy_dygraph
():
check_variable_and_dtype
(
return
_legacy_C_ops
.
gather_nd
(
x
,
index
)
x
,
check_variable_and_dtype
(
'x'
,
x
,
[
'bool'
,
'float32'
,
'float64'
,
'int16'
,
'int32'
,
'int64'
]
,
'x
'
,
'gather_np
'
,
[
'bool'
,
'float32'
,
'float64'
,
'int16'
,
'int32'
,
'int64'
],
)
'gather_np'
,
check_variable_and_dtype
(
)
index
,
'index'
,
[
'int32'
,
'int64'
],
'gather_np'
check_variable_and_dtype
(
index
,
'index'
,
[
'int32'
,
'int64'
],
'gather_np'
)
)
helper
=
LayerHelper
(
'gather_nd'
,
**
locals
())
helper
=
LayerHelper
(
'gather_nd'
,
**
locals
())
dtype
=
helper
.
input_dtype
()
dtype
=
helper
.
input_dtype
()
output
=
helper
.
create_variable_for_type_inference
(
dtype
)
output
=
helper
.
create_variable_for_type_inference
(
dtype
)
helper
.
append_op
(
helper
.
append_op
(
type
=
"gather_nd"
,
type
=
"gather_nd"
,
inputs
=
{
"X"
:
x
,
"Index"
:
index
},
inputs
=
{
"X"
:
x
,
"Index"
:
index
},
outputs
=
{
"Out"
:
output
},
outputs
=
{
"Out"
:
output
},
)
)
return
output
return
output
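For context, a short usage sketch of the public API this hunk dispatches (not from this commit): the trailing dimension of `index` holds full coordinates into `x`.

    import paddle

    x = paddle.to_tensor([[0., 1., 2.], [3., 4., 5.]])
    index = paddle.to_tensor([[0, 1], [1, 2]])
    # Each row of `index` addresses one element of `x`: x[0, 1] and x[1, 2].
    print(paddle.gather_nd(x, index))  # [1., 5.]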
def strided_slice(x, axes, starts, ends, strides, name=None):
...
@@ -4043,63 +3807,58 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.strided_slice(x, axes, starts, ends, strides)
+    else:
         helper = LayerHelper('strided_slice', **locals())

         check_variable_and_dtype(
             x,
             'x',
             ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
             'strided_slice',
         )
         check_type(axes, 'axes', (list, tuple), 'strided_slice')
         check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
         check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
         check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')

         def check_list_elements_dtype(list_input, input_name):
             if isinstance(list_input, Variable):
                 check_dtype(
                     list_input.dtype, input_name, ['int32'], 'strided_slice'
                 )
             else:
                 for i, var in enumerate(list_input):
                     var_name = input_name + '[' + str(i) + ']'
                     if isinstance(var, Variable):
                         check_dtype(
                             var.dtype, var_name, ['int32'], 'strided_slice'
                         )

         check_list_elements_dtype(axes, 'axes')
         check_list_elements_dtype(starts, 'starts')
         check_list_elements_dtype(ends, 'ends')
         check_list_elements_dtype(strides, 'strides')

         def get_new_list_tensor(old_list):
             new_list_tensor = []
             for dim in old_list:
                 if isinstance(dim, Variable):
                     dim.stop_gradient = True
                     new_list_tensor.append(dim)
                 else:
                     assert isinstance(dim, int)
                     temp_out = helper.create_variable_for_type_inference(
                         'int32'
                     )
                     fill_constant(
                         [1], 'int32', dim, force_cpu=True, out=temp_out
                     )
                     new_list_tensor.append(temp_out)
             return new_list_tensor

-    if _in_legacy_dygraph():
-        inputs = {'Input': x}
-        attrs = {
-            'axes': axes,
-            'starts': starts,
-            'ends': ends,
-            'strides': strides,
-            'infer_flags': infer_flags,
-        }
-    else:
         inputs = {'Input': x}
         attrs = {'axes': axes}
         infer_flags = list(1 for i in range(len(axes)))
         # starts
         if isinstance(starts, Variable):
             starts.stop_gradient = True
...
@@ -4151,14 +3910,17 @@ def strided_slice(x, axes, starts, ends, strides, name=None):
         else:
             attrs['strides'] = strides
         attrs['infer_flags'] = infer_flags
         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype('x')
         )
         helper.append_op(
             type='strided_slice',
             inputs=inputs,
             attrs=attrs,
             outputs={'Out': out},
         )

         return out
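A quick usage sketch of the op itself (public API, not from this commit): `axes`, `starts`, `ends` and `strides` are parallel lists, one entry per sliced axis.

    import paddle

    x = paddle.arange(8, dtype='float32').reshape([2, 4])
    # Every second column along axis 1 (columns 0 and 2).
    y = paddle.strided_slice(x, axes=[1], starts=[0], ends=[4], strides=[2])
    print(y)  # [[0., 2.], [4., 6.]]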
def tensordot(x, y, axes=2, name=None):
...
@@ -4281,7 +4043,7 @@ def tensordot(x, y, axes=2, name=None):
     check_type(axes, 'axes', (int, tuple, list, Variable), op_type)

     def _var_to_list(var):
-        if paddle.in_dynamic_mode():
+        if in_dygraph_mode():
             return tolist(var)
         raise TypeError(
             "The 'axes' with type 'Tensor' in "
...
@@ -4409,20 +4171,20 @@ def as_complex(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.as_complex(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.as_complex(x)
+    else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex')
         op_type = "as_complex"
         helper = LayerHelper(op_type, **locals())
         inputs = {"X": x}
         out = helper.create_variable_for_type_inference(
             dtype=_real_to_complex_dtype(x.dtype)
         )
         outputs = {"Out": out}
         attrs = {}
         helper.append_op(
             type=op_type, inputs=inputs, attrs=attrs, outputs=outputs
         )
         return out

def as_real(x, name=None):
...
@@ -4462,19 +4224,17 @@ def as_real(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.as_real(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.as_real(x)
+    else:
         check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real')
         op_type = "as_real"
         helper = LayerHelper(op_type, **locals())
         inputs = {"X": x}
         out = helper.create_variable_for_type_inference(
             dtype=_complex_to_real_dtype(x.dtype)
         )
         outputs = {"Out": out}
         helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
         return out
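The two ops above are inverses of each other; a minimal round-trip sketch (public API, not from this commit):

    import paddle

    x = paddle.rand([3, 2])    # last dim of size 2 holds (real, imag) pairs
    c = paddle.as_complex(x)   # complex64, shape [3]
    r = paddle.as_real(c)      # float32 again, shape [3, 2]
    assert r.shape == [3, 2]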
def repeat_interleave(x, repeats, axis=None, name=None):
...
@@ -4633,38 +4393,34 @@ def moveaxis(x, source, destination, name=None):
     if in_dygraph_mode():
         out = _C_ops.transpose(x, perm)
         return out
-    if _in_legacy_dygraph():
-        out, _ = _legacy_C_ops.transpose2(x, 'axis', perm)
-        return out
+    else:
         check_variable_and_dtype(
             x,
             'x',
             [
                 'bool',
                 'float16',
                 'float32',
                 'float64',
                 'int32',
                 'int64',
                 'complex64',
                 'complex128',
             ],
             'moveaxis',
         )

         helper = LayerHelper('moveaxis', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         x_shape = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
             type='transpose2',
             inputs={'X': [x]},
             outputs={'Out': [out], 'XShape': [x_shape]},
             attrs={'axis': perm},
         )
         return out

def non_negative_axis(arr, axis):
    ndim = len(arr.shape)
...
@@ -4727,39 +4483,38 @@ def take_along_axis(arr, indices, axis):
     if not broadcast_shape:
         # if indices matrix have larger size than arr, arr should broadcast into indices shape.
         broadcast_shape = indices.shape
-    if _non_static_mode():
+    if in_dygraph_mode():
         indices = paddle.broadcast_to(indices, broadcast_shape)
         broadcast_shape_list = list(broadcast_shape)
         broadcast_shape_list[axis] = list(arr.shape)[axis]
         broadcast_shape = tuple(broadcast_shape_list)
         arr = paddle.broadcast_to(arr, broadcast_shape)
-        if not _in_legacy_dygraph():
-            return _C_ops.take_along_axis(arr, indices, axis)
-        return _legacy_C_ops.take_along_axis(arr, indices, 'Axis', axis)
+        return _C_ops.take_along_axis(arr, indices, axis)
     check_variable_and_dtype(
         arr,
         'x',
         ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'take_along_axis',
     )
     check_variable_and_dtype(
         indices, 'index', ['int32', 'int64'], 'take_along_axis'
     )
     indices = paddle.broadcast_to(indices, broadcast_shape)
     broadcast_shape_list = list(broadcast_shape)
     broadcast_shape_list[axis] = list(arr.shape)[axis]
     broadcast_shape = tuple(broadcast_shape_list)
     arr = paddle.broadcast_to(arr, broadcast_shape)
     helper = LayerHelper('take_along_axis', **locals())
     dtype = helper.input_dtype()
     result = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="take_along_axis",
         inputs={"Input": arr, "Index": indices},
         attrs={"Axis": axis},
         outputs={"Result": result},
     )
     return result
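A short usage sketch of the op (public API, not from this commit): `indices` is broadcast against `arr` on every axis except the one being gathered, numpy-style.

    import paddle

    arr = paddle.to_tensor([[1., 2., 3.], [4., 5., 6.]])
    idx = paddle.to_tensor([[0], [2]])
    # Picks arr[0, 0] and arr[1, 2] along axis 1.
    print(paddle.take_along_axis(arr, idx, axis=1))  # [[1.], [6.]]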
def put_along_axis(arr, indices, values, axis, reduce='assign'):
...
@@ -4797,7 +4552,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
     )
     axis = non_negative_axis(arr, axis)
     broadcast_shape = infer_broadcast_shape(arr, indices, axis)
-    if _non_static_mode():
+    if in_dygraph_mode():
         values = (
             paddle.to_tensor(values)
             if not isinstance(values, paddle.Tensor)
...
@@ -4806,34 +4561,30 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'):
         if broadcast_shape:
             indices = paddle.broadcast_to(indices, broadcast_shape)
         values = paddle.broadcast_to(values, indices.shape)
-        if in_dygraph_mode():
-            return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
-        return _legacy_C_ops.put_along_axis(
-            arr, indices, values, "Axis", axis, "Reduce", reduce
-        )
+        return _C_ops.put_along_axis(arr, indices, values, axis, reduce)
     check_variable_and_dtype(
         arr,
         'x',
         ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
         'put_along_axis',
     )
     check_variable_and_dtype(
         indices, 'index', ['int32', 'int64'], 'put_along_axis'
     )
     if broadcast_shape:
         indices = paddle.broadcast_to(indices, broadcast_shape)
     values = paddle.broadcast_to(values, indices.shape)
     helper = LayerHelper('put_along_axis', **locals())
     dtype = helper.input_dtype()
     result = helper.create_variable_for_type_inference(dtype)
     helper.append_op(
         type="put_along_axis",
         inputs={"Input": arr, "Index": indices, "Value": values},
         attrs={"Axis": axis, "Reduce": reduce},
         outputs={"Result": result},
     )
     return result
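A usage sketch of the scatter counterpart (public API, not from this commit); as the code above shows, a scalar `values` is lifted to a Tensor and broadcast to the shape of `indices`.

    import paddle

    arr = paddle.zeros([2, 3])
    idx = paddle.to_tensor([[1], [2]])
    out = paddle.put_along_axis(arr, idx, 9.0, axis=1)
    print(out)  # [[0., 9., 0.], [0., 0., 9.]]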
@inplace_apis_in_dygraph_only
...
@@ -4856,11 +4607,7 @@ def put_along_axis_(arr, indices, values, axis, reduce='assign'):
     if broadcast_shape:
         indices = paddle.broadcast_to(indices, broadcast_shape)
     values = paddle.broadcast_to(values, indices.shape)
-    if in_dygraph_mode():
-        return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)
-    return _legacy_C_ops.put_along_axis_(
-        arr, indices, values, "Axis", axis, "Reduce", reduce
-    )
+    return _C_ops.put_along_axis_(arr, indices, values, axis, reduce)

def index_add(x, index, axis, value, name=None):
...

python/paddle/tensor/math.py
...
@@ -34,9 +34,6 @@ from ..fluid.data_feeder import (
 from ..fluid.layers import utils
 from ..framework import (
     LayerHelper,
-    _in_legacy_dygraph,
-    _non_static_mode,
-    _varbase_creator,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
@@ -158,16 +155,14 @@ def log(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log(x)
+    else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
         inputs = {'X': [x]}
         helper = LayerHelper('log', **locals())
         dtype = helper.input_dtype(input_param_name='x')
         out = helper.create_variable_for_type_inference(dtype)
         helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
         return out
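Every conversion in this commit has the same shape. A minimal sketch of the resulting two-branch dispatch, not Paddle source: `my_log` is a hypothetical wrapper, and the internal import paths shown are the ones Paddle 2.4 exposes and may change.

    # Hedged sketch of the post-cleanup dispatch pattern.
    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode
    from paddle.fluid.layer_helper import LayerHelper

    def my_log(x, name=None):
        if in_dygraph_mode():
            # Eager (dygraph) path: call the C++ kernel directly.
            return _C_ops.log(x)
        else:
            # Static-graph path: append a "log" op to the current program.
            helper = LayerHelper('log', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(type='log', inputs={'X': x}, outputs={'Out': out})
            return out

The `_in_legacy_dygraph()` middle branch that every function used to carry is what this PR removes, which is why each hunk collapses to exactly this two-way split.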
def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
...
@@ -220,51 +215,39 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     if in_dygraph_mode():
         out = _C_ops.scale(x, scale, float(bias), bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out, act)
-    elif _in_legacy_dygraph():
-        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        out = _legacy_C_ops.scale(
-            x,
-            'scale',
-            float(_scale),
-            'bias',
-            float(bias),
-            'bias_after_scale',
-            bias_after_scale,
-        )
-        return dygraph_utils._append_activation_in_dygraph(out, act)
+    else:
         check_variable_and_dtype(
             x,
             "x",
             [
                 'float16',
                 'uint16',
                 'float32',
                 'float64',
                 'int8',
                 'int16',
                 'int32',
                 'int64',
                 'uint8',
             ],
             "scale",
         )
         inputs = {'X': [x]}
         attrs = {
             'bias': float(bias),
             'bias_after_scale': bias_after_scale,
         }
         if isinstance(scale, Variable):
             inputs['ScaleTensor'] = [scale]
         else:
             attrs['scale'] = float(scale)
         helper = LayerHelper('scale', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs
         )
         return helper.append_activation(out)
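A usage sketch of the op's two bias modes (public API, not from this commit):

    import paddle

    x = paddle.to_tensor([1., 2., 3.])
    # bias_after_scale=True computes scale * x + bias; False computes scale * (x + bias).
    print(paddle.scale(x, scale=2.0, bias=1.0))                          # [3., 5., 7.]
    print(paddle.scale(x, scale=2.0, bias=1.0, bias_after_scale=False))  # [4., 6., 8.]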
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
...
@@ -295,20 +278,22 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
+    else:
         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'stanh'
         )

         helper = LayerHelper('stanh', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='stanh',
             inputs={'X': x},
             outputs={'Out': out},
             attrs={'scale_a': scale_a, 'scale_b': scale_b},
         )
         return out

def multiplex(inputs, index, name=None):
...
@@ -363,32 +348,32 @@ def multiplex(inputs, index, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.multiplex(inputs, index)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.multiplex(index, inputs)
+    else:
         helper = LayerHelper('multiplex', **locals())

         check_type(inputs, 'inputs', (list), 'multiplex')
         if len(inputs) < 2:
             raise ValueError(
                 "inputs should be a list object with at least 2 elements."
             )
         for id, x in enumerate(inputs):
             check_variable_and_dtype(
                 x,
                 'input[' + str(id) + ']',
                 ['float32', 'float64', 'int32', 'int64'],
                 'multiplex',
             )
         check_variable_and_dtype(
             index, "index", ['int32', 'int64'], 'multiplex'
         )

         out = helper.create_variable_for_type_inference(inputs[0].dtype)
         helper.append_op(
             type='multiplex',
             inputs={'X': inputs, 'Ids': index},
             outputs={'Out': [out]},
         )
         return out
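A usage sketch of multiplex (public API, not from this commit): row i of the output is row i of the candidate tensor selected by index[i].

    import paddle

    a = paddle.to_tensor([[1., 2.], [3., 4.]])
    b = paddle.to_tensor([[5., 6.], [7., 8.]])
    index = paddle.to_tensor([[1], [0]])     # row 0 from b, row 1 from a
    print(paddle.multiplex([a, b], index))   # [[5., 6.], [3., 4.]]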
@inplace_apis_in_dygraph_only
...
@@ -399,17 +384,6 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.scale_(x, scale, float(bias), bias_after_scale)
-    if _in_legacy_dygraph():
-        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        return _legacy_C_ops.scale_(
-            x,
-            'scale',
-            float(_scale),
-            'bias',
-            float(bias),
-            'bias_after_scale',
-            bias_after_scale,
-        )

def pow(x, y, name=None):
...
@@ -469,36 +443,26 @@ def pow(x, y, name=None):
         raise TypeError(
             'y must be scalar or tensor type, but received: %s ' % (y.dtype)
         )
-    if _in_legacy_dygraph():
-        if isinstance(y, (int, float)):
-            return _legacy_C_ops.pow(x, 'factor', y)
-        elif isinstance(y, (paddle.Tensor, Variable)):
-            return _elementwise_op_in_dygraph(
-                x, y, axis=-1, act=None, op_name='elementwise_pow'
-            )
-        else:
-            raise TypeError(
-                'y must be scalar or tensor type, but received: %s ' % (type(y))
-            )
-    # in static graph mode
-    if isinstance(y, (int, float)):
+    else:
+        # in static graph mode
+        if isinstance(y, (int, float)):
             helper = LayerHelper('pow', **locals())
             inputs = {'X': x}
             attrs = {'factor': y}
             out = helper.create_variable_for_type_inference(dtype=x.dtype)
             helper.append_op(
                 type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs
             )
             return out
-    elif isinstance(y, (paddle.Tensor, Variable)):
+        elif isinstance(y, (paddle.Tensor, Variable)):
             # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
             helper = LayerHelper('elementwise_pow', **locals())
             out = helper.create_variable_for_type_inference(dtype=x.dtype)
             return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
-    else:
+        else:
             raise TypeError(
                 'y must be scalar or tensor type, but received: %s ' % (type(y))
             )
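The two static branches above mirror the public behavior: a scalar exponent maps to the 'pow' op, a Tensor exponent to 'elementwise_pow'. A quick sketch (not from this commit):

    import paddle

    x = paddle.to_tensor([1., 2., 3.])
    print(paddle.pow(x, 2))                               # scalar y: [1., 4., 9.]
    print(paddle.pow(x, paddle.to_tensor([2., 3., 4.])))  # Tensor y: [1., 8., 81.]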
OP_NAMEMAPPING = {
...
@@ -531,11 +495,6 @@ def _elementwise_op_in_dygraph(
             OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name,
         )
         out = op(x, y)
-
-    if _in_legacy_dygraph():
-        op = getattr(_legacy_C_ops, op_name)
-        out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
-
     return dygraph_utils._append_activation_in_dygraph(
         out, act, use_mkldnn=use_mkldnn
     )
...
@@ -643,10 +602,7 @@ def add(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.add(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.elementwise_add(x, y)
-        else:
-            return _elementwise_op(LayerHelper('elementwise_add', **locals()))
+        return _elementwise_op(LayerHelper('elementwise_add', **locals()))

@inplace_apis_in_dygraph_only
...
@@ -735,12 +691,7 @@ def subtract(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.subtract(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))

@inplace_apis_in_dygraph_only
...
@@ -807,12 +758,7 @@ def divide(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.divide(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))
def floor_divide(x, y, name=None):
...
@@ -853,10 +799,8 @@ def floor_divide(x, y, name=None):
     axis = -1
     if in_dygraph_mode():
         return _C_ops.floor_divide(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
-
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def remainder(x, y, name=None):
...
@@ -897,10 +841,8 @@ def remainder(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.remainder(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
-
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

@inplace_apis_in_dygraph_only
...
@@ -971,18 +913,13 @@ def multiply(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.multiply(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            if x.dtype != y.dtype:
-                raise TypeError(
-                    'Input tensors must be same type, but received type of x: %s, type of y: %s '
-                    % (x.dtype, y.dtype)
-                )
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        if x.dtype != y.dtype:
+            raise TypeError(
+                'Input tensors must be same type, but received type of x: %s, type of y: %s '
+                % (x.dtype, y.dtype)
+            )
+        return _elementwise_op(LayerHelper(op_type, **locals()))

@dygraph_only
...
@@ -1017,12 +954,7 @@ def _add_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_add'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def _subtract_with_axis(x, y, axis=-1, name=None):
...
@@ -1034,12 +966,7 @@ def _subtract_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_sub'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def _multiply_with_axis(x, y, axis=-1, name=None):
...
@@ -1051,12 +978,7 @@ def _multiply_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_mul'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def _divide_with_axis(x, y, axis=-1, name=None):
...
@@ -1066,12 +988,7 @@ def _divide_with_axis(x, y, axis=-1, name=None):
     else:
         op_type = 'elementwise_div'
         act = None
-        if _in_legacy_dygraph():
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
-            return _elementwise_op(LayerHelper(op_type, **locals()))
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def maximum(x, y, name=None):
...
@@ -1135,11 +1052,8 @@ def maximum(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.maximum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def minimum(x, y, name=None):
...
@@ -1203,11 +1117,8 @@ def minimum(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.minimum(x, y)
-    elif _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def fmax(x, y, name=None):
...
@@ -1273,11 +1184,8 @@ def fmax(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.fmax(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def fmin(x, y, name=None):
...
@@ -1343,11 +1251,8 @@ def fmin(x, y, name=None):
     act = None
     if in_dygraph_mode():
         return _C_ops.fmin(x, y)
-    if _in_legacy_dygraph():
-        return _elementwise_op_in_dygraph(
-            x, y, axis=axis, act=act, op_name=op_type
-        )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))
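The maximum/minimum and fmax/fmin pairs converted above differ only in NaN handling; a quick sketch (public API, not from this commit):

    import paddle

    x = paddle.to_tensor([1., float('nan'), 3.])
    y = paddle.to_tensor([2., 2., 2.])
    print(paddle.maximum(x, y))  # NaN propagates: [2., nan, 3.]
    print(paddle.fmax(x, y))     # NaN is ignored when the other operand is a number: [2., 2., 3.]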
def sum(x, axis=None, dtype=None, keepdim=False, name=None):
...
@@ -1417,68 +1322,46 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.sum(x, axis, dtype, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-
-    if _in_legacy_dygraph():
-        if dtype_flag:
-            return _legacy_C_ops.reduce_sum(
-                x,
-                'dim',
-                axis,
-                'keep_dim',
-                keepdim,
-                'reduce_all',
-                reduce_all,
-                'in_dtype',
-                x.dtype,
-                'out_dtype',
-                dtype,
-            )
-        else:
-            return _legacy_C_ops.reduce_sum(
-                x,
-                'dim',
-                axis,
-                'keep_dim',
-                keepdim,
-                'reduce_all',
-                reduce_all,
-            )
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
         attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}

         if dtype_flag:
             attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype})

         check_variable_and_dtype(
             x,
             'x',
             [
                 'bool',
                 'float16',
                 'float32',
                 'float64',
                 'int16',
                 'int32',
                 'int64',
                 'complex64',
                 'complex128',
             ],
             'sum',
         )

         check_type(
             axis, 'axis', (int, list, tuple, type(None), Variable), 'sum'
         )

         helper = LayerHelper('sum', **locals())
         if dtype_flag:
             out = helper.create_variable_for_type_inference(dtype=dtype)
         else:
             out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='reduce_sum',
             inputs={'X': x},
             outputs={'Out': out},
             attrs=attrs,
         )
         return out
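A usage sketch covering the axis/keepdim/dtype knobs that this hunk forwards as op attributes (public API, not from this commit):

    import paddle

    x = paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]])
    print(paddle.sum(x))          # 1.0
    print(paddle.sum(x, axis=0))  # [0.4, 0.6]
    print(paddle.sum(x, axis=1, keepdim=True, dtype='float64'))  # ~[[0.3], [0.7]], float64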
def nan_to_num(x, nan=0.0, posinf=None, neginf=None, name=None):
...
@@ -1784,41 +1667,37 @@ def add_n(inputs, name=None):
         if isinstance(inputs, Variable):
             inputs = [inputs]
         return _C_ops.add_n(inputs)
-
-    if _in_legacy_dygraph():
-        if isinstance(inputs, Variable):
-            inputs = [inputs]
-        return _legacy_C_ops.sum(inputs, 'use_mkldnn', False)
-
+    else:
         helper = LayerHelper('add_n', **locals())
         check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
         if isinstance(inputs, list) or isinstance(inputs, tuple):
             if len(inputs) > 0:
                 for input in inputs:
                     check_variable_and_dtype(
                         input,
                         "inputs",
                         ['float16', 'float32', 'float64', 'int32', 'int64'],
                         'add_n',
                     )
         else:
             check_variable_and_dtype(
                 inputs,
                 "inputs",
                 ['float16', 'float32', 'float64', 'int32', 'int64'],
                 'add_n',
             )

         out = helper.create_variable_for_type_inference(
             dtype=helper.input_dtype('inputs')
         )
         helper.append_op(
             type='sum',
             inputs={'X': inputs},
             outputs={'Out': out},
             attrs={'use_mkldnn': False},
         )
         return out
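A minimal usage sketch (public API, not from this commit): add_n sums a list of same-shape tensors elementwise.

    import paddle

    a = paddle.to_tensor([[1., 2.], [3., 4.]])
    b = paddle.to_tensor([[5., 6.], [7., 8.]])
    print(paddle.add_n([a, b]))  # [[6., 8.], [10., 12.]]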
def trunc(input, name=None):
...
@@ -1852,22 +1731,19 @@ def trunc(input, name=None):
     if in_dygraph_mode():
         return _C_ops.trunc(input)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.trunc(input)
-        else:
         inputs = {"X": input}
         attrs = {}
         helper = LayerHelper("trunc", **locals())
         check_variable_and_dtype(
             input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc'
         )
         out = helper.create_variable_for_type_inference(dtype=input.dtype)
         helper.append_op(
             type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}
         )
         return out

def mm(input, mat2, name=None):
...
@@ -1939,53 +1815,54 @@ def mm(input, mat2, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.matmul(input, mat2, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(input, mat2)
+    else:

         def __check_input(x, y):
             var_names = {'x': x, 'y': y}
             for name, val in var_names.items():
                 check_variable_and_dtype(
                     val, name, ['float16', 'float32', 'float64'], 'mm'
                 )
             x_shape = list(x.shape)
             y_shape = list(y.shape)
             if len(x_shape) == 1:
                 x_shape = [1] + x_shape
             if len(y_shape) == 1:
                 y_shape = y_shape + [1]

             # check the inner 2 dimensions
             if x_shape[-1] != y_shape[-2]:
                 if not ((x_shape[-1] == -1) or (y_shape[-2] == -1)):
                     raise ValueError(
                         "After performing an optional transpose, Input X's width should be "
                         "equal to Y's width for multiplication "
                         "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                         % (x_shape, y_shape)
                     )

             if len(y_shape) > 2 and len(x_shape) > 2:
                 for i, dim_x in enumerate(x_shape[:-2]):
                     # don't check neg shape
                     if dim_x < 0 or y_shape[i] < 0:
                         continue
                     if dim_x != y_shape[i]:
                         raise ValueError(
                             "When the matrix is larger than 2 dimensions, the higher "
                             "dimensional values of the two matrices need to be equal. "
                             "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                             "Y's shape: %s.\n" % (i, i, x_shape, y_shape)
                         )

         __check_input(input, mat2)

         helper = LayerHelper('mm', **locals())
         out = helper.create_variable_for_type_inference(dtype=input.dtype)
         helper.append_op(
             type='matmul_v2',
             inputs={'X': input, 'Y': mat2},
             outputs={'Out': out},
         )
         return out
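The shape check above enforces the usual matmul contract: the last dim of the first operand must equal the second-to-last dim of the second. A quick sketch (public API, not from this commit):

    import paddle

    a = paddle.rand([2, 3])
    b = paddle.rand([3, 4])
    print(paddle.mm(a, b).shape)  # [2, 4]
    # paddle.mm(paddle.rand([2, 3]), paddle.rand([2, 4]))  # would raise: 3 != 2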
def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
...
@@ -2080,25 +1957,21 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     if in_dygraph_mode():
         return _C_ops.addmm(input, x, y, beta, alpha)
     else:
-        if _in_legacy_dygraph():
-            out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
-            return out
-        else:
         inputs = {'Input': input, "X": x, "Y": y}
         attrs = {'Alpha': alpha, 'Beta': beta}

         helper = LayerHelper("addmm", **locals())
         check_variable_and_dtype(
             input, 'Input', ['float32', 'float64'], 'addmm'
         )
         check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
         check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}
         )
         return out
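A usage sketch of the fused op (public API, not from this commit): addmm computes beta * input + alpha * (x @ y).

    import paddle

    inp = paddle.ones([2, 2])
    x = paddle.ones([2, 2])
    y = paddle.ones([2, 2])
    # Every entry is 0.5 * 1 + 2.0 * 2 = 4.5.
    print(paddle.addmm(inp, x, y, beta=0.5, alpha=2.0))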
def renorm(x, p, axis, max_norm):
...
@@ -2154,22 +2027,17 @@ def renorm(x, p, axis, max_norm):
     if in_dygraph_mode():
         out = _C_ops.renorm(x, p, axis, max_norm)
         return out
-    elif _in_legacy_dygraph():
-        out = _legacy_C_ops.renorm(
-            x, 'p', p, 'axis', axis, 'max_norm', max_norm
-        )
-        return out
+    else:
         inputs = {'X': x}
         attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}

         helper = LayerHelper("renorm", **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}
         )
         return out

def inner(x, y, name=None):
...
@@ -2213,36 +2081,37 @@ def inner(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny.T).reshape(dstshape)
+    else:

         def __check_input(x, y):
             var_names = {'x': x, 'y': y}
             for name, val in var_names.items():
                 check_variable_and_dtype(
                     val, name, ['float16', 'float32', 'float64'], 'inner'
                 )
             x_shape = list(xshape)
             y_shape = list(yshape)

             # check the inner 2 dimensions
             if x_shape[-1] != y_shape[-1]:
                 if not ((x_shape[-1] == -1) or (y_shape[-1] == -1)):
                     raise ValueError(
                         "After performing an optional transpose, Input X's last dim should be "
                         "equal to Y's last dim for multiplication "
                         "prerequisites. But received X's shape: %s, Y's shape: %s\n"
                         % (x_shape, y_shape)
                     )

         __check_input(nx, ny)

         helper = LayerHelper('inner', **locals())
         out = helper.create_variable_for_type_inference(dtype=nx.dtype)
         helper.append_op(
             type='matmul_v2',
             inputs={'X': nx, 'Y': ny.T},
             outputs={'Out': out},
         )
         return out.reshape(dstshape)

def outer(x, y, name=None):
...
@@ -2279,24 +2148,23 @@ def outer(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.matmul(nx, ny, False, False)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.matmul_v2(nx, ny)
+    else:

         def __check_input(x, y):
             var_names = {'x': x, 'y': y}
             for name, val in var_names.items():
                 check_variable_and_dtype(
                     val, name, ['float16', 'float32', 'float64'], 'inner'
                 )

         __check_input(nx, ny)

         helper = LayerHelper('outer', **locals())
         out = helper.create_variable_for_type_inference(dtype=nx.dtype)
         helper.append_op(
             type='matmul_v2', inputs={'X': nx, 'Y': ny}, outputs={'Out': out}
         )
         return out
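Both inner and outer lower to the same matmul_v2 op, as the hunks above show; a quick sketch of their contrasting results (public API, not from this commit):

    import paddle

    x = paddle.to_tensor([1., 2., 3.])
    y = paddle.to_tensor([4., 5., 6.])
    print(paddle.inner(x, y))        # 32. (contraction over the last dim)
    print(paddle.outer(x, y).shape)  # [3, 3] (all pairwise products)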
def logsumexp(x, axis=None, keepdim=False, name=None):
...
@@ -2345,20 +2213,16 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logsumexp(
-            x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')

         helper = LayerHelper('logsumexp', **locals())
         attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(
             type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
         )
         return out
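A usage sketch (public API, not from this commit): logsumexp computes log(sum(exp(x))) along the given axes in a numerically stable way.

    import paddle

    x = paddle.to_tensor([[-1.5, 0., 2.], [3., -0.7, -0.1]])
    print(paddle.logsumexp(x))          # one scalar over all elements
    print(paddle.logsumexp(x, axis=1))  # one value per row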
def inverse(x, name=None):
...
@@ -2390,25 +2254,24 @@ def inverse(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.inverse(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.inverse(x)
+    else:

         def _check_input(x):
             check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
             if len(x.shape) < 2:
                 raise ValueError(
                     "The input of inverse is expected to be a Tensor whose number "
                     "of dimensions is no less than 2. But reviced: %d, "
                     "x's shape: %s." % (len(x.shape), x.shape)
                 )

         _check_input(x)
         helper = LayerHelper('inverse', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='inverse', inputs={'Input': [x]}, outputs={'Output': [out]}
         )
         return out
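A minimal usage sketch (public API, not from this commit); note the rank check above: 1-D input is rejected.

    import paddle

    m = paddle.to_tensor([[2., 0.], [0., 4.]])
    print(paddle.inverse(m))  # [[0.5, 0.], [0., 0.25]]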
def max(x, axis=None, keepdim=False, name=None):
...
@@ -2491,27 +2354,23 @@ def max(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.max(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_max(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
         helper = LayerHelper('max', **locals())
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max'
         )
         if not isinstance(axis, Variable) and utils._contain_var(axis):
             axis = utils._convert_to_tensor_list(axis)

         out = helper.create_variable_for_type_inference(dtype=x.dtype)
         helper.append_op(
             type='reduce_max',
             inputs={'X': x},
             outputs={'Out': out},
             attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
         )
         return out
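A usage sketch of the reduce semantics being dispatched here (public API, not from this commit):

    import paddle

    x = paddle.to_tensor([[0.2, 0.9], [0.7, 0.1]])
    print(paddle.max(x))                         # 0.9
    print(paddle.max(x, axis=0))                 # [0.7, 0.9]
    print(paddle.max(x, axis=-1, keepdim=True))  # [[0.9], [0.7]]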
def
min
(
x
,
axis
=
None
,
keepdim
=
False
,
name
=
None
):
def
min
(
x
,
axis
=
None
,
keepdim
=
False
,
name
=
None
):
...
@@ -2593,26 +2452,21 @@ def min(x, axis=None, keepdim=False, name=None):
...
@@ -2593,26 +2452,21 @@ def min(x, axis=None, keepdim=False, name=None):
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
min
(
x
,
axis
,
keepdim
)
return
_C_ops
.
min
(
x
,
axis
,
keepdim
)
else
:
reduce_all
,
axis
=
_get_reduce_axis_with_tensor
(
axis
,
x
)
reduce_all
,
axis
=
_get_reduce_axis_with_tensor
(
axis
,
x
)
if
_in_legacy_dygraph
():
helper
=
LayerHelper
(
'min'
,
**
locals
())
return
_legacy_C_ops
.
reduce_min
(
check_variable_and_dtype
(
x
,
'
dim'
,
axis
,
'keep_dim'
,
keepdim
,
'reduce_all'
,
reduce_all
x
,
'
x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'min'
)
)
helper
=
LayerHelper
(
'min'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
check_variable_and_dtype
(
helper
.
append_op
(
x
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
'min'
type
=
'reduce_min'
,
)
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
attrs
=
{
'dim'
:
axis
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
reduce_all
},
helper
.
append_op
(
)
type
=
'reduce_min'
,
return
out
inputs
=
{
'X'
:
x
},
outputs
=
{
'Out'
:
out
},
attrs
=
{
'dim'
:
axis
,
'keep_dim'
:
keepdim
,
'reduce_all'
:
reduce_all
},
)
return
out
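
Both reductions keep the signatures shown in the hunks above; a usage sketch (assumption: any Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([[0.2, 0.9], [0.7, 0.1]])
print(paddle.max(x))                        # 0.9, reduce over all elements
print(paddle.min(x, axis=1, keepdim=True))  # [[0.2], [0.1]], rank preserved
```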
def amax(x, axis=None, keepdim=False, name=None):
    ...
@@ -2707,25 +2561,21 @@ def amax(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amax(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_amax(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        helper = LayerHelper('amax', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_amax',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out

def amin(x, axis=None, keepdim=False, name=None):
    ...
@@ -2821,24 +2671,21 @@ def amin(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.amin(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_amin(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        helper = LayerHelper('amin', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_amin',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out
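
`amax`/`amin` mirror `max`/`min` above; per the Paddle docs the difference only concerns how the gradient is spread across tied extrema. A usage sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([[0.1, 0.9, 0.9]])
print(paddle.amax(x, axis=1))  # [0.9]
print(paddle.amin(x, axis=1))  # [0.1]
```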
def log1p(x, name=None):
    ...
@@ -2867,16 +2714,14 @@ def log1p(x, name=None):
     if in_dygraph_mode():
         return _C_ops.log1p(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log1p(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
-    inputs = {'X': [x]}
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
+        inputs = {'X': [x]}
+        helper = LayerHelper('log1p', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
+        return out

def log2(x, name=None):
    ...
@@ -2919,16 +2764,16 @@ def log2(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log2(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log2(x)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "log2"
+        )
+        inputs = {'X': [x]}
+        helper = LayerHelper('log2', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log2", inputs={"X": x}, outputs={"Out": out})
+        return out

def log10(x, name=None):
    ...
@@ -2971,16 +2816,16 @@ def log10(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.log10(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.log10(x)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "log10"
+        )
+        inputs = {'X': [x]}
+        helper = LayerHelper('log10', **locals())
+        dtype = helper.input_dtype(input_param_name='x')
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(type="log10", inputs={"X": x}, outputs={"Out": out})
+        return out
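
The three log variants share one template; `log1p` exists because it stays accurate where `log(1 + x)` loses precision. A sketch (assuming float32 tensors on any Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([1e-8], dtype='float32')
print(paddle.log1p(x))    # ≈ [1e-8], accurate near zero
print(paddle.log(1 + x))  # [0.], since 1 + 1e-8 rounds to 1.0 in float32
print(paddle.log2(paddle.to_tensor([8.0])))     # [3.]
print(paddle.log10(paddle.to_tensor([100.0])))  # [2.]
```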
def clip(x, min=None, max=None, name=None):
    ...
@@ -3038,65 +2883,56 @@ def clip(x, min=None, max=None, name=None):
         min = min_ if min is None else min
         max = max_ if max is None else max
         return _C_ops.clip(x, min, max)
-
-    if _in_legacy_dygraph():
-        if isinstance(min, Variable):
-            min = min.numpy().item(0)
-        if isinstance(max, Variable):
-            max = max.numpy().item(0)
-        min = min_ if min is None else min
-        max = max_ if max is None else max
-        return _legacy_C_ops.clip(x, "min", min, "max", max)
+    else:
+        if min is not None:
+            check_type(min, 'min', (float, int, Variable), 'clip')
+            if isinstance(min, Variable):
+                check_dtype(
+                    min.dtype,
+                    'min',
+                    ['float32', 'float64', 'int32'],
+                    'clip',
+                    '(When the type of min in clip is Variable.)',
+                )
+        if max is not None:
+            check_type(max, 'max', (float, int, Variable), 'clip')
+            if isinstance(max, Variable):
+                check_dtype(
+                    max.dtype,
+                    'max',
+                    ['float32', 'float64', 'int32'],
+                    'clip',
+                    '(When the type of max in clip is Variable.)',
+                )
+
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip'
+        )
+
+        inputs = {'X': x}
+        attrs = {'min': min_, 'max': max_}
+
+        if isinstance(min, Variable):
+            min.stop_gradient = True
+            inputs['Min'] = min
+        elif min is not None:
+            attrs['min'] = min
+
+        if isinstance(max, Variable):
+            max.stop_gradient = True
+            inputs['Max'] = max
+        elif max is not None:
+            attrs['max'] = max
+
+        helper = LayerHelper('clip', **locals())
+        output = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype('x')
+        )
+        helper.append_op(
+            type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
+        )
+        return output
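
Either bound of `clip` may be omitted, matching the `min_`/`max_` defaults handled above. A usage sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([-1.5, 0.3, 2.7])
print(paddle.clip(x, min=0.0, max=1.0))  # [0.0, 0.3, 1.0]
print(paddle.clip(x, min=0.0))           # [0.0, 0.3, 2.7], upper bound open
```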
@inplace_apis_in_dygraph_only
...
@@ -3117,9 +2953,6 @@ def clip_(x, min=None, max=None, name=None):
     if in_dygraph_mode():
         return _C_ops.clip_(x, min, max)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.clip_(x, "min", min, "max", max)

def trace(x, offset=0, axis1=0, axis2=1, name=None):
    """
    ...
@@ -3196,24 +3029,19 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.trace(x, offset, axis1, axis2)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.trace(
-            x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-        )
-
-    __check_input(x, offset, axis1, axis2)
+    else:
+        __check_input(x, offset, axis1, axis2)
+
+        helper = LayerHelper('trace', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        helper.append_op(
+            type='trace',
+            inputs={'Input': [x]},
+            attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
+            outputs={'Out': [out]},
+        )
+        return out
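
`trace` sums one diagonal selected by `offset`, as the attrs above suggest. Sketch (assuming a Paddle 2.x build):

```python
import paddle

m = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
print(paddle.trace(m))            # 5.0, the main diagonal 1 + 4
print(paddle.trace(m, offset=1))  # 2.0, the first super-diagonal
```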
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
    ...
@@ -3284,54 +3112,50 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
     if in_dygraph_mode():
         return _C_ops.diagonal(x, offset, axis1, axis2)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.diagonal(
-                x, 'offset', offset, 'axis1', axis1, 'axis2', axis2
-            )

         def __check_input(x, offset, axis1, axis2):
             check_dtype(
                 x.dtype,
                 'Input',
                 ['bool', 'int32', 'int64', 'float16', 'float32', 'float64'],
                 'diagonal',
             )

             input_shape = list(x.shape)
             assert len(input_shape) >= 2, (
                 "The x must be at least 2-dimensional, "
                 "But received Input x's dimensional: %s.\n" % len(input_shape)
             )

             axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1
             axis2_ = axis2 if axis2 >= 0 else len(input_shape) + axis2

             assert axis1_ < len(input_shape), (
                 "The argument axis1 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                 % (-(len(input_shape)), len(input_shape) - 1, axis1)
             )

             assert axis2_ < len(input_shape), (
                 "The argument axis2 is out of range (expected to be in range of [%d, %d], but got %d).\n"
                 % (-(len(input_shape)), len(input_shape) - 1, axis2)
             )

             assert axis1_ != axis2_, (
                 "axis1 and axis2 cannot be the same axis."
                 "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2)
             )

         __check_input(x, offset, axis1, axis2)
         helper = LayerHelper('diagonal', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)

         helper.append_op(
             type='diagonal',
             inputs={'Input': [x]},
             attrs={'offset': offset, 'axis1': axis1, 'axis2': axis2},
             outputs={'Out': [out]},
         )
         return out

@templatedoc(op_type="kron")
...
@@ -3363,21 +3187,22 @@ def kron(x, y, name=None):
             # [12, 15, 18, 16, 20, 24],
             # [21, 24, 27, 28, 32, 36]])
     """
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.kron(x, y)
     if in_dygraph_mode():
         return _C_ops.kron(x, y)
+    else:
+        helper = LayerHelper('kron', **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}
+        )
+        return out
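
For context, the Kronecker product built by this op (sketch, assuming a Paddle 2.x build):

```python
import paddle

a = paddle.to_tensor([[1, 2], [3, 4]])
b = paddle.to_tensor([[0, 1], [1, 0]])
print(paddle.kron(a, b))
# [[0, 1, 0, 2],
#  [1, 0, 2, 0],
#  [0, 3, 0, 4],
#  [3, 0, 4, 0]]
```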
def cumsum(x, axis=None, dtype=None, name=None):
    ...
@@ -3432,20 +3257,15 @@ def cumsum(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.cumsum(x, axis, flatten, False, False)
-
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.cumsum(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
+    else:
+        check_type(x, 'x', (Variable), 'cumsum')
+        locals_var = locals().copy()
+        kwargs = dict()
+        for name, val in locals_var.items():
+            if val is not None:
+                kwargs[name] = val
+        _cum_sum_ = generate_layer_fn('cumsum')
+        return _cum_sum_(**kwargs)

def logcumsumexp(x, axis=None, dtype=None, name=None):
    ...
@@ -3507,27 +3327,20 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
         if axis is None:
             axis = -1
         return _C_ops.logcumsumexp(x, axis, flatten, False, False)
-
-    if _in_legacy_dygraph():
-        if axis is None:
-            return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten)
-        else:
-            return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten', flatten)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], "logcumsumexp"
+        )
+
+        helper = LayerHelper('logcumsumexp', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='logcumsumexp',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'axis': axis, 'flatten': flatten},
+        )
+        return out

def cumprod(x, dim=None, dtype=None, name=None):
    ...
@@ -3586,26 +3399,24 @@ def cumprod(x, dim=None, dtype=None, name=None):
     if in_dygraph_mode():
         return _C_ops.cumprod(x, dim)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cumprod(x, 'dim', dim)
+    else:
+        check_variable_and_dtype(
+            x,
+            "x",
+            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
+            'cumprod',
+        )
+        check_type(dim, 'dim', int, 'cumprod')
+
+        helper = LayerHelper('cumprod', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='cumprod',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': dim},
+        )
+        return out
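
The cumulative ops keep their documented semantics; note `cumprod` takes `dim` where `cumsum` takes `axis`, exactly as in the signatures above. Sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
print(paddle.cumsum(x))          # [1., 3., 6., 10.]
print(paddle.cumprod(x, dim=0))  # [1., 2., 6., 24.]
```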
def isfinite(x, name=None):
    ...
@@ -3631,15 +3442,19 @@ def isfinite(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isfinite(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isfinite_v2(x)
-
-    helper = LayerHelper("isfinite_v2", **locals())
+    else:
+        helper = LayerHelper("isfinite_v2", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'isfinite',
+        )
+        out = helper.create_variable_for_type_inference('bool')
+        helper.append_op(
+            type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}
+        )
+        return out

def isinf(x, name=None):
    ...
@@ -3665,15 +3480,14 @@ def isinf(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isinf(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isinf_v2(x)
-
-    helper = LayerHelper("isinf_v2", **locals())
+    else:
+        helper = LayerHelper("isinf_v2", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf'
+        )
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
+        return out

def isnan(x, name=None):
    ...
@@ -3699,16 +3513,14 @@ def isnan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.isnan(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.isnan_v2(x)
-
-    helper = LayerHelper("isnan_v2", **locals())
+    else:
+        helper = LayerHelper("isnan_v2", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan'
+        )
+        out = helper.create_variable_for_type_inference(dtype='bool')
+        helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
+        return out
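
The three predicates partition the special float values; a sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([1.0, float('inf'), float('nan')])
print(paddle.isfinite(x))  # [True , False, False]
print(paddle.isinf(x))     # [False, True , False]
print(paddle.isnan(x))     # [False, False, True ]
```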
def prod(x, axis=None, keepdim=False, dtype=None, name=None):
    ...
@@ -3775,24 +3587,24 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
     reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
     if in_dygraph_mode():
         return _C_ops.prod(x, axis, keepdim, reduce_all)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_prod(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        helper = LayerHelper('reduce_prod', **locals())
+        check_variable_and_dtype(
+            x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod'
+        )
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        helper.append_op(
+            type='reduce_prod',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all},
+        )
+        return out

def sign(x, name=None):
    ...
@@ -3817,17 +3629,16 @@ def sign(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sign(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sign(x)
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
+        helper = LayerHelper("sign", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})
+        return out

def tanh(x, name=None):
    ...
@@ -3857,16 +3668,15 @@ def tanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tanh(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tanh(x)
+    else:
+        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
+        check_type(x, 'x', (Variable), 'tanh')
+        helper = LayerHelper('tanh', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
+        return out

@inplace_apis_in_dygraph_only
...
@@ -3875,9 +3685,7 @@ def tanh_(x, name=None):
     Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_tanh`.
     """
-    if in_dygraph_mode():
-        return _C_ops.tanh_(x)
-    return _legacy_C_ops.tanh_(x)
+    return _C_ops.tanh_(x)

def increment(x, value=1.0, name=None):
    ...
@@ -3905,21 +3713,18 @@ def increment(x, value=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.increment_(x, value)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.increment(x, 'step', value)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment'
+        )
+        helper = LayerHelper("increment", **locals())
+        helper.append_op(
+            type='increment',
+            inputs={'X': [x]},
+            outputs={'Out': [x]},
+            attrs={'step': float(value)},
+        )
+        return x

def all(x, axis=None, keepdim=False, name=None):
    ...
@@ -3973,28 +3778,26 @@ def all(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.all(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    attrs = {
-        'dim': axis,
-        'keep_dim': keepdim,
-        'reduce_all': reduce_all,
-    }
-    check_variable_and_dtype(x, 'x', ['bool'], 'all')
-    check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_all(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        attrs = {
+            'dim': axis,
+            'keep_dim': keepdim,
+            'reduce_all': reduce_all,
+        }
+        check_variable_and_dtype(x, 'x', ['bool'], 'all')
+        check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
+        helper = LayerHelper('all', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_all',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out

def any(x, axis=None, keepdim=False, name=None):
    ...
@@ -4049,29 +3852,27 @@ def any(x, axis=None, keepdim=False, name=None):
     """
    if in_dygraph_mode():
         return _C_ops.any(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis(axis, x)
-    attrs = {
-        'dim': axis,
-        'keep_dim': keepdim,
-        'reduce_all': reduce_all,
-    }
-    check_variable_and_dtype(x, 'x', ['bool'], 'any')
-    check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_any(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
+    else:
+        reduce_all, axis = _get_reduce_axis(axis, x)
+        attrs = {
+            'dim': axis,
+            'keep_dim': keepdim,
+            'reduce_all': reduce_all,
+        }
+        check_variable_and_dtype(x, 'x', ['bool'], 'any')
+        check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
+        helper = LayerHelper('any', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reduce_any',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out
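
`all`/`any` accept only bool inputs, per the dtype checks above. Sketch (assuming a Paddle 2.x build):

```python
import paddle

mask = paddle.to_tensor([[True, False], [True, True]])
print(paddle.all(mask, axis=1))  # [False, True]
print(paddle.any(mask, axis=0))  # [True, True]
```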
def broadcast_shape(x_shape, y_shape):
    ...
@@ -4137,22 +3938,21 @@ def conj(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.conj(x)
-
-    check_variable_and_dtype(
-        x,
-        "x",
-        ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
-        'conj',
-    )
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.conj(x)
+    else:
+        check_variable_and_dtype(
+            x,
+            "x",
+            ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
+            'conj',
+        )
+        helper = LayerHelper('conj', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=helper.input_dtype()
+        )
+        helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
+        return out

def digamma(x, name=None):
    ...
@@ -4184,14 +3984,11 @@ def digamma(x, name=None):
     if in_dygraph_mode():
         return _C_ops.digamma(x)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.digamma(x)
-
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
         helper = LayerHelper('digamma', **locals())
         out = helper.create_variable_for_type_inference(x.dtype)
         helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
         return out

def lgamma(x, name=None):
    ...
@@ -4221,14 +4018,12 @@ def lgamma(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.lgamma(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.lgamma(x)
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lgamma')
+        helper = LayerHelper('lgamma', **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(type='lgamma', inputs={'X': x}, outputs={'Out': out})
+        return out

def neg(x, name=None):
    ...
@@ -4304,27 +4099,24 @@ def atan2(x, y, name=None):
     if in_dygraph_mode():
         return _C_ops.atan2(x, y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.atan2(x, y)
-        else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2',
+        )
+        check_variable_and_dtype(
+            y,
+            'y',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'atan2',
+        )
+
+        helper = LayerHelper('atan2', **locals())
+        inputs = {'X1': x, 'X2': y}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
+        return out
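
Unlike `atan(a / b)`, `atan2` keeps quadrant information from the signs of both arguments. Sketch (assuming a Paddle 2.x build):

```python
import paddle

a = paddle.to_tensor([1.0, -1.0])
b = paddle.to_tensor([-1.0, -1.0])
print(paddle.atan2(a, b))  # [ 2.3562, -2.3562], i.e. ±3π/4
print(paddle.atan(a / b))  # [-0.7854,  0.7854], quadrant lost
```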
def logit(x, eps=None, name=None):
    ...
@@ -4367,20 +4159,23 @@ def logit(x, eps=None, name=None):
         # [-1.0277, -4.5365, -0.9544, -1.3269, 1.4468]
     """
     if eps is None:
         eps = 0.0
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.logit(x, 'eps', eps)
     if in_dygraph_mode():
         return _C_ops.logit(x, eps)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'logit'
+        )
+        helper = LayerHelper("logit", **locals())
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='logit',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs={'eps': eps},
+        )
+        return out

def lerp(x, y, weight, name=None):
    ...
@@ -4419,23 +4214,21 @@ def lerp(x, y, weight, name=None):
             weight = paddle.to_tensor(weight, dtype=x.dtype)
         return _C_ops.lerp(x, y, weight)
-
-    if _in_legacy_dygraph():
-        if isinstance(weight, float):
-            weight = paddle.to_tensor(weight, dtype=x.dtype)
-        return _legacy_C_ops.lerp(x, y, weight)
+    else:
+        if isinstance(weight, float):
+            weight = paddle.full(shape=[1], fill_value=weight, dtype=x.dtype)
+
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'lerp')
+        check_variable_and_dtype(weight, 'weight', ['float32', 'float64'], 'lerp')
+
+        helper = LayerHelper('lerp', **locals())
+        inputs = {'X': x, 'Y': y, 'Weight': weight}
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
+        return out
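
`lerp(x, y, w)` evaluates `x + w * (y - x)`, with `w` given as a float or a tensor as handled above. Sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([0.0, 10.0])
y = paddle.to_tensor([10.0, 20.0])
print(paddle.lerp(x, y, 0.5))  # [5., 15.], the midpoint
```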
@inplace_apis_in_dygraph_only
...
@@ -4456,9 +4249,7 @@ def lerp_(x, y, weight, name=None):
             out_shape, x.shape
         )
     )
-    if in_dygraph_mode():
-        return _C_ops.lerp_(x, y, weight)
-    return _legacy_C_ops.lerp_(x, y, weight)
+    return _C_ops.lerp_(x, y, weight)

def erfinv(x, name=None):
    ...
@@ -4488,16 +4279,12 @@ def erfinv(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.erfinv(x)
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.erfinv(x)
+    else:
+        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
+        helper = LayerHelper('erfinv', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
+        return out

@inplace_apis_in_dygraph_only
...
@@ -4507,9 +4294,7 @@ def erfinv_(x, name=None):
     Please refer to :ref:`api_tensor_erfinv`.
     """
     check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
-    if in_dygraph_mode():
-        return _C_ops.erfinv_(x)
-    return _legacy_C_ops.erfinv_(x)
+    return _C_ops.erfinv_(x)

def rad2deg(x, name=None):
    ...
@@ -4558,10 +4343,6 @@ def rad2deg(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, rad2deg_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', rad2deg_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg'

...
@@ -4626,10 +4407,6 @@ def deg2rad(x, name=None):
         if convert_dtype(x.dtype) in ['int32', 'int64']:
             x = cast(x, dtype="float32")
         return _C_ops.scale(x, deg2rad_scale, 0.0, True)
-    elif paddle.in_dynamic_mode():
-        if convert_dtype(x.dtype) in ['int32', 'int64']:
-            x = cast(x, dtype="float32")
-        return _legacy_C_ops.scale(x, 'scale', deg2rad_scale)
     else:
         check_variable_and_dtype(
             x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad'

...
@@ -4729,7 +4506,7 @@ def gcd(x, y, name=None):
         )
         return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         while _gcd_cond_fn(x, y):
             x, y = _gcd_body_fn(x, y)

...
@@ -4907,68 +4684,6 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
             return _C_ops.logical_xor(input_back, input_front)
         else:
             return _C_ops.subtract(input_back, input_front)
-    elif _in_legacy_dygraph():
-        has_pend = False
-        input_list = []
-        if prepend is not None and append is not None:
-            input_list = [prepend, x, append]
-            has_pend = True
-        elif prepend is not None:
-            input_list = [prepend, x]
-            has_pend = True
-        elif append is not None:
-            input_list = [x, append]
-            has_pend = True
-        if has_pend:
-            new_input = _varbase_creator()
-            _legacy_C_ops.concat(input_list, new_input, 'axis', axis)
-        else:
-            new_input = x
-
-        attrs_1 = ()
-        attrs_2 = ()
-        dim_len = new_input.shape[axis]
-
-        starts_1 = [0]
-        attrs_1 += ('starts', starts_1)
-        ends_1 = [dim_len - 1]
-        attrs_1 += ('ends', ends_1)
-        input_front = _legacy_C_ops.slice(
-            new_input, None, None, None, None, 'axes', axes,
-            'infer_flags', infer_flags, *attrs_1
-        )
-        starts_2 = [1]
-        attrs_2 += ('starts', starts_2)
-        ends_2 = [dim_len]
-        attrs_2 += ('ends', ends_2)
-        input_back = _legacy_C_ops.slice(
-            new_input, None, None, None, None, 'axes', axes,
-            'infer_flags', infer_flags, *attrs_2
-        )
-
-        if x.dtype == paddle.bool:
-            return _legacy_C_ops.logical_xor(input_back, input_front)
-        else:
-            return paddle.tensor.math._subtract_with_axis(
-                input_back, input_front, axis=axis
-            )
     else:
         check_variable_and_dtype(
             x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff'
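
`diff` computes first-order forward differences (bool inputs fall back to `logical_xor`, as the branch above shows). Sketch (assuming a Paddle 2.x build):

```python
import paddle

x = paddle.to_tensor([1, 3, 6, 10])
print(paddle.diff(x))  # [2, 3, 4]
```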
...
@@ -5082,21 +4797,19 @@ def angle(x, name=None):
     if in_dygraph_mode():
         return _C_ops.angle(x)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.angle(x)
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64', 'complex64', 'complex128'], 'angle'
+        )
+        op_type = "angle"
+        helper = LayerHelper(op_type, **locals())
+        inputs = {"X": x}
+        out = helper.create_variable_for_type_inference(
+            dtype=_complex_to_real_dtype(x.dtype)
+        )
+        outputs = {"Out": out}
+        helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
+        return out

def heaviside(x, y, name=None):
    ...
@@ -5143,11 +4856,12 @@ def heaviside(x, y, name=None):
     op_type = 'elementwise_heaviside'
     axis = -1
     act = None
-    if _non_static_mode():
+    if in_dygraph_mode():
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type
         )
-    return _elementwise_op(LayerHelper(op_type, **locals()))
+    else:
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def frac(x, name=None):
    ...
@@ -5192,24 +4906,18 @@ def frac(x, name=None):
         y = _C_ops.trunc(x)
         return _C_ops.subtract(x, y)
     else:
-        if _in_legacy_dygraph():
-            y = _legacy_C_ops.trunc(x)
-            return _elementwise_op_in_dygraph(
-                x, y, axis=axis, act=act, op_name=op_type
-            )
-        else:
+        inputs = {"X": x}
+        attrs = {}
+
+        helper = LayerHelper("trunc", **locals())
+        check_variable_and_dtype(
+            x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc'
+        )
+        y = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}
+        )
+        return _elementwise_op(LayerHelper(op_type, **locals()))

def sgn(x, name=None):
    ...
@@ -5334,7 +5042,7 @@ def take(x, index, mode='raise', name=None):
         )
     )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         if not isinstance(index, (paddle.Tensor, Variable)):
             raise TypeError(
                 "The type of 'index' must be Tensor, but got {}".format(

python/paddle/tensor/ops.py View file @ 861fef52

@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from .. import _C_ops, _legacy_C_ops
+from .. import _C_ops
 from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ..fluid.framework import in_dygraph_mode
 from ..framework import LayerHelper
 from .layer_function_generator import (
     add_sample_code,
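
With `_legacy_C_ops` gone from the imports, every unary API in this file collapses to one two-branch shape. A minimal sketch of that pattern; `my_op` is a hypothetical operator name used only for illustration:

```python
def my_op(x, name=None):
    if in_dygraph_mode():
        # eager path: dispatch straight to the C++ kernel
        return _C_ops.my_op(x)
    else:
        # static-graph path: validate inputs, then append an op to the program
        check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'my_op')
        helper = LayerHelper('my_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='my_op', inputs={"X": x}, outputs={"Out": out})
        return out
```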
...
@@ -218,14 +218,14 @@ def acos(x, name=None):
...
@@ -218,14 +218,14 @@ def acos(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
acos
(
x
)
return
_C_ops
.
acos
(
x
)
if
_in_legacy_dygraph
()
:
else
:
return
_legacy_C_ops
.
acos
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'acos'
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'acos'
)
)
helper
=
LayerHelper
(
'acos'
,
**
locals
())
helper
=
LayerHelper
(
'acos'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'acos'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
helper
.
append_op
(
type
=
'acos'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
return
out
def
acosh
(
x
,
name
=
None
):
def
acosh
(
x
,
name
=
None
):
...
@@ -255,14 +255,14 @@ def acosh(x, name=None):
...
@@ -255,14 +255,14 @@ def acosh(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
acosh
(
x
)
return
_C_ops
.
acosh
(
x
)
if
_in_legacy_dygraph
()
:
else
:
return
_legacy_C_ops
.
acosh
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'acosh'
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'acosh'
)
)
helper
=
LayerHelper
(
'acosh'
,
**
locals
())
helper
=
LayerHelper
(
'acosh'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'acosh'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
helper
.
append_op
(
type
=
'acosh'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
return
out
def
asin
(
x
,
name
=
None
):
def
asin
(
x
,
name
=
None
):
...
@@ -292,14 +292,14 @@ def asin(x, name=None):
...
@@ -292,14 +292,14 @@ def asin(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
asin
(
x
)
return
_C_ops
.
asin
(
x
)
if
_in_legacy_dygraph
()
:
else
:
return
_legacy_C_ops
.
asin
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'asin'
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'asin'
)
)
helper
=
LayerHelper
(
'asin'
,
**
locals
())
helper
=
LayerHelper
(
'asin'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'asin'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
helper
.
append_op
(
type
=
'asin'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
return
out
def
asinh
(
x
,
name
=
None
):
def
asinh
(
x
,
name
=
None
):
...
@@ -329,14 +329,14 @@ def asinh(x, name=None):
...
@@ -329,14 +329,14 @@ def asinh(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
asinh
(
x
)
return
_C_ops
.
asinh
(
x
)
if
_in_legacy_dygraph
()
:
else
:
return
_legacy_C_ops
.
asinh
(
x
)
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'asinh'
check_variable_and_dtype
(
x
,
'x'
,
[
'float16'
,
'float32'
,
'float64'
],
'asinh'
)
)
helper
=
LayerHelper
(
'asinh'
,
**
locals
())
helper
=
LayerHelper
(
'asinh'
,
**
locals
())
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
out
=
helper
.
create_variable_for_type_inference
(
dtype
=
x
.
dtype
)
helper
.
append_op
(
type
=
'asinh'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
helper
.
append_op
(
type
=
'asinh'
,
inputs
=
{
"X"
:
x
},
outputs
=
{
"Out"
:
out
})
return
out
return
out
def
atan
(
x
,
name
=
None
):
def
atan
(
x
,
name
=
None
):
...
@@ -366,14 +366,14 @@ def atan(x, name=None):
...
@@ -366,14 +366,14 @@ def atan(x, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
_C_ops
.
atan
(
x
)
return
_C_ops
.
atan
(
x
)
if
_in_legacy_dygraph
()
:
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atan(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'atan'
-    )
-    helper = LayerHelper('atan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atan'
+        )
+        helper = LayerHelper('atan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atan', inputs={"X": x}, outputs={"Out": out})
+        return out


 def atanh(x, name=None):
...
@@ -403,14 +403,14 @@ def atanh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.atanh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.atanh(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'atanh'
-    )
-    helper = LayerHelper('atanh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'atanh'
+        )
+        helper = LayerHelper('atanh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='atanh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def ceil(x, name=None):
...
@@ -441,14 +441,14 @@ def ceil(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.ceil(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.ceil(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'ceil'
-    )
-    helper = LayerHelper('ceil', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'ceil'
+        )
+        helper = LayerHelper('ceil', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
+        return out


 def cos(x, name=None):
...
@@ -480,14 +480,14 @@ def cos(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cos(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cos(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'cos'
-    )
-    helper = LayerHelper('cos', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cos'
+        )
+        helper = LayerHelper('cos', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cos', inputs={"X": x}, outputs={"Out": out})
+        return out


 def cosh(x, name=None):
...
@@ -519,14 +519,14 @@ def cosh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.cosh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.cosh(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'cosh'
-    )
-    helper = LayerHelper('cosh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'cosh'
+        )
+        helper = LayerHelper('cosh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='cosh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def exp(x, name=None):
...
@@ -557,27 +557,25 @@ def exp(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exp(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.exp(x)
-    check_variable_and_dtype(
-        x,
-        'x',
-        [
-            'int32',
-            'int64',
-            'float16',
-            'float32',
-            'float64',
-            'complex64',
-            'complex128',
-        ],
-        'exp',
-    )
-    helper = LayerHelper('exp', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'int32',
+                'int64',
+                'float16',
+                'float32',
+                'float64',
+                'complex64',
+                'complex128',
+            ],
+            'exp',
+        )
+        helper = LayerHelper('exp', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
+        return out
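Every hunk in this file applies the same mechanical rewrite: the `_in_legacy_dygraph()` / `_legacy_C_ops` branch is deleted and the static-graph fallback moves under an explicit `else:`. The sketch below shows the shape of the resulting dispatch for one op; `my_op` is a hypothetical placeholder standing in for any of atan/ceil/cos/..., and the sketch is a simplified illustration of the pattern, not the verbatim Paddle source:

    # Minimal sketch of the post-change dispatch pattern, assuming the usual
    # paddle.tensor.ops imports (in_dygraph_mode, _C_ops, LayerHelper, ...).
    # 'my_op' is a placeholder name, not a real Paddle operator.
    def my_unary_op(x, name=None):
        if in_dygraph_mode():
            # eager mode: call the final-state C++ kernel directly
            return _C_ops.my_op(x)
        else:
            # static graph: validate dtype, then append the op to the program
            check_variable_and_dtype(
                x, 'x', ['float16', 'float32', 'float64'], 'my_op'
            )
            helper = LayerHelper('my_op', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(type='my_op', inputs={"X": x}, outputs={"Out": out})
            return out

The remaining hunks below (expm1 through tan) repeat this pattern.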
 def expm1(x, name=None):
...
@@ -608,14 +606,14 @@ def expm1(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.expm1(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.expm1(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'expm1'
-    )
-    helper = LayerHelper('expm1', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'expm1'
+        )
+        helper = LayerHelper('expm1', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
+        return out


 def floor(x, name=None):
...
@@ -646,14 +644,14 @@ def floor(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.floor(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.floor(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'floor'
-    )
-    helper = LayerHelper('floor', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'floor'
+        )
+        helper = LayerHelper('floor', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
+        return out


 def reciprocal(x, name=None):
...
@@ -684,16 +682,16 @@ def reciprocal(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.reciprocal(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reciprocal(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
-    )
-    helper = LayerHelper('reciprocal', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='reciprocal', inputs={"X": x}, outputs={"Out": out}
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
+        )
+        helper = LayerHelper('reciprocal', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='reciprocal', inputs={"X": x}, outputs={"Out": out}
+        )
+        return out


 def round(x, name=None):
...
@@ -731,14 +729,14 @@ def round(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.round(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.round(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'round'
-    )
-    helper = LayerHelper('round', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'round'
+        )
+        helper = LayerHelper('round', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
+        return out


 def rsqrt(x, name=None):
...
@@ -770,14 +768,14 @@ def rsqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.rsqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.rsqrt(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
-    )
-    helper = LayerHelper('rsqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'rsqrt'
+        )
+        helper = LayerHelper('rsqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='rsqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sigmoid(x, name=None):
...
@@ -808,16 +806,14 @@ def sigmoid(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sigmoid(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sigmoid(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
-    )
-    helper = LayerHelper('sigmoid', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sigmoid'
+        )
+        helper = LayerHelper('sigmoid', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sigmoid', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sin(x, name=None):
...
@@ -847,14 +843,14 @@ def sin(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sin(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sin(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'sin'
-    )
-    helper = LayerHelper('sin', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sin'
+        )
+        helper = LayerHelper('sin', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sin', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sinh(x, name=None):
...
@@ -884,14 +880,14 @@ def sinh(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sinh(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sinh(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'sinh'
-    )
-    helper = LayerHelper('sinh', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sinh'
+        )
+        helper = LayerHelper('sinh', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sinh', inputs={"X": x}, outputs={"Out": out})
+        return out


 def sqrt(x, name=None):
...
@@ -920,14 +916,14 @@ def sqrt(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.sqrt(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.sqrt(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
-    )
-    helper = LayerHelper('sqrt', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
+        )
+        helper = LayerHelper('sqrt', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='sqrt', inputs={"X": x}, outputs={"Out": out})
+        return out


 def square(x, name=None):
...
@@ -956,27 +952,25 @@ def square(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.square(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.square(x)
-    check_variable_and_dtype(
-        x,
-        'x',
-        [
-            'int32',
-            'int64',
-            'float16',
-            'float32',
-            'float64',
-            'complex64',
-            'complex128',
-        ],
-        'square',
-    )
-    helper = LayerHelper('square', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            [
+                'int32',
+                'int64',
+                'float16',
+                'float32',
+                'float64',
+                'complex64',
+                'complex128',
+            ],
+            'square',
+        )
+        helper = LayerHelper('square', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='square', inputs={"X": x}, outputs={"Out": out})
+        return out


 def tan(x, name=None):
...
@@ -1008,14 +1002,14 @@ def tan(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.tan(x)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.tan(x)
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'tan'
-    )
-    helper = LayerHelper('tan', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'tan'
+        )
+        helper = LayerHelper('tan', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(type='tan', inputs={"X": x}, outputs={"Out": out})
+        return out


 _erf_ = generate_layer_fn('erf')
...
python/paddle/tensor/random.py View file @ 861fef52
...
@@ -16,11 +16,7 @@

 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import (
-    _current_expected_place,
-    _in_legacy_dygraph,
-    in_dygraph_mode,
-)
+from paddle.fluid.framework import _current_expected_place, in_dygraph_mode
 from paddle.static import Variable

 from ..fluid.data_feeder import (
...
@@ -80,21 +76,18 @@ def bernoulli(x, name=None):

     if in_dygraph_mode():
         return _C_ops.bernoulli(x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.bernoulli(x)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
-
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype
-    )  # maybe set out to int32 ?
-    helper.append_op(
-        type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={}
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")

+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype
+        )  # maybe set out to int32 ?
+        helper.append_op(
+            type='bernoulli', inputs={"X": x}, outputs={'Out': out}, attrs={}
+        )
+        out.stop_gradient = True
+        return out


 def poisson(x, name=None):
...
@@ -129,18 +122,15 @@ def poisson(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.poisson(x)
-
-    if paddle.in_dynamic_mode():
-        return _legacy_C_ops.poisson(x)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")
-
-    helper = LayerHelper("poisson", **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='poisson', inputs={'X': x}, outputs={'Out': out}, attrs={}
-    )
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson")

+        helper = LayerHelper("poisson", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='poisson', inputs={'X': x}, outputs={'Out': out}, attrs={}
+        )
+        return out


 def multinomial(x, num_samples=1, replacement=False, name=None):
...
@@ -197,26 +187,21 @@ def multinomial(x, num_samples=1, replacement=False, name=None):

     if in_dygraph_mode():
         return _C_ops.multinomial(x, num_samples, replacement)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.multinomial(
-            x, 'num_samples', num_samples, 'replacement', replacement
-        )
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
-
-    helper = LayerHelper("multinomial", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=convert_np_dtype_to_dtype_('int64')
-    )
-    helper.append_op(
-        type='multinomial',
-        inputs={"X": x},
-        outputs={'Out': out},
-        attrs={'num_samples': num_samples, 'replacement': replacement},
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")

+        helper = LayerHelper("multinomial", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=convert_np_dtype_to_dtype_('int64')
+        )
+        helper.append_op(
+            type='multinomial',
+            inputs={"X": x},
+            outputs={'Out': out},
+            attrs={'num_samples': num_samples, 'replacement': replacement},
+        )
+        out.stop_gradient = True
+        return out


 def uniform_random_batch_size_like(
...
@@ -356,44 +341,32 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None):
         return _C_ops.gaussian(
             shape, float(mean), float(std), seed, dtype, place
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.gaussian_random(
-            'shape',
-            shape,
-            'mean',
-            float(mean),
-            'std',
-            float(std),
-            'seed',
-            seed,
-            'dtype',
-            dtype,
-        )
-
-    check_shape(shape, op_type_for_check)
-    check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
-
-    inputs = {}
-    attrs = {
-        'mean': mean,
-        'std': std,
-        'seed': seed,
-        'dtype': dtype,
-        'use_mkldnn': False,
-    }
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check
-    )
-
-    helper = LayerHelper('gaussian', **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type='gaussian_random',
-        inputs=inputs,
-        outputs={'Out': out},
-        attrs=attrs,
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_shape(shape, op_type_for_check)
+        check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)

+        inputs = {}
+        attrs = {
+            'mean': mean,
+            'std': std,
+            'seed': seed,
+            'dtype': dtype,
+            'use_mkldnn': False,
+        }
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type=op_type_for_check
+        )
+        helper = LayerHelper('gaussian', **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type='gaussian_random',
+            inputs=inputs,
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        out.stop_gradient = True
+        return out
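In the retained static branch of `gaussian`, `shape` may be a Python list/tuple or a Tensor; `utils.get_shape_tensor_inputs` is what folds either form into the op's inputs and attrs. A quick way to exercise both surviving paths through a public wrapper (a sketch, assuming a stock Paddle install of this era, not part of the commit):

    import paddle

    # dynamic graph: takes the _C_ops.gaussian fast path
    x = paddle.normal(mean=0.0, std=1.0, shape=[2, 3])

    # static graph: takes the else-branch that appends a gaussian_random op
    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        y = paddle.normal(mean=0.0, std=1.0, shape=[2, 3])
    exe = paddle.static.Executor()
    (res,) = exe.run(main, fetch_list=[y])
    paddle.disable_static()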
 def standard_normal(shape, dtype=None, name=None):
...
@@ -550,7 +523,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
             # [1.00780561 3.78457445 5.81058198]  # random
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_type(mean, 'mean', (int, float, Variable), 'normal')
         check_type(std, 'std', (int, float, Variable), 'normal')
         if isinstance(mean, Variable):
...
@@ -588,7 +561,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
         return gaussian(shape=shape, mean=mean, std=std, name=name)

     out = out * std + mean
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         out.stop_grediant = True
     return out
...
@@ -680,40 +653,28 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
             seed,
             _current_expected_place(),
         )
-
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.uniform_random(
-            'shape',
-            shape,
-            'min',
-            float(min),
-            'max',
-            float(max),
-            'seed',
-            seed,
-            'dtype',
-            dtype,
-        )
-
-    check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
-    check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
-    check_type(min, 'min', (float, int, Variable), 'uniform/rand')
-    check_type(max, 'max', (float, int, Variable), 'uniform/rand')
-
-    inputs = dict()
-    attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand'
-    )
-
-    helper = LayerHelper("uniform", **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    helper.append_op(
-        type="uniform_random", inputs=inputs, attrs=attrs, outputs={"Out": out}
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
+        check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')
+        check_type(min, 'min', (float, int, Variable), 'uniform/rand')
+        check_type(max, 'max', (float, int, Variable), 'uniform/rand')

+        inputs = dict()
+        attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='uniform/rand'
+        )

+        helper = LayerHelper("uniform", **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        helper.append_op(
+            type="uniform_random",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={"Out": out},
+        )
+        out.stop_gradient = True
+        return out


 @dygraph_only
...
@@ -751,12 +712,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
             #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
             #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
     """
-    if in_dygraph_mode():
-        return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
-    else:
-        return _legacy_C_ops.uniform_random_inplace_(
-            x, 'min', min, 'max', max, 'seed', seed
-        )
+    return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)


 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
...
@@ -841,33 +797,28 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
         shape = utils.convert_shape_to_list(shape)
         place = _current_expected_place()
         return _C_ops.randint(low, high, shape, dtype, place)
-    if _in_legacy_dygraph():
-        shape = utils.convert_shape_to_list(shape)
-        return _legacy_C_ops.randint(
-            'shape', shape, 'low', low, 'high', high, 'seed', 0, 'dtype', dtype
-        )
-
-    check_shape(shape, 'randint')
-    check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
-    if low >= high:
-        raise ValueError(
-            "randint's low must less then high, but received low = {0}, "
-            "high = {1}".format(low, high)
-        )
-
-    inputs = dict()
-    attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
-    utils.get_shape_tensor_inputs(
-        inputs=inputs, attrs=attrs, shape=shape, op_type='randint'
-    )
-
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(dtype=dtype)
-    helper.append_op(
-        type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        check_shape(shape, 'randint')
+        check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
+        if low >= high:
+            raise ValueError(
+                "randint's low must less then high, but received low = {0}, "
+                "high = {1}".format(low, high)
+            )

+        inputs = dict()
+        attrs = {'low': low, 'high': high, 'seed': 0, 'dtype': dtype}
+        utils.get_shape_tensor_inputs(
+            inputs=inputs, attrs=attrs, shape=shape, op_type='randint'
+        )

+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(dtype=dtype)
+        helper.append_op(
+            type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def randint_like(x, low=0, high=None, dtype=None, name=None):
...
@@ -1015,7 +966,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
                 "high = {1}".format(low, high)
             )

-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
         out = _legacy_C_ops.randint(
             'shape',
...
@@ -1031,33 +982,33 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
         )
         out = paddle.cast(out, dtype)
         return out
-
-    check_shape(shape, 'randint_like')
-    check_dtype(
-        dtype,
-        'dtype',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
-        'randint_like',
-    )
-
-    inputs = {"ShapeTensor": shape}
-    attrs = {
-        'low': low,
-        'high': high,
-        'seed': 0,
-        'dtype': core.VarDesc.VarType.INT64,
-    }
-    helper = LayerHelper("randint", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.INT64
-    )
-    helper.append_op(
-        type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    out = paddle.cast(out, dtype)
-    return out
+    else:
+        check_shape(shape, 'randint_like')
+        check_dtype(
+            dtype,
+            'dtype',
+            ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+            'randint_like',
+        )

+        inputs = {"ShapeTensor": shape}
+        attrs = {
+            'low': low,
+            'high': high,
+            'seed': 0,
+            'dtype': core.VarDesc.VarType.INT64,
+        }
+        helper = LayerHelper("randint", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=core.VarDesc.VarType.INT64
+        )
+        helper.append_op(
+            type='randint', inputs=inputs, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        out = paddle.cast(out, dtype)
+        return out


 def randperm(n, dtype="int64", name=None):
...
@@ -1095,23 +1046,23 @@ def randperm(n, dtype="int64", name=None):
     if in_dygraph_mode():
         return _C_ops.randperm(n, dtype, _current_expected_place())
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
-
-    if n < 1:
-        raise ValueError(
-            "The input n should be greater than 0 in randperm op."
-        )
-    check_dtype(
-        dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
-    )
-    helper = LayerHelper("randperm", **locals())
-    out = helper.create_variable_for_type_inference(dtype)
-    attrs = {'n': n, 'dtype': dtype, 'seed': 0}
-    helper.append_op(
-        type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        if n < 1:
+            raise ValueError(
+                "The input n should be greater than 0 in randperm op."
+            )
+        check_dtype(
+            dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'], 'randperm'
+        )
+        helper = LayerHelper("randperm", **locals())
+        out = helper.create_variable_for_type_inference(dtype)
+        attrs = {'n': n, 'dtype': dtype, 'seed': 0}
+        helper.append_op(
+            type='randperm', inputs={}, outputs={'Out': out}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def rand(shape, dtype=None, name=None):
...
@@ -1199,16 +1150,14 @@ def exponential_(x, lam=1.0, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.exponential_(x, lam)
-    elif paddle.in_dynamic_mode():
-        return _legacy_C_ops.exponential_(x, "lambda", lam)
-
-    check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")
-
-    helper = LayerHelper("exponential", **locals())
-    helper.append_op(
-        type='exponential',
-        inputs={"X": x},
-        outputs={'Out': x},
-        attrs={"lambda": lam},
-    )
-    return x
+    else:
+        check_variable_and_dtype(x, "x", ["float32", "float64"], "exponential")

+        helper = LayerHelper("exponential", **locals())
+        helper.append_op(
+            type='exponential',
+            inputs={"X": x},
+            outputs={'Out': x},
+            attrs={"lambda": lam},
+        )
+        return x
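Two deletions in this file go further than the rest: `uniform_` is decorated `@dygraph_only`, so once the legacy branch is gone its body collapses to the single `_C_ops.uniform_inplace_` call with no static fallback at all, and `exponential_` keeps its in-place contract by wiring the op's output back to `x`. In-place RNG like this is only meaningful in dynamic mode (a sketch, not part of the commit):

    import paddle

    x = paddle.zeros([2, 3])
    x.uniform_(min=-1.0, max=1.0)  # fills x in place; dygraph-only
    x.exponential_(lam=0.5)        # resamples x in place from Exp(0.5)
    print(x)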
python/paddle/tensor/search.py View file @ 861fef52
...
@@ -17,14 +17,12 @@

 import numpy as np

 import paddle
-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops
 from paddle.common_ops_import import VarDesc, Variable

 from ..fluid.data_feeder import check_dtype, check_variable_and_dtype
-from ..fluid.framework import _in_legacy_dygraph
 from ..framework import (
     LayerHelper,
-    _non_static_mode,
     convert_np_dtype_to_dtype_,
     core,
     in_dygraph_mode,
...
@@ -99,33 +97,28 @@ def argsort(x, axis=-1, descending=False, name=None):

     if in_dygraph_mode():
         _, ids = _C_ops.argsort(x, axis, descending)
         return ids
-
-    if _in_legacy_dygraph():
-        _, ids = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return ids
-
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'argsort',
-    )
-    helper = LayerHelper("argsort", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    ids = helper.create_variable_for_type_inference(
-        VarDesc.VarType.INT64, stop_gradient=True
-    )
-    helper.append_op(
-        type='argsort',
-        inputs={'X': x},
-        outputs={'Out': out, 'Indices': ids},
-        attrs={'axis': axis, 'descending': descending},
-    )
-    return ids
+    else:
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'argsort',
+        )
+        helper = LayerHelper("argsort", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        ids = helper.create_variable_for_type_inference(
+            VarDesc.VarType.INT64, stop_gradient=True
+        )
+        helper.append_op(
+            type='argsort',
+            inputs={'X': x},
+            outputs={'Out': out, 'Indices': ids},
+            attrs={'axis': axis, 'descending': descending},
+        )
+        return ids


 def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
...
@@ -187,40 +180,27 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):

     if in_dygraph_mode():
         return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_max(
-            x,
-            'axis',
-            axis,
-            'dtype',
-            var_dtype,
-            'keepdims',
-            keepdim,
-            'flatten',
-            flatten,
-        )
-        return out
-
-    helper = LayerHelper("argmax", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'paddle.argmax',
-    )
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
-    attrs = {}
-    out = helper.create_variable_for_type_inference(var_dtype)
-    attrs['keepdims'] = keepdim
-    attrs['axis'] = axis
-    attrs['flatten'] = flatten
-    attrs['dtype'] = var_dtype
-    helper.append_op(
-        type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("argmax", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'paddle.argmax',
+        )
+        check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
+        attrs = {}
+        out = helper.create_variable_for_type_inference(var_dtype)
+        attrs['keepdims'] = keepdim
+        attrs['axis'] = axis
+        attrs['flatten'] = flatten
+        attrs['dtype'] = var_dtype
+        helper.append_op(
+            type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out


 def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
     """
...
@@ -281,40 +261,27 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):

     if in_dygraph_mode():
         return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype)
-    if _in_legacy_dygraph():
-        out = _legacy_C_ops.arg_min(
-            x,
-            'axis',
-            axis,
-            'dtype',
-            var_dtype,
-            'keepdims',
-            keepdim,
-            'flatten',
-            flatten,
-        )
-        return out
-
-    helper = LayerHelper("argmin", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
-        'paddle.argmin',
-    )
-    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
-    out = helper.create_variable_for_type_inference(var_dtype)
-    attrs = {}
-    attrs['keepdims'] = keepdim
-    attrs['axis'] = axis
-    attrs['flatten'] = flatten
-    attrs['dtype'] = var_dtype
-    helper.append_op(
-        type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
-    )
-    out.stop_gradient = True
-    return out
+    else:
+        helper = LayerHelper("argmin", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
+            'paddle.argmin',
+        )
+        check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
+        out = helper.create_variable_for_type_inference(var_dtype)
+        attrs = {}
+        attrs['keepdims'] = keepdim
+        attrs['axis'] = axis
+        attrs['flatten'] = flatten
+        attrs['dtype'] = var_dtype
+        helper.append_op(
+            type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs
+        )
+        out.stop_gradient = True
+        return out
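The static branches of argmax/argmin carry four attributes (keepdims, axis, flatten, dtype), with flatten computed upstream from `axis is None`. The rewrite does not change behaviour; for reference, a quick dynamic-mode check (a sketch, not part of the commit):

    import paddle

    x = paddle.to_tensor([[5, 2, 9], [1, 8, 3]])
    paddle.argmax(x)                         # axis=None -> flattened, returns 2
    paddle.argmax(x, axis=1)                 # per row: [2, 1]
    paddle.argmin(x, axis=0, dtype='int32')  # per column as int32: [1, 0, 1]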
 def index_select(x, index, axis=0, name=None):
     """
...
@@ -354,30 +321,30 @@ def index_select(x, index, axis=0, name=None):

     if in_dygraph_mode():
         return _C_ops.index_select(x, index, axis)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.index_select(x, index, 'dim', axis)
-
-    helper = LayerHelper("index_select", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.tensor.search.index_select',
-    )
-    check_variable_and_dtype(
-        index,
-        'index',
-        ['int32', 'int64'],
-        'paddle.tensor.search.index_select',
-    )
-
-    out = helper.create_variable_for_type_inference(x.dtype)
-
-    helper.append_op(
-        type='index_select',
-        inputs={'X': x, 'Index': index},
-        outputs={'Out': out},
-        attrs={'dim': axis},
-    )
-    return out
+    else:
+        helper = LayerHelper("index_select", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.index_select',
+        )
+        check_variable_and_dtype(
+            index, 'index', ['int32', 'int64'], 'paddle.tensor.search.index_select'
+        )

+        out = helper.create_variable_for_type_inference(x.dtype)

+        helper.append_op(
+            type='index_select',
+            inputs={'X': x, 'Index': index},
+            outputs={'Out': out},
+            attrs={'dim': axis},
+        )
+        return out


 def nonzero(x, as_tuple=False):
...
@@ -438,8 +405,6 @@ def nonzero(x, as_tuple=False):

     if in_dygraph_mode():
         outs = _C_ops.nonzero(x)
-    elif paddle.in_dynamic_mode():
-        outs = _legacy_C_ops.where_index(x)
     else:
         helper = LayerHelper("where_index", **locals())
...
@@ -522,26 +487,21 @@ def sort(x, axis=-1, descending=False, name=None):

     if in_dygraph_mode():
         outs, _ = _C_ops.argsort(x, axis, descending)
         return outs
-
-    if _in_legacy_dygraph():
-        outs, _ = _legacy_C_ops.argsort(
-            x, 'axis', axis, 'descending', descending
-        )
-        return outs
-
-    helper = LayerHelper("sort", **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=False
-    )
-    ids = helper.create_variable_for_type_inference(
-        VarDesc.VarType.INT64, stop_gradient=True
-    )
-    helper.append_op(
-        type='argsort',
-        inputs={'X': x},
-        outputs={'Out': out, 'Indices': ids},
-        attrs={'axis': axis, 'descending': descending},
-    )
-    return out
+    else:
+        helper = LayerHelper("sort", **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=False
+        )
+        ids = helper.create_variable_for_type_inference(
+            VarDesc.VarType.INT64, stop_gradient=True
+        )
+        helper.append_op(
+            type='argsort',
+            inputs={'X': x},
+            outputs={'Out': out, 'Indices': ids},
+            attrs={'axis': axis, 'descending': descending},
+        )
+        return out


 def mode(x, axis=-1, keepdim=False, name=None):
...
@@ -577,26 +537,24 @@ def mode(x, axis=-1, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mode(x, axis, keepdim)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.mode(x, "axis", axis, "keepdim", keepdim)
-
-    helper = LayerHelper("mode", **locals())
-    inputs = {"X": [x]}
-    attrs = {}
-    attrs['axis'] = axis
-    attrs['keepdim'] = keepdim
-
-    values = helper.create_variable_for_type_inference(dtype=x.dtype)
-    indices = helper.create_variable_for_type_inference(dtype="int64")
-
-    helper.append_op(
-        type="mode",
-        inputs=inputs,
-        outputs={"Out": [values], "Indices": [indices]},
-        attrs=attrs,
-    )
-    indices.stop_gradient = True
-    return values, indices
+    else:
+        helper = LayerHelper("mode", **locals())
+        inputs = {"X": [x]}
+        attrs = {}
+        attrs['axis'] = axis
+        attrs['keepdim'] = keepdim

+        values = helper.create_variable_for_type_inference(dtype=x.dtype)
+        indices = helper.create_variable_for_type_inference(dtype="int64")

+        helper.append_op(
+            type="mode",
+            inputs=inputs,
+            outputs={"Out": [values], "Indices": [indices]},
+            attrs=attrs,
+        )
+        indices.stop_gradient = True
+        return values, indices


 def where(condition, x=None, y=None, name=None):
...
@@ -688,25 +646,20 @@ def where(condition, x=None, y=None, name=None):

     if in_dygraph_mode():
         return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.where(
-                broadcast_condition, broadcast_x, broadcast_y
-            )
-        else:
-            helper = LayerHelper("where", **locals())
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-            helper.append_op(
-                type='where',
-                inputs={
-                    'Condition': broadcast_condition,
-                    'X': broadcast_x,
-                    'Y': broadcast_y,
-                },
-                outputs={'Out': [out]},
-            )
-
-            return out
+        helper = LayerHelper("where", **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)

+        helper.append_op(
+            type='where',
+            inputs={
+                'Condition': broadcast_condition,
+                'X': broadcast_x,
+                'Y': broadcast_y,
+            },
+            outputs={'Out': [out]},
+        )

+        return out


 def index_sample(x, index):
...
@@ -785,30 +738,27 @@ def index_sample(x, index):

     if in_dygraph_mode():
         return _C_ops.index_sample(x, index)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.index_sample(x, index)
-        else:
-            helper = LayerHelper("index_sample", **locals())
-            check_variable_and_dtype(
-                x,
-                'x',
-                ['float16', 'float32', 'float64', 'int32', 'int64'],
-                'paddle.tensor.search.index_sample',
-            )
-            check_variable_and_dtype(
-                index,
-                'index',
-                ['int32', 'int64'],
-                'paddle.tensor.search.index_sample',
-            )
-            out = helper.create_variable_for_type_inference(dtype=x.dtype)
-            helper.append_op(
-                type='index_sample',
-                inputs={'X': x, 'Index': index},
-                outputs={'Out': out},
-            )
-            return out
+        helper = LayerHelper("index_sample", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float16', 'float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.index_sample',
+        )
+        check_variable_and_dtype(
+            index,
+            'index',
+            ['int32', 'int64'],
+            'paddle.tensor.search.index_sample',
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='index_sample',
+            inputs={'X': x, 'Index': index},
+            outputs={'Out': out},
+        )
+        return out


 def masked_select(x, mask, name=None):
...
@@ -843,24 +793,24 @@ def masked_select(x, mask, name=None):

     if in_dygraph_mode():
         return _C_ops.masked_select(x, mask)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.masked_select(x, mask)
-
-    helper = LayerHelper("masked_select", **locals())
-    check_variable_and_dtype(
-        x,
-        'x',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.tensor.search.mask_select',
-    )
-    check_variable_and_dtype(
-        mask, 'mask', ['bool'], 'paddle.tensor.search.masked_select'
-    )
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='masked_select',
-        inputs={'X': x, 'Mask': mask},
-        outputs={'Y': out},
-    )
-    return out
+    else:
+        helper = LayerHelper("masked_select", **locals())
+        check_variable_and_dtype(
+            x,
+            'x',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.tensor.search.mask_select',
+        )
+        check_variable_and_dtype(
+            mask, 'mask', ['bool'], 'paddle.tensor.search.masked_select'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='masked_select',
+            inputs={'X': x, 'Mask': mask},
+            outputs={'Y': out},
+        )
+        return out


 def topk(x, k, axis=None, largest=True, sorted=True, name=None):
...
@@ -916,49 +866,30 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
             axis = -1
         out, indices = _C_ops.topk(x, k, axis, largest, sorted)
         return out, indices
-
-    if _non_static_mode():
-        if axis is None:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x, 'k', int(k), 'largest', largest, 'sorted', sorted
-            )
-        else:
-            out, indices = _legacy_C_ops.top_k_v2(
-                x,
-                'k',
-                int(k),
-                'axis',
-                axis,
-                'largest',
-                largest,
-                'sorted',
-                sorted,
-            )
-        return out, indices
-
-    helper = LayerHelper("top_k_v2", **locals())
-    inputs = {"X": [x]}
-    attrs = {}
-    if isinstance(k, Variable):
-        inputs['K'] = [k]
-    else:
-        attrs = {'k': k}
-    attrs['largest'] = largest
-    attrs['sorted'] = sorted
-    if axis is not None:
-        attrs['axis'] = axis
-    values = helper.create_variable_for_type_inference(dtype=x.dtype)
-    indices = helper.create_variable_for_type_inference(dtype="int64")
-
-    helper.append_op(
-        type="top_k_v2",
-        inputs=inputs,
-        outputs={"Out": [values], "Indices": [indices]},
-        attrs=attrs,
-    )
-    indices.stop_gradient = True
-    return values, indices
+    else:
+        helper = LayerHelper("top_k_v2", **locals())
+        inputs = {"X": [x]}
+        attrs = {}
+        if isinstance(k, Variable):
+            inputs['K'] = [k]
+        else:
+            attrs = {'k': k}
+        attrs['largest'] = largest
+        attrs['sorted'] = sorted
+        if axis is not None:
+            attrs['axis'] = axis
+        values = helper.create_variable_for_type_inference(dtype=x.dtype)
+        indices = helper.create_variable_for_type_inference(dtype="int64")

+        helper.append_op(
+            type="top_k_v2",
+            inputs=inputs,
+            outputs={"Out": [values], "Indices": [indices]},
+            attrs=attrs,
+        )
+        indices.stop_gradient = True
+        return values, indices


 def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None):
...
@@ -1065,36 +996,31 @@ def searchsorted(
     """
     if in_dygraph_mode():
         return _C_ops.searchsorted(sorted_sequence, values, out_int32, right)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.searchsorted(
-            sorted_sequence, values, "out_int32", out_int32, "right", right
-        )
-
-    check_variable_and_dtype(
-        sorted_sequence,
-        'SortedSequence',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.searchsorted',
-    )
-    check_variable_and_dtype(
-        values,
-        'Values',
-        ['float32', 'float64', 'int32', 'int64'],
-        'paddle.searchsorted',
-    )
-
-    helper = LayerHelper('searchsorted', **locals())
-    out_type = 'int32' if out_int32 else 'int64'
-    out = helper.create_variable_for_type_inference(dtype=out_type)
-    helper.append_op(
-        type='searchsorted',
-        inputs={'SortedSequence': sorted_sequence, "Values": values},
-        outputs={'Out': out},
-        attrs={"out_int32": out_int32, "right": right},
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            sorted_sequence,
+            'SortedSequence',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.searchsorted',
+        )
+        check_variable_and_dtype(
+            values,
+            'Values',
+            ['float32', 'float64', 'int32', 'int64'],
+            'paddle.searchsorted',
+        )
+        helper = LayerHelper('searchsorted', **locals())
+        out_type = 'int32' if out_int32 else 'int64'
+        out = helper.create_variable_for_type_inference(dtype=out_type)
+        helper.append_op(
+            type='searchsorted',
+            inputs={'SortedSequence': sorted_sequence, "Values": values},
+            outputs={'Out': out},
+            attrs={"out_int32": out_int32, "right": right},
+        )
+        return out


 def kthvalue(x, k, axis=None, keepdim=False, name=None):
...
@@ -1135,16 +1061,10 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None):
            #  [[0, 2],
            #  [1, 2]]))
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
         if axis is not None:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(
-                    x, 'k', k, "axis", axis, "keepdim", keepdim
-                )
             return _C_ops.kthvalue(x, k, axis, keepdim)
         else:
-            if _in_legacy_dygraph():
-                return _legacy_C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
             return _C_ops.kthvalue(x, k, -1, keepdim)

     helper = LayerHelper("kthvalue", **locals())
...
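The `kthvalue` hunk is the largest simplification in this file: the old code tested `_non_static_mode()` and then `_in_legacy_dygraph()` inside each axis case (four eager paths), while the new code needs a single `in_dygraph_mode()` test with the default axis folded in as `-1`. A quick check of the two remaining eager cases (a sketch, not part of the commit):

    import paddle

    x = paddle.to_tensor([[3.0, 1.0, 2.0], [6.0, 4.0, 5.0]])
    values, indices = paddle.kthvalue(x, k=2)          # axis defaults to -1
    # values: [2., 5.], indices: [2, 2]
    values, indices = paddle.kthvalue(x, k=1, axis=0)  # 1st-smallest along axis 0
    # values: [3., 1., 2.], indices: [0, 0, 0]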
python/paddle/tensor/stat.py View file @ 861fef52
...
@@ -16,7 +16,7 @@

 import paddle
 from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode

 from ..fluid.data_feeder import check_type, check_variable_and_dtype
 from ..framework import LayerHelper, core
...
@@ -81,39 +81,37 @@ def mean(x, axis=None, keepdim=False, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.mean(x, axis, keepdim)
-
-    reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.reduce_mean(
-            x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all
-        )
-
-    check_variable_and_dtype(
-        x,
-        'x/input',
-        ['uint16', 'float16', 'float32', 'float64'],
-        'mean/reduce_mean',
-    )
-    check_type(axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean')
-    if isinstance(axis, (list, tuple)):
-        for item in axis:
-            check_type(
-                item,
-                'elements of axis/dim',
-                (int, Variable),
-                'mean/reduce_mean',
-            )
-
-    helper = LayerHelper('mean', **locals())
-
-    attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='reduce_mean', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        reduce_all, axis = _get_reduce_axis_with_tensor(axis, x)
+        check_variable_and_dtype(
+            x,
+            'x/input',
+            ['uint16', 'float16', 'float32', 'float64'],
+            'mean/reduce_mean',
+        )
+        check_type(
+            axis, 'axis/dim', (int, list, tuple, Variable), 'mean/reduce_mean'
+        )
+        if isinstance(axis, (list, tuple)):
+            for item in axis:
+                check_type(
+                    item,
+                    'elements of axis/dim',
+                    (int, Variable),
+                    'mean/reduce_mean',
+                )

+        helper = LayerHelper('mean', **locals())

+        attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='reduce_mean',
+            inputs={'X': x},
+            outputs={'Out': out},
+            attrs=attrs,
+        )
+        return out


 def var(x, axis=None, unbiased=True, keepdim=False, name=None):
...
@@ -146,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None):
             out2 = paddle.var(x, axis=1)
             # [1.         4.33333333]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'var')

     u = mean(x, axis, True, name)
...
@@ -211,7 +209,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None):
             # [1.       2.081666]
     """
-    if not paddle.in_dynamic_mode():
+    if not in_dygraph_mode():
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'std')

     out = var(**locals())
...
@@ -243,17 +241,15 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
         return _C_ops.numel(x)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.size(x)
-
-    if not isinstance(x, Variable):
-        raise TypeError("x must be a Tensor in numel")
-    helper = LayerHelper('numel', **locals())
-    out = helper.create_variable_for_type_inference(
-        dtype=core.VarDesc.VarType.INT64
-    )
-    helper.append_op(type='size', inputs={'Input': x}, outputs={'Out': out})
-    return out
+    else:
+        if not isinstance(x, Variable):
+            raise TypeError("x must be a Tensor in numel")
+        helper = LayerHelper('numel', **locals())
+        out = helper.create_variable_for_type_inference(
+            dtype=core.VarDesc.VarType.INT64
+        )
+        helper.append_op(
+            type='size', inputs={'Input': x}, outputs={'Out': out}
+        )
+        return out


 def nanmedian(x, axis=None, keepdim=True, name=None):
...
@@ -331,27 +327,30 @@ def nanmedian(x, axis=None, keepdim=True, name=None):
         if len(axis) != len(set(axis)):
             raise ValueError("Axis has duplicated elements.")

-    if _in_legacy_dygraph():
+    if in_dygraph_mode():
         median_index, out = _legacy_C_ops.nanmedian(
             x, 'axis', axis, 'keepdim', keepdim
         )
         return out
-
-    check_variable_and_dtype(
-        x,
-        'X',
-        ['int32', 'int64', 'float16', 'float32', 'float64'],
-        'nanmedian',
-    )
-
-    helper = LayerHelper('nanmedian', **locals())
-    attrs = {'axis': axis, 'keepdim': keepdim}
-    out = helper.create_variable_for_type_inference(x.dtype)
-    medians = helper.create_variable_for_type_inference(x.dtype)
-    helper.append_op(
-        type='nanmedian',
-        inputs={'X': x},
-        outputs={'Out': out, 'MedianIndex': medians},
-        attrs=attrs,
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x,
+            'X',
+            ['int32', 'int64', 'float16', 'float32', 'float64'],
+            'nanmedian',
+        )

+        helper = LayerHelper('nanmedian', **locals())
+        attrs = {'axis': axis, 'keepdim': keepdim}
+        out = helper.create_variable_for_type_inference(x.dtype)
+        medians = helper.create_variable_for_type_inference(x.dtype)
+        helper.append_op(
+            type='nanmedian',
+            inputs={'X': x},
+            outputs={'Out': out, 'MedianIndex': medians},
+            attrs=attrs,
+        )
+        return out


 def median(x, axis=None, keepdim=False, name=None):
...
@@ -534,7 +533,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False):
         for q_num in q:
             if q_num < 0 or q_num > 1:
                 raise ValueError("q should be in range [0, 1]")
-            if paddle.in_dynamic_mode():
+            if in_dygraph_mode():
                 q_num = paddle.to_tensor(q_num, dtype='float64')
             if ignore_nan:
                 indices.append(q_num * (valid_counts - 1))
...
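One detail worth flagging in stat.py: unlike its neighbours, the dygraph branch of `nanmedian` still calls `_legacy_C_ops.nanmedian` under the new `in_dygraph_mode()` test, presumably because no final-state `_C_ops` kernel existed for it at the time (an inference from the diff, not something the commit states). Public behaviour is the same either way (a sketch, not part of the commit):

    import paddle

    x = paddle.to_tensor([[float('nan'), 2.0, 3.0], [1.0, float('nan'), 6.0]])
    paddle.nanmedian(x)          # median over all non-NaN values
    paddle.nanmedian(x, axis=1)  # per-row median, NaNs ignored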