PaddlePaddle / Paddle
Commit 242ef2b9 (unverified)
Authored Dec 22, 2021 by Zhanlue Yang; committed by GitHub on Dec 22, 2021
Replaced core.ops with _C_ops (#38337)
Parent: 274b135b
Showing 19 changed files with 61 additions and 51 deletions (+61 / -51).
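The change is mechanical: every dygraph fast path that previously called the generated C++ op functions through core.ops now reaches them through the _C_ops module (from paddle import _C_ops); argument conventions and return values are unchanged. A minimal before/after sketch, assuming a Paddle build of this era running in its default dygraph mode (shapes and values are illustrative only):

import numpy as np
import paddle
from paddle import _C_ops   # new entry point used throughout this commit

a = np.random.rand(3, 4).astype(np.float32)
b = np.random.rand(3, 4).astype(np.float32)
x = paddle.to_tensor(a)
y = paddle.to_tensor(b)

# Before this commit: res = core.ops.elementwise_mul(x, y)
# After this commit the same op function is reached via _C_ops:
res = _C_ops.elementwise_mul(x, y)
print(res.shape)   # [3, 4]

The per-file hunks below are all instances of this substitution, plus the corresponding "from paddle import _C_ops" imports.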
Changed files:
python/paddle/distributed/utils.py (+3 / -3)
python/paddle/fluid/clip.py (+2 / -1)
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py (+1 / -1)
python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py (+2 / -1)
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py (+3 / -2)
python/paddle/fluid/tests/unittests/test_op_function_generator.py (+6 / -5)
python/paddle/fluid/tests/unittests/test_sum_op.py (+3 / -2)
python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py (+3 / -2)
python/paddle/incubate/operators/graph_send_recv.py (+3 / -2)
python/paddle/incubate/operators/softmax_mask_fuse.py (+2 / -1)
python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py (+2 / -1)
python/paddle/nn/functional/common.py (+1 / -1)
python/paddle/nn/functional/loss.py (+1 / -1)
python/paddle/tensor/linalg.py (+3 / -3)
python/paddle/tensor/manipulation.py (+13 / -13)
python/paddle/tensor/random.py (+2 / -2)
python/paddle/text/viterbi_decode.py (+3 / -3)
python/paddle/vision/ops.py (+6 / -6)
tools/count_api_without_core_ops.py (+2 / -1)
python/paddle/distributed/utils.py
@@ -31,7 +31,7 @@ from distutils.util import strtobool
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = [     #noqa
     'get_host_name_ip',
@@ -146,7 +146,7 @@ def global_scatter(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_scatter(x, local_count, \
+        return _C_ops.global_scatter(x, local_count, \
                                        global_count, \
                                        'use_calc_stream', use_calc_stream, \
                                        'ring_id', ring_id)
@@ -258,7 +258,7 @@ def global_gather(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_gather(x, local_count, \
+        return _C_ops.global_gather(x, local_count, \
                                       global_count, \
                                       'use_calc_stream', use_calc_stream, \
                                       'ring_id', ring_id)
python/paddle/fluid/clip.py
@@ -29,6 +29,7 @@ from .data_feeder import check_variable_and_dtype
 from .framework import in_dygraph_mode
 from .layer_helper import LayerHelper
 from .framework import default_main_program
+from paddle import _C_ops

 __all__ = [
     'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue',
@@ -47,7 +48,7 @@ def _squared_l2_norm(x):
         return sum_square

     if in_dygraph_mode():
-        return core.ops.squared_l2_norm(x)
+        return _C_ops.squared_l2_norm(x)

     op_type = 'squared_l2_norm'
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
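_squared_l2_norm is a helper used by the gradient-clipping classes in this file; in dygraph mode it now calls _C_ops.squared_l2_norm directly. A small sketch of what that op computes, assuming (as the generated wrapper does in this era) that it returns a single one-element tensor holding the sum of squared entries:

import paddle
from paddle import _C_ops

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
out = _C_ops.squared_l2_norm(x)       # sum of squares: 1 + 4 + 9 + 16 = 30
print(out.numpy())                    # [30.]
print(paddle.sum(x * x).numpy())      # reference computation, also 30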
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py
@@ -140,7 +140,7 @@ class TestDeprecatedDocorator(unittest.TestCase):
         b = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32)
         x = paddle.to_tensor(a)
         y = paddle.to_tensor(b)
-        res = core.ops.elementwise_mul(x, y)
+        res = _C_ops.elementwise_mul(x, y)

         # expected
         expected = LOWEST_WARNING_POSTION
python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py
@@ -24,6 +24,7 @@ import paddle.nn as nn
 from paddle.dataset.common import DATA_HOME
 from paddle.fluid.framework import core, in_dygraph_mode
 from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops

 import sys
 sys.path.append("./tokenizer")
@@ -75,7 +76,7 @@ class FasterTokenizer(nn.Layer):
                 is_split_into_words=False,
                 pad_to_max_seq_len=False):
         if in_dygraph_mode():
-            input_ids, seg_ids = core.ops.faster_tokenizer(
+            input_ids, seg_ids = _C_ops.faster_tokenizer(
                 self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                 "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                 pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py
@@ -16,6 +16,7 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 import unittest
+from paddle import _C_ops

 if fluid.is_compiled_with_cuda():
     fluid.core.globals()['FLAGS_cudnn_deterministic'] = True
@@ -112,8 +113,8 @@ class InstanceNorm(fluid.dygraph.Layer):
     def forward(self, input):
         if fluid.in_dygraph_mode():
-            out, _, _ = fluid.core.ops.instance_norm(
-                input, self.scale, self.bias, 'epsilon', self.epsilon)
+            out, _, _ = _C_ops.instance_norm(
+                input, self.scale, self.bias, 'epsilon', self.epsilon)
             return out
         else:
             return fluid.layers.instance_norm(
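The InstanceNorm layer in this test shows the multi-output case: _C_ops.instance_norm returns the normalized tensor plus saved mean and variance, and only the first output is kept. A standalone sketch of that call, assuming a float32 NCHW input with per-channel scale and bias tensors (the tensor names below are illustrative):

import paddle
from paddle import _C_ops

x = paddle.randn([2, 3, 8, 8], dtype='float32')   # N, C, H, W
scale = paddle.ones([3])                          # per-channel gamma
bias = paddle.zeros([3])                          # per-channel beta

# Same flat attribute-argument convention used throughout the commit.
out, _, _ = _C_ops.instance_norm(x, scale, bias, 'epsilon', 1e-5)
print(out.shape)   # [2, 3, 8, 8]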
python/paddle/fluid/tests/unittests/test_op_function_generator.py
@@ -21,6 +21,7 @@ import paddle.fluid.layers as layers
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.jit import TracedLayer
 import numpy as np
+from paddle import _C_ops

 class TestTracedLayer(fluid.dygraph.Layer):
@@ -28,7 +29,7 @@ class TestTracedLayer(fluid.dygraph.Layer):
         super(TestTracedLayer, self).__init__(name_scope)

     def forward(self, input):
-        return core.ops.relu(input)
+        return _C_ops.relu(input)

 class TestVariable(unittest.TestCase):
@@ -46,7 +47,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False
             res1 = layers.elementwise_add(x, y)
-            res2 = core.ops.elementwise_add(x, y)
+            res2 = _C_ops.elementwise_add(x, y)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -58,7 +59,7 @@ class TestVariable(unittest.TestCase):
             y = fluid.dygraph.to_variable(b)
             res1 = layers.elementwise_mul(x, y)
-            res2 = core.ops.elementwise_mul(x, y)
+            res2 = _C_ops.elementwise_mul(x, y)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -68,7 +69,7 @@ class TestVariable(unittest.TestCase):
             x = fluid.dygraph.to_variable(a)
             res1 = layers.relu(x)
-            res2 = core.ops.relu(x)
+            res2 = _C_ops.relu(x)
             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -81,7 +82,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False
-            loss = core.ops.elementwise_mul(x, y)
+            loss = _C_ops.elementwise_mul(x, y)
             loss.backward()
             x_grad = x.gradient()
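The updated test checks exactly this equivalence: a raw _C_ops call must produce the same values as the corresponding high-level API. The same check can be reproduced outside the unittest harness; the sketch below uses paddle.nn.functional.relu as the reference path, which is a choice of this sketch rather than the test's own imports:

import numpy as np
import paddle
import paddle.nn.functional as F
from paddle import _C_ops

a = np.random.uniform(-1, 1, [4, 5]).astype(np.float32)
x = paddle.to_tensor(a)

res1 = F.relu(x)        # public API path
res2 = _C_ops.relu(x)   # generated op function, as exercised by the test
assert np.array_equal(res1.numpy(), res2.numpy())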
python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -24,6 +24,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 class TestSumOp(OpTest):
@@ -382,11 +383,11 @@ class TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py
@@ -25,6 +25,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 paddle.enable_static()
@@ -171,11 +172,11 @@ class TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
python/paddle/incubate/operators/graph_send_recv.py
@@ -16,6 +16,7 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid import core
+from paddle import _C_ops

 def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
@@ -82,8 +83,8 @@ def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
             % pool_type)

     if in_dygraph_mode():
-        out, tmp = core.ops.graph_send_recv(x, src_index, dst_index,
+        out, tmp = _C_ops.graph_send_recv(x, src_index, dst_index,
                                           'pool_type',
                                           pool_type.upper())
         return out

     check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
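graph_send_recv gathers x[src_index] and scatter-reduces the gathered rows into the positions named by dst_index. A hedged usage sketch of the wrapper patched here, importing it from its defining module (whether the same function is also re-exported as paddle.incubate.graph_send_recv depends on the build):

import paddle
from paddle.incubate.operators.graph_send_recv import graph_send_recv

x = paddle.to_tensor([[0.0, 1.0], [2.0, 3.0], [4.0, 5.0]])
src_index = paddle.to_tensor([0, 1, 2, 0], dtype='int32')
dst_index = paddle.to_tensor([1, 2, 1, 0], dtype='int32')

# "sum" pooling: out[d] accumulates x[s] for every (s, d) pair.
out = graph_send_recv(x, src_index, dst_index, pool_type="sum")
print(out.numpy())
# Expected by the definition above:
# [[0. 1.]
#  [4. 6.]
#  [2. 3.]]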
python/paddle/incubate/operators/softmax_mask_fuse.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse(x, mask, name=None):
@@ -58,7 +59,7 @@ def softmax_mask_fuse(x, mask, name=None):
            # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask(x, mask)
+        out = _C_ops.fused_softmax_mask(x, mask)
         return out
     helper = LayerHelper('fused_softmax_mask', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py
@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse_upper_triangle(x):
@@ -58,7 +59,7 @@ def softmax_mask_fuse_upper_triangle(x):
            # ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask_upper_triangle(x)
+        out = _C_ops.fused_softmax_mask_upper_triangle(x)
         return out
     helper = LayerHelper('fused_softmax_mask_upper_triangle', **locals())
python/paddle/nn/functional/common.py
@@ -1763,7 +1763,7 @@ def class_center_sample(label, num_classes, num_samples, group=None):
         seed = default_main_program().random_seed

     if in_dygraph_mode():
-        remapped_label, sampled_class_center = core.ops.class_center_sample(
+        remapped_label, sampled_class_center = _C_ops.class_center_sample(
             label, 'num_classes', num_classes, 'num_samples', num_samples,
             'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0)
python/paddle/nn/functional/loss.py
@@ -1320,7 +1320,7 @@ def margin_cross_entropy(logits,
         label = paddle.unsqueeze(label, axis=-1)

     if in_dygraph_mode():
-        softmax, loss = core.ops.margin_cross_entropy(
+        softmax, loss = _C_ops.margin_cross_entropy(
             logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks,
             'margin1', margin1, 'margin2', margin2, 'margin3', margin3, 'scale',
             scale, 'return_softmax', return_softmax)
python/paddle/tensor/linalg.py
@@ -1430,7 +1430,7 @@ def det(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.determinant(x)
+        return _C_ops.determinant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
@@ -1485,7 +1485,7 @@ def slogdet(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.slogdeterminant(x)
+        return _C_ops.slogdeterminant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
@@ -1633,7 +1633,7 @@ def matrix_power(x, n, name=None):
            #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
     if in_dygraph_mode():
-        return core.ops.matrix_power(x, "n", n)
+        return _C_ops.matrix_power(x, "n", n)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
     check_type(n, 'n', int, 'matrix_power')
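In dygraph mode the public wrappers paddle.linalg.det, paddle.linalg.slogdet and paddle.linalg.matrix_power now dispatch straight to _C_ops.determinant, _C_ops.slogdeterminant and _C_ops.matrix_power. A short sketch exercising two of those entry points, assuming they are exported under paddle.linalg as in Paddle 2.2 (values chosen so the results are easy to check by hand):

import paddle

x = paddle.to_tensor([[2.0, 0.0],
                      [0.0, 3.0]])

print(paddle.linalg.det(x).numpy())             # determinant of diag(2, 3) is 6
print(paddle.linalg.matrix_power(x, 2).numpy()) # [[4. 0.], [0. 9.]]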
python/paddle/tensor/manipulation.py
@@ -70,8 +70,8 @@ def fill_(x, value):
         raise TypeError(
             "The type of 'value' must be int or float, but received %s." %
             (type(value)))
-    return core.ops.fill_any_(x, "value_float",
-                              float(value), "value_int", int(value))
+    return _C_ops.fill_any_(x, "value_float",
+                            float(value), "value_int", int(value))

 setattr(core.VarBase, 'fill_', fill_)
@@ -102,7 +102,7 @@ def zero_(x):
             print(tensor.tolist())   #[0, 0, 0, 0, 0]
     """
-    return core.ops.fill_any_(x, "value_float", 0., "value_int", int(0))
+    return _C_ops.fill_any_(x, "value_float", 0., "value_int", int(0))

 setattr(core.VarBase, 'zero_', zero_)
@@ -148,10 +148,10 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
             'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
         )
     if len(inshape) == 2:
-        return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset,
-                                       'wrap', wrap)
+        return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset,
+                                     'wrap', wrap)
-    return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
-                                   True)
+    return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
+                                 True)

 setattr(core.VarBase, 'fill_diagonal_', fill_diagonal_)
@@ -182,10 +182,10 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
         y = y.reshape([1, -1])

     if inplace:
-        return core.ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
-                                              'offset', offset)
+        return _C_ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
+                                            'offset', offset)
-    return core.ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
-                                         'offset', offset)
+    return _C_ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
+                                       'offset', offset)

 def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
     if isinstance(axis, int):
         axis = [axis]
     if in_dygraph_mode():
-        return core.ops.flip(x, "axis", axis)
+        return _C_ops.flip(x, "axis", axis)

     helper = LayerHelper("flip", **locals())
     check_type(x, 'X', (Variable), 'flip')
@@ -1107,7 +1107,7 @@ def unique_consecutive(x,
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        out, inverse, counts = core.ops.unique_consecutive(
+        out, inverse, counts = _C_ops.unique_consecutive(
             x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
             'return_counts', return_counts, 'axis', axis)
         outs = [out]
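Several of the patched helpers are registered on core.VarBase by the setattr calls visible above, so in dygraph they are reachable as in-place tensor methods; flip stays a plain function. A brief sketch using those public entry points:

import paddle

t = paddle.ones([2, 3])
t.zero_()            # in-place, dispatches to _C_ops.fill_any_ with value 0
t.fill_(7.0)         # in-place, dispatches to _C_ops.fill_any_ with value 7.0
print(t.numpy())     # [[7. 7. 7.], [7. 7. 7.]]

x = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])
print(paddle.flip(x, axis=[0]).numpy())   # rows reversed: [[4 5 6], [1 2 3]]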
python/paddle/tensor/random.py
@@ -555,8 +555,8 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
            # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random
            # [ 0.433519, 0.39483607, -0.8660099, 0.83664286]] # random
     """
-    return core.ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
-                                            seed)
+    return _C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
+                                          seed)

 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
python/paddle/text/viterbi_decode.py
@@ -16,6 +16,7 @@ from ..nn import Layer
 from ..fluid.framework import core, in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
+from paddle import _C_ops

 __all__ = ['viterbi_decode', 'ViterbiDecoder']
@@ -58,9 +59,8 @@ def viterbi_decode(potentials,
            scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
     """
     if in_dygraph_mode():
-        return core.ops.viterbi_decode(potentials, transition_params, lengths,
+        return _C_ops.viterbi_decode(potentials, transition_params, lengths,
                                      'include_bos_eos_tag',
                                      include_bos_eos_tag)
     check_variable_and_dtype(potentials, 'input', ['float32', 'float64'],
                              'viterbi_decode')
     check_variable_and_dtype(transition_params, 'transitions',
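The docstring fragment above already shows the intended call, paddle.text.viterbi_decode(emission, transition, length, False). A self-contained sketch with small random inputs; the shapes below (batch of 2, sequence length 3, 4 tags) and the int64 lengths are assumptions for illustration, not the docstring's exact data:

import paddle

paddle.seed(2021)
batch, seq_len, num_tags = 2, 3, 4
emission = paddle.rand([batch, seq_len, num_tags])    # per-step tag potentials
transition = paddle.rand([num_tags, num_tags])        # tag-to-tag transition scores
length = paddle.to_tensor([3, 2], dtype='int64')      # valid steps per sequence

scores, path = paddle.text.viterbi_decode(emission, transition, length, False)
print(scores.shape, path.shape)   # [2] and [2, 3]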
python/paddle/vision/ops.py
@@ -953,10 +953,10 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
             "Input features with shape should be (N, C, H, W)")
     output_channels = int(x.shape[1] / (pooled_height * pooled_width))
     if in_dygraph_mode():
-        return core.ops.psroi_pool(x, boxes, boxes_num, "output_channels",
+        return _C_ops.psroi_pool(x, boxes, boxes_num, "output_channels",
                                  output_channels, "spatial_scale",
                                  spatial_scale, "pooled_height",
                                  pooled_height,
                                  "pooled_width", pooled_width)

     helper = LayerHelper('psroi_pool', **locals())
     dtype = helper.input_dtype()
@@ -1064,7 +1064,7 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
     pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        pool_out, argmaxes = core.ops.roi_pool(
+        pool_out, argmaxes = _C_ops.roi_pool(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale)
         return pool_out
@@ -1219,7 +1219,7 @@ def roi_align(x,
     pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        align_out = core.ops.roi_align(
+        align_out = _C_ops.roi_align(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale, "sampling_ratio",
             sampling_ratio, "aligned", aligned)
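The ROI operators keep the public signatures visible in the hunk headers, e.g. roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0). A hedged sketch calling the wrapper defined in paddle/vision/ops.py with one feature map and two boxes; the (x1, y1, x2, y2) box layout and the int32 boxes_num dtype are assumptions of this sketch:

import paddle
from paddle.vision.ops import roi_pool

x = paddle.rand([1, 8, 16, 16])                     # N, C, H, W feature map
boxes = paddle.to_tensor([[0.0, 0.0, 7.0, 7.0],
                          [4.0, 4.0, 11.0, 11.0]])  # two regions of interest
boxes_num = paddle.to_tensor([2], dtype='int32')    # boxes per image in the batch

pooled = roi_pool(x, boxes, boxes_num, output_size=(4, 4))
print(pooled.shape)   # [2, 8, 4, 4]: one pooled C x 4 x 4 block per box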
tools/count_api_without_core_ops.py
@@ -22,6 +22,7 @@ import pydoc
 import hashlib
 import functools
 import platform
+from paddle import _C_ops

 __all__ = ['get_apis_with_and_without_core_ops', ]
@@ -207,7 +208,7 @@ if __name__ == "__main__":
     else:
         print("""Usage:
-    1. Count and list all operator-raleated APIs that contains append_op but not core.ops.xx.
+    1. Count and list all operator-raleated APIs that contains append_op but not _C_ops.xx.
     python ./count_api_without_core_ops.py -c paddle
     2. Print api and the md5 of source code of the api.
     python ./count_api_without_core_ops.py -p paddle