机器未来 / Paddle (fork of PaddlePaddle / Paddle)

Commit 242ef2b9 (unverified)
Replaced core.ops with _C_ops (#38337)

Authored on Dec 22, 2021 by Zhanlue Yang; committed via GitHub on Dec 22, 2021.
Parent commit: 274b135b
Showing 19 changed files with 61 additions and 51 deletions (+61 −51).
python/paddle/distributed/utils.py                                                      +3 −3
python/paddle/fluid/clip.py                                                             +2 −1
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py                        +1 −1
python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py                         +2 −1
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py   +3 −2
python/paddle/fluid/tests/unittests/test_op_function_generator.py                       +6 −5
python/paddle/fluid/tests/unittests/test_sum_op.py                                      +3 −2
python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py                              +3 −2
python/paddle/incubate/operators/graph_send_recv.py                                     +3 −2
python/paddle/incubate/operators/softmax_mask_fuse.py                                   +2 −1
python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py                    +2 −1
python/paddle/nn/functional/common.py                                                   +1 −1
python/paddle/nn/functional/loss.py                                                     +1 −1
python/paddle/tensor/linalg.py                                                          +3 −3
python/paddle/tensor/manipulation.py                                                    +13 −13
python/paddle/tensor/random.py                                                          +2 −2
python/paddle/text/viterbi_decode.py                                                    +3 −3
python/paddle/vision/ops.py                                                             +6 −6
tools/count_api_without_core_ops.py                                                     +2 −1
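Every change in this commit follows the same mechanical pattern: dygraph fast paths that previously dispatched through core.ops (that is, paddle.fluid.core.ops) now import paddle._C_ops and call it instead. A minimal before/after sketch, using relu only because it is one of the ops exercised in the tests below:

    # Minimal sketch of the pattern applied across all 19 files.
    import paddle
    from paddle import _C_ops

    x = paddle.randn([2, 3])

    # before this commit (dygraph fast path):
    #     from paddle.fluid import core
    #     out = core.ops.relu(x)
    # after this commit:
    out = _C_ops.relu(x)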
python/paddle/distributed/utils.py

@@ -31,7 +31,7 @@ from distutils.util import strtobool
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = [     #noqa
     'get_host_name_ip',
@@ -146,7 +146,7 @@ def global_scatter(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_scatter(x, local_count, \
+        return _C_ops.global_scatter(x, local_count, \
                                        global_count, \
                                        'use_calc_stream', use_calc_stream, \
                                        'ring_id', ring_id)
@@ -258,7 +258,7 @@ def global_gather(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_gather(x, local_count, \
+        return _C_ops.global_gather(x, local_count, \
                                       global_count, \
                                       'use_calc_stream', use_calc_stream, \
                                       'ring_id', ring_id)
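Only the module prefix changes in calls like global_scatter and global_gather above; the calling convention of these generated op entry points stays the same: tensor inputs are passed positionally, then op attributes follow as alternating name/value arguments. A small runnable illustration of that convention using flip (touched later in python/paddle/tensor/manipulation.py), since global_scatter needs a distributed setup:

    import paddle
    from paddle import _C_ops

    x = paddle.to_tensor([[1, 2], [3, 4]])
    # tensor input first, then the attribute name followed by its value
    y = _C_ops.flip(x, "axis", [0])
    print(y.numpy())   # [[3 4]
                       #  [1 2]]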
python/paddle/fluid/clip.py

@@ -29,6 +29,7 @@ from .data_feeder import check_variable_and_dtype
 from .framework import in_dygraph_mode
 from .layer_helper import LayerHelper
 from .framework import default_main_program
+from paddle import _C_ops

 __all__ = [
     'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue',
@@ -47,7 +48,7 @@ def _squared_l2_norm(x):
         return sum_square

     if in_dygraph_mode():
-        return core.ops.squared_l2_norm(x)
+        return _C_ops.squared_l2_norm(x)

     op_type = 'squared_l2_norm'
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
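The _squared_l2_norm change above sits inside the usual two-branch layout of Paddle's Python APIs: a dygraph fast path that calls the op binding directly, and a static-graph path that validates inputs and appends the op through LayerHelper. A rough sketch of that shape; only the guard and the _C_ops call are taken from the diff, while the wrapper name and the 'X'/'Out' input/output slots are assumptions made for illustration:

    from paddle import _C_ops
    from paddle.fluid.data_feeder import check_variable_and_dtype
    from paddle.fluid.framework import in_dygraph_mode
    from paddle.fluid.layer_helper import LayerHelper

    def squared_l2_norm_like(x):
        # hypothetical wrapper illustrating the guard pattern used in clip.py
        if in_dygraph_mode():
            # dygraph: call the generated C++ op binding directly
            return _C_ops.squared_l2_norm(x)

        # static graph: validate the input, then append the op to the program
        check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                 'squared_l2_norm')
        helper = LayerHelper('squared_l2_norm', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='squared_l2_norm', inputs={'X': x}, outputs={'Out': out})
        return out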
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py

@@ -140,7 +140,7 @@ class TestDeprecatedDocorator(unittest.TestCase):
         b = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32)
         x = paddle.to_tensor(a)
         y = paddle.to_tensor(b)
-        res = core.ops.elementwise_mul(x, y)
+        res = _C_ops.elementwise_mul(x, y)

         # expected
         expected = LOWEST_WARNING_POSTION
python/paddle/fluid/tests/unittests/test_faster_tokenizer_op.py

@@ -24,6 +24,7 @@ import paddle.nn as nn
 from paddle.dataset.common import DATA_HOME
 from paddle.fluid.framework import core, in_dygraph_mode
 from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops

 import sys
 sys.path.append("./tokenizer")
@@ -75,7 +76,7 @@ class FasterTokenizer(nn.Layer):
                 is_split_into_words=False,
                 pad_to_max_seq_len=False):
         if in_dygraph_mode():
-            input_ids, seg_ids = core.ops.faster_tokenizer(
+            input_ids, seg_ids = _C_ops.faster_tokenizer(
                 self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                 "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                 pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py

@@ -16,6 +16,7 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 import unittest
+from paddle import _C_ops

 if fluid.is_compiled_with_cuda():
     fluid.core.globals()['FLAGS_cudnn_deterministic'] = True
@@ -112,8 +113,8 @@ class InstanceNorm(fluid.dygraph.Layer):
     def forward(self, input):
         if fluid.in_dygraph_mode():
-            out, _, _ = fluid.core.ops.instance_norm(
-                input, self.scale, self.bias, 'epsilon', self.epsilon)
+            out, _, _ = _C_ops.instance_norm(
+                input, self.scale, self.bias, 'epsilon', self.epsilon)
             return out
         else:
             return fluid.layers.instance_norm(
python/paddle/fluid/tests/unittests/test_op_function_generator.py

@@ -21,6 +21,7 @@ import paddle.fluid.layers as layers
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.jit import TracedLayer
 import numpy as np
+from paddle import _C_ops

 class TestTracedLayer(fluid.dygraph.Layer):
@@ -28,7 +29,7 @@ class TestTracedLayer(fluid.dygraph.Layer):
         super(TestTracedLayer, self).__init__(name_scope)

     def forward(self, input):
-        return core.ops.relu(input)
+        return _C_ops.relu(input)

 class TestVariable(unittest.TestCase):
@@ -46,7 +47,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False

             res1 = layers.elementwise_add(x, y)
-            res2 = core.ops.elementwise_add(x, y)
+            res2 = _C_ops.elementwise_add(x, y)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -58,7 +59,7 @@ class TestVariable(unittest.TestCase):
             y = fluid.dygraph.to_variable(b)

             res1 = layers.elementwise_mul(x, y)
-            res2 = core.ops.elementwise_mul(x, y)
+            res2 = _C_ops.elementwise_mul(x, y)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -68,7 +69,7 @@ class TestVariable(unittest.TestCase):
             x = fluid.dygraph.to_variable(a)
             res1 = layers.relu(x)
-            res2 = core.ops.relu(x)
+            res2 = _C_ops.relu(x)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -81,7 +82,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False

-            loss = core.ops.elementwise_mul(x, y)
+            loss = _C_ops.elementwise_mul(x, y)

             loss.backward()
             x_grad = x.gradient()
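The edits in this test keep the intent of the original assertions: results coming back from the _C_ops entry points must match the corresponding fluid.layers APIs elementwise. Condensed into a standalone check (a sketch; the shape and the choice of relu are illustrative):

    import numpy as np
    import paddle.fluid as fluid
    import paddle.fluid.layers as layers
    from paddle import _C_ops

    a = np.random.uniform(0.1, 1, [2, 3]).astype(np.float32)
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(a)
        # the generated binding and the layers API should agree exactly
        assert np.array_equal(layers.relu(x).numpy(), _C_ops.relu(x).numpy())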
python/paddle/fluid/tests/unittests/test_sum_op.py

@@ -24,6 +24,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 class TestSumOp(OpTest):
@@ -382,11 +383,11 @@ class TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
python/paddle/fluid/tests/unittests/xpu/test_sum_op_xpu.py

@@ -25,6 +25,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 paddle.enable_static()
@@ -171,11 +172,11 @@ class TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
python/paddle/incubate/operators/graph_send_recv.py

@@ -16,6 +16,7 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid import core
+from paddle import _C_ops

 def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
@@ -82,8 +83,8 @@ def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
             % pool_type)

     if in_dygraph_mode():
-        out, tmp = core.ops.graph_send_recv(
-            x, src_index, dst_index, 'pool_type', pool_type.upper())
+        out, tmp = _C_ops.graph_send_recv(
+            x, src_index, dst_index, 'pool_type', pool_type.upper())
         return out

     check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
python/paddle/incubate/operators/softmax_mask_fuse.py

@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse(x, mask, name=None):
@@ -58,7 +59,7 @@ def softmax_mask_fuse(x, mask, name=None):
            # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask(x, mask)
+        out = _C_ops.fused_softmax_mask(x, mask)
         return out
     helper = LayerHelper('fused_softmax_mask', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py

@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse_upper_triangle(x):
@@ -58,7 +59,7 @@ def softmax_mask_fuse_upper_triangle(x):
            #   ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask_upper_triangle(x)
+        out = _C_ops.fused_softmax_mask_upper_triangle(x)
         return out
     helper = LayerHelper('fused_softmax_mask_upper_triangle', **locals())
python/paddle/nn/functional/common.py

@@ -1763,7 +1763,7 @@ def class_center_sample(label, num_classes, num_samples, group=None):
         seed = default_main_program().random_seed

     if in_dygraph_mode():
-        remapped_label, sampled_class_center = core.ops.class_center_sample(
+        remapped_label, sampled_class_center = _C_ops.class_center_sample(
             label, 'num_classes', num_classes, 'num_samples', num_samples,
             'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0)
python/paddle/nn/functional/loss.py

@@ -1320,7 +1320,7 @@ def margin_cross_entropy(logits,
         label = paddle.unsqueeze(label, axis=-1)

     if in_dygraph_mode():
-        softmax, loss = core.ops.margin_cross_entropy(
+        softmax, loss = _C_ops.margin_cross_entropy(
             logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks,
             'margin1', margin1, 'margin2', margin2, 'margin3', margin3,
             'scale', scale, 'return_softmax', return_softmax)
python/paddle/tensor/linalg.py

@@ -1430,7 +1430,7 @@ def det(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.determinant(x)
+        return _C_ops.determinant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
@@ -1485,7 +1485,7 @@ def slogdet(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.slogdeterminant(x)
+        return _C_ops.slogdeterminant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
@@ -1633,7 +1633,7 @@ def matrix_power(x, n, name=None):
            #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
     if in_dygraph_mode():
-        return core.ops.matrix_power(x, "n", n)
+        return _C_ops.matrix_power(x, "n", n)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
     check_type(n, 'n', int, 'matrix_power')
python/paddle/tensor/manipulation.py

@@ -70,8 +70,8 @@ def fill_(x, value):
         raise TypeError(
             "The type of 'value' must be int or float, but received %s." %
             (type(value)))
-    return core.ops.fill_any_(x, "value_float",
-                              float(value), "value_int", int(value))
+    return _C_ops.fill_any_(x, "value_float",
+                            float(value), "value_int", int(value))

 setattr(core.VarBase, 'fill_', fill_)
@@ -102,7 +102,7 @@ def zero_(x):
             print(tensor.tolist())   #[0, 0, 0, 0, 0]
     """
-    return core.ops.fill_any_(x, "value_float", 0., "value_int", int(0))
+    return _C_ops.fill_any_(x, "value_float", 0., "value_int", int(0))

 setattr(core.VarBase, 'zero_', zero_)
@@ -148,10 +148,10 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
             'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
         )
     if len(inshape) == 2:
-        return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset,
-                                       'wrap', wrap)
-    return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
-                                   True)
+        return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset,
+                                     'wrap', wrap)
+    return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
+                                 True)

 setattr(core.VarBase, 'fill_diagonal_', fill_diagonal_)
@@ -182,10 +182,10 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
         y = y.reshape([1, -1])

     if inplace:
-        return core.ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
-                                              'offset', offset)
-    return core.ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
-                                         'offset', offset)
+        return _C_ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
+                                            'offset', offset)
+    return _C_ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
+                                       'offset', offset)

 def fill_diagonal_tensor_(x, y, offset=0, dim1=0, dim2=1, name=None):
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
     if isinstance(axis, int):
         axis = [axis]
     if in_dygraph_mode():
-        return core.ops.flip(x, "axis", axis)
+        return _C_ops.flip(x, "axis", axis)

     helper = LayerHelper("flip", **locals())
     check_type(x, 'X', (Variable), 'flip')
@@ -1107,7 +1107,7 @@ def unique_consecutive(x,
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        out, inverse, counts = core.ops.unique_consecutive(
+        out, inverse, counts = _C_ops.unique_consecutive(
             x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
             'return_counts', return_counts, 'axis', axis)
         outs = [out]
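fill_, zero_, fill_diagonal_ and the fill_diagonal_tensor helpers above are in-place dygraph helpers, so they call the op bindings unconditionally (no in_dygraph_mode() guard) and are then attached to core.VarBase with setattr, which is what exposes them as Tensor methods. A short usage sketch under that assumption (values are illustrative):

    import paddle

    t = paddle.ones([5])
    t.zero_()            # now routed through _C_ops.fill_any_(x, "value_float", 0., ...)
    print(t.tolist())    # [0.0, 0.0, 0.0, 0.0, 0.0]
    t.fill_(2.5)         # also _C_ops.fill_any_, with value_float/value_int derived from 2.5
    print(t.tolist())    # [2.5, 2.5, 2.5, 2.5, 2.5]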
python/paddle/tensor/random.py

@@ -555,8 +555,8 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
     """
-    return core.ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
-                                            seed)
+    return _C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
+                                          seed)

 def randint(low=0, high=None, shape=[1], dtype=None, name=None):
python/paddle/text/viterbi_decode.py

@@ -16,6 +16,7 @@ from ..nn import Layer
 from ..fluid.framework import core, in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
+from paddle import _C_ops

 __all__ = ['viterbi_decode', 'ViterbiDecoder']
@@ -58,9 +59,8 @@ def viterbi_decode(potentials,
             scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
     """
     if in_dygraph_mode():
-        return core.ops.viterbi_decode(potentials, transition_params,
-                                       lengths, 'include_bos_eos_tag',
-                                       include_bos_eos_tag)
+        return _C_ops.viterbi_decode(potentials, transition_params, lengths,
+                                     'include_bos_eos_tag', include_bos_eos_tag)
     check_variable_and_dtype(potentials, 'input', ['float32', 'float64'],
                              'viterbi_decode')
     check_variable_and_dtype(transition_params, 'transitions',
python/paddle/vision/ops.py

@@ -953,10 +953,10 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         raise ValueError("Input features with shape should be (N, C, H, W)")
     output_channels = int(x.shape[1] / (pooled_height * pooled_width))
     if in_dygraph_mode():
-        return core.ops.psroi_pool(x, boxes, boxes_num, "output_channels",
-                                   output_channels, "spatial_scale",
-                                   spatial_scale, "pooled_height", pooled_height,
-                                   "pooled_width", pooled_width)
+        return _C_ops.psroi_pool(x, boxes, boxes_num, "output_channels",
+                                 output_channels, "spatial_scale", spatial_scale,
+                                 "pooled_height", pooled_height, "pooled_width",
+                                 pooled_width)

     helper = LayerHelper('psroi_pool', **locals())
     dtype = helper.input_dtype()
@@ -1064,7 +1064,7 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        pool_out, argmaxes = core.ops.roi_pool(
+        pool_out, argmaxes = _C_ops.roi_pool(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale)
         return pool_out
@@ -1219,7 +1219,7 @@ def roi_align(x,
         pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        align_out = core.ops.roi_align(
+        align_out = _C_ops.roi_align(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale, "sampling_ratio",
             sampling_ratio, "aligned", aligned)
tools/count_api_without_core_ops.py

@@ -22,6 +22,7 @@ import pydoc
 import hashlib
 import functools
 import platform
+from paddle import _C_ops

 __all__ = ['get_apis_with_and_without_core_ops', ]
@@ -207,7 +208,7 @@ if __name__ == "__main__":
     else:
         print("""Usage:
-    1. Count and list all operator-raleated APIs that contains append_op but not core.ops.xx.
+    1. Count and list all operator-raleated APIs that contains append_op but not _C_ops.xx.
        python ./count_api_without_core_ops.py -c paddle
    2. Print api and the md5 of source code of the api.
       python ./count_api_without_core_ops.py -p paddle