Unverified commit 242ef2b9 authored by Zhanlue Yang, committed by GitHub

Replaced core.ops with _C_ops (#38337)

Parent 274b135b
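This commit is a mechanical rename of the dygraph fast path: every `core.ops.xx(...)` call becomes `_C_ops.xx(...)`, with `from paddle import _C_ops` added where the alias was not yet imported. The calling convention (positional inputs followed by flattened attribute name/value pairs) is unchanged. A minimal sketch of the pattern, assuming Paddle 2.x dygraph mode and using `elementwise_mul`, one of the ops touched below:

```python
import numpy as np
import paddle
from paddle import _C_ops  # the alias this commit migrates to

paddle.disable_static()  # dygraph mode, where the _C_ops fast path applies

x = paddle.to_tensor(np.array([1.0, 2.0, 3.0], dtype=np.float32))
y = paddle.to_tensor(np.array([4.0, 5.0, 6.0], dtype=np.float32))

# Previously spelled paddle.fluid.core.ops.elementwise_mul(x, y);
# the op name, inputs, and attribute pairs are identical after the rename.
res = _C_ops.elementwise_mul(x, y)
print(res.numpy())  # [ 4. 10. 18.]
```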
@@ -31,7 +31,7 @@ from distutils.util import strtobool
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = [  #noqa
     'get_host_name_ip',
@@ -146,7 +146,7 @@ def global_scatter(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_scatter(x, local_count, \
+        return _C_ops.global_scatter(x, local_count, \
             global_count, \
             'use_calc_stream', use_calc_stream, \
             'ring_id', ring_id)
@@ -258,7 +258,7 @@ def global_gather(x,
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.global_gather(x, local_count, \
+        return _C_ops.global_gather(x, local_count, \
             global_count, \
             'use_calc_stream', use_calc_stream, \
             'ring_id', ring_id)
......
@@ -29,6 +29,7 @@ from .data_feeder import check_variable_and_dtype
 from .framework import in_dygraph_mode
 from .layer_helper import LayerHelper
 from .framework import default_main_program
+from paddle import _C_ops

 __all__ = [
     'set_gradient_clip', 'ErrorClipByValue', 'ClipGradByValue',
@@ -47,7 +48,7 @@ def _squared_l2_norm(x):
         return sum_square

     if in_dygraph_mode():
-        return core.ops.squared_l2_norm(x)
+        return _C_ops.squared_l2_norm(x)

     op_type = 'squared_l2_norm'
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], op_type)
......
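For context, `squared_l2_norm` reduces a tensor to the sum of squares of its elements; gradient clipping uses it to avoid materializing the squared intermediate. A hedged equivalence sketch against plain public ops (illustrative input, not from the patch):

```python
import paddle
from paddle import _C_ops

x = paddle.rand([4, 5])
fused = _C_ops.squared_l2_norm(x)   # single fused reduction kernel
ref = paddle.sum(paddle.square(x))  # unfused reference computation
print(float(fused), float(ref))     # should agree up to float rounding
```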
@@ -140,7 +140,7 @@ class TestDeprecatedDocorator(unittest.TestCase):
         b = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32)
         x = paddle.to_tensor(a)
         y = paddle.to_tensor(b)
-        res = core.ops.elementwise_mul(x, y)
+        res = _C_ops.elementwise_mul(x, y)

         # expected
         expected = LOWEST_WARNING_POSTION
......
@@ -24,6 +24,7 @@ import paddle.nn as nn
 from paddle.dataset.common import DATA_HOME
 from paddle.fluid.framework import core, in_dygraph_mode
 from paddle.fluid.layer_helper import LayerHelper
+from paddle import _C_ops

 import sys
 sys.path.append("./tokenizer")
@@ -75,7 +76,7 @@ class FasterTokenizer(nn.Layer):
                 is_split_into_words=False,
                 pad_to_max_seq_len=False):
         if in_dygraph_mode():
-            input_ids, seg_ids = core.ops.faster_tokenizer(
+            input_ids, seg_ids = _C_ops.faster_tokenizer(
                 self.vocab, text, text_pair, "do_lower_case", do_lower_case,
                 "max_seq_len", max_seq_len, "pad_to_max_seq_len",
                 pad_to_max_seq_len, "is_split_into_words", is_split_into_words)
......
@@ -16,6 +16,7 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 import unittest
+from paddle import _C_ops

 if fluid.is_compiled_with_cuda():
     fluid.core.globals()['FLAGS_cudnn_deterministic'] = True
@@ -112,8 +113,8 @@ class InstanceNorm(fluid.dygraph.Layer):
     def forward(self, input):
         if fluid.in_dygraph_mode():
-            out, _, _ = fluid.core.ops.instance_norm(
-                input, self.scale, self.bias, 'epsilon', self.epsilon)
+            out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
+                                             'epsilon', self.epsilon)
             return out
         else:
             return fluid.layers.instance_norm(
......
@@ -21,6 +21,7 @@ import paddle.fluid.layers as layers
 import paddle.fluid.core as core
 from paddle.fluid.dygraph.jit import TracedLayer
 import numpy as np
+from paddle import _C_ops

 class TestTracedLayer(fluid.dygraph.Layer):
@@ -28,7 +29,7 @@ class TestTracedLayer(fluid.dygraph.Layer):
         super(TestTracedLayer, self).__init__(name_scope)

     def forward(self, input):
-        return core.ops.relu(input)
+        return _C_ops.relu(input)
class TestVariable(unittest.TestCase):
@@ -46,7 +47,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False

             res1 = layers.elementwise_add(x, y)
-            res2 = core.ops.elementwise_add(x, y)
+            res2 = _C_ops.elementwise_add(x, y)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -58,7 +59,7 @@ class TestVariable(unittest.TestCase):
             y = fluid.dygraph.to_variable(b)

             res1 = layers.elementwise_mul(x, y)
-            res2 = core.ops.elementwise_mul(x, y)
+            res2 = _C_ops.elementwise_mul(x, y)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -68,7 +69,7 @@ class TestVariable(unittest.TestCase):
             x = fluid.dygraph.to_variable(a)

             res1 = layers.relu(x)
-            res2 = core.ops.relu(x)
+            res2 = _C_ops.relu(x)

             self.assertTrue(np.array_equal(res1.numpy(), res2.numpy()))
@@ -81,7 +82,7 @@ class TestVariable(unittest.TestCase):
             x.stop_gradient = False
             y.stop_gradient = False

-            loss = core.ops.elementwise_mul(x, y)
+            loss = _C_ops.elementwise_mul(x, y)
             loss.backward()
             x_grad = x.gradient()
......
@@ -24,6 +24,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 class TestSumOp(OpTest):
@@ -382,11 +383,11 @@ class TestSumOpError(unittest.TestCase):
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
......
@@ -25,6 +25,7 @@ import paddle.fluid.core as core
 from paddle.fluid.op import Operator
 from paddle.fluid.tests.unittests.op_test import (
     OpTest, convert_float_to_uint16, convert_uint16_to_float)
+from paddle import _C_ops

 paddle.enable_static()
@@ -171,11 +172,11 @@
     def test_errors(self):
         def test_empty_list_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([])
+                fluid._C_ops.sum([])

         def test_list_of_none_input():
             with fluid.dygraph.guard():
-                fluid.core.ops.sum([None])
+                fluid._C_ops.sum([None])

         self.assertRaises(Exception, test_empty_list_input)
         self.assertRaises(Exception, test_list_of_none_input)
......
@@ -16,6 +16,7 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid import core
+from paddle import _C_ops

 def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
@@ -82,8 +83,8 @@ def graph_send_recv(x, src_index, dst_index, pool_type="sum", name=None):
             % pool_type)

     if in_dygraph_mode():
-        out, tmp = core.ops.graph_send_recv(x, src_index, dst_index,
-                                            'pool_type', pool_type.upper())
+        out, tmp = _C_ops.graph_send_recv(x, src_index, dst_index, 'pool_type',
+                                          pool_type.upper())
         return out

     check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
......
@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse(x, mask, name=None):
@@ -58,7 +59,7 @@ def softmax_mask_fuse(x, mask, name=None):
             # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask(x, mask)
+        out = _C_ops.fused_softmax_mask(x, mask)
         return out
     helper = LayerHelper('fused_softmax_mask', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
......
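`fused_softmax_mask` applies an additive mask and a softmax over the last axis in one kernel. An unfused reference for what it computes, as a sketch with illustrative shapes (the fused kernel itself has device and dtype constraints not reproduced here):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 8, 8, 32])      # e.g. attention scores
mask = paddle.rand([2, 1, 8, 32])   # broadcastable additive mask
ref = F.softmax(x + mask, axis=-1)  # fused_softmax_mask(x, mask) computes this in one pass
```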
@@ -17,6 +17,7 @@ from __future__ import print_function
 from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.framework import in_dygraph_mode
 from paddle.fluid import core
+from paddle import _C_ops

 def softmax_mask_fuse_upper_triangle(x):
@@ -58,7 +59,7 @@ def softmax_mask_fuse_upper_triangle(x):
             #   ... ]]]
     """
     if in_dygraph_mode():
-        out = core.ops.fused_softmax_mask_upper_triangle(x)
+        out = _C_ops.fused_softmax_mask_upper_triangle(x)
         return out

     helper = LayerHelper('fused_softmax_mask_upper_triangle', **locals())
......
@@ -1763,7 +1763,7 @@ def class_center_sample(label, num_classes, num_samples, group=None):
         seed = default_main_program().random_seed

     if in_dygraph_mode():
-        remapped_label, sampled_class_center = core.ops.class_center_sample(
+        remapped_label, sampled_class_center = _C_ops.class_center_sample(
             label, 'num_classes', num_classes, 'num_samples', num_samples,
             'ring_id', ring_id, 'nranks', nranks, 'rank', rank, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0)
......
@@ -1320,7 +1320,7 @@ def margin_cross_entropy(logits,
         label = paddle.unsqueeze(label, axis=-1)

     if in_dygraph_mode():
-        softmax, loss = core.ops.margin_cross_entropy(
+        softmax, loss = _C_ops.margin_cross_entropy(
             logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks,
             'margin1', margin1, 'margin2', margin2, 'margin3', margin3, 'scale',
             scale, 'return_softmax', return_softmax)
......
@@ -1430,7 +1430,7 @@ def det(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.determinant(x)
+        return _C_ops.determinant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
@@ -1485,7 +1485,7 @@ def slogdet(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.slogdeterminant(x)
+        return _C_ops.slogdeterminant(x)

     check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
@@ -1633,7 +1633,7 @@ def matrix_power(x, n, name=None):
             #  [ 1.80555556 , -1.91666667 ,  0.44444444 ]]
     """
     if in_dygraph_mode():
-        return core.ops.matrix_power(x, "n", n)
+        return _C_ops.matrix_power(x, "n", n)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
     check_type(n, 'n', int, 'matrix_power')
......
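In dygraph mode the wrappers above now reach `_C_ops.determinant`, `_C_ops.slogdeterminant`, and `_C_ops.matrix_power` directly. A small usage sketch, assuming a Paddle release where these are exposed under `paddle.linalg`:

```python
import paddle

x = paddle.to_tensor([[2., 0.], [0., 4.]])
print(paddle.linalg.det(x))               # 8.0
print(paddle.linalg.slogdet(x))           # sign 1.0, log|det| = log(8)
print(paddle.linalg.matrix_power(x, -1))  # inverse: [[0.5, 0.], [0., 0.25]]
```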
@@ -70,7 +70,7 @@ def fill_(x, value):
         raise TypeError(
             "The type of 'value' must be int or float, but received %s." %
             (type(value)))
-    return core.ops.fill_any_(x, "value_float",
+    return _C_ops.fill_any_(x, "value_float",
                             float(value), "value_int", int(value))
@@ -102,7 +102,7 @@ def zero_(x):
             print(tensor.tolist())   #[0, 0, 0, 0, 0]
     """
-    return core.ops.fill_any_(x, "value_float", 0., "value_int", int(0))
+    return _C_ops.fill_any_(x, "value_float", 0., "value_int", int(0))

 setattr(core.VarBase, 'zero_', zero_)
@@ -148,9 +148,9 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None):
         'Tensor dims should be equal while input dims > 2 in fill_diagonal_ API'
     )
     if len(inshape) == 2:
-        return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset,
+        return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset,
                                      'wrap', wrap)
-    return core.ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
+    return _C_ops.fill_diagonal_(x, 'value', value, 'offset', offset, 'wrap',
                                  True)
@@ -182,9 +182,9 @@ def _fill_diagonal_tensor_impl(x, y, offset=0, dim1=0, dim2=1, inplace=False):
         y = y.reshape([1, -1])

     if inplace:
-        return core.ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
+        return _C_ops.fill_diagonal_tensor_(x, y, 'dim1', dim1, 'dim2', dim2,
                                             'offset', offset)
-    return core.ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
+    return _C_ops.fill_diagonal_tensor(x, y, 'dim1', dim1, 'dim2', dim2,
                                        'offset', offset)
@@ -475,7 +475,7 @@ def flip(x, axis, name=None):
     if isinstance(axis, int):
         axis = [axis]
     if in_dygraph_mode():
-        return core.ops.flip(x, "axis", axis)
+        return _C_ops.flip(x, "axis", axis)

     helper = LayerHelper("flip", **locals())
     check_type(x, 'X', (Variable), 'flip')
@@ -1107,7 +1107,7 @@ def unique_consecutive(x,
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        out, inverse, counts = core.ops.unique_consecutive(
+        out, inverse, counts = _C_ops.unique_consecutive(
             x, 'dtype', attr_dtype, 'return_inverse', return_inverse,
             'return_counts', return_counts, 'axis', axis)
         outs = [out]
......
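The in-place helpers in this file are attached to `core.VarBase` via `setattr`, so the migrated `_C_ops` calls sit behind ordinary tensor methods. A quick sketch using the method names defined above:

```python
import paddle

x = paddle.ones([3, 3])
x.fill_diagonal_(5.0)  # in-place, routes to _C_ops.fill_diagonal_
x.zero_()              # in-place, routes to _C_ops.fill_any_ with value 0

y = paddle.flip(paddle.arange(6).reshape([2, 3]), axis=[1])  # _C_ops.flip
print(y.numpy())       # [[2 1 0] [5 4 3]]
```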
@@ -555,7 +555,7 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
             # [-0.34646994, -0.45116323, -0.09902662, -0.11397249], # random
             # [ 0.433519,    0.39483607, -0.8660099,   0.83664286]] # random
     """
-    return core.ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
+    return _C_ops.uniform_random_inplace_(x, 'min', min, 'max', max, 'seed',
                                           seed)
......
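`uniform_` is the in-place counterpart of `paddle.uniform`, exposed as a tensor method, and now dispatches to `_C_ops.uniform_random_inplace_`. Sketch:

```python
import paddle

x = paddle.zeros([2, 4])
x.uniform_(min=-1.0, max=1.0)  # refill x in place with uniform samples
print(x.numpy())               # random values in [-1, 1)
```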
@@ -16,6 +16,7 @@ from ..nn import Layer
 from ..fluid.framework import core, in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type
+from paddle import _C_ops

 __all__ = ['viterbi_decode', 'ViterbiDecoder']
@@ -58,9 +59,8 @@ def viterbi_decode(potentials,
             scores, path = paddle.text.viterbi_decode(emission, transition, length, False) # scores: [3.37089300, 1.56825531], path: [[1, 0, 0], [1, 1, 0]]
     """
     if in_dygraph_mode():
-        return core.ops.viterbi_decode(potentials, transition_params, lengths,
-                                       'include_bos_eos_tag',
-                                       include_bos_eos_tag)
+        return _C_ops.viterbi_decode(potentials, transition_params, lengths,
+                                     'include_bos_eos_tag', include_bos_eos_tag)
     check_variable_and_dtype(potentials, 'input', ['float32', 'float64'],
                              'viterbi_decode')
     check_variable_and_dtype(transition_params, 'transitions',
......
@@ -953,10 +953,10 @@ def psroi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
             "Input features with shape should be (N, C, H, W)")
     output_channels = int(x.shape[1] / (pooled_height * pooled_width))
     if in_dygraph_mode():
-        return core.ops.psroi_pool(x, boxes, boxes_num, "output_channels",
+        return _C_ops.psroi_pool(x, boxes, boxes_num, "output_channels",
                                  output_channels, "spatial_scale",
-                                 spatial_scale, "pooled_height",
-                                 pooled_height, "pooled_width", pooled_width)
+                                 spatial_scale, "pooled_height", pooled_height,
+                                 "pooled_width", pooled_width)

     helper = LayerHelper('psroi_pool', **locals())
     dtype = helper.input_dtype()
@@ -1064,7 +1064,7 @@ def roi_pool(x, boxes, boxes_num, output_size, spatial_scale=1.0, name=None):
         pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        pool_out, argmaxes = core.ops.roi_pool(
+        pool_out, argmaxes = _C_ops.roi_pool(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale)
         return pool_out
@@ -1219,7 +1219,7 @@ def roi_align(x,
         pooled_height, pooled_width = output_size
     if in_dygraph_mode():
         assert boxes_num is not None, "boxes_num should not be None in dygraph mode."
-        align_out = core.ops.roi_align(
+        align_out = _C_ops.roi_align(
             x, boxes, boxes_num, "pooled_height", pooled_height, "pooled_width",
             pooled_width, "spatial_scale", spatial_scale, "sampling_ratio",
             sampling_ratio, "aligned", aligned)
......
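All three RoI ops share the dygraph layout `(x, boxes, boxes_num)`, where `boxes` is an `[N, 4]` tensor of `(x1, y1, x2, y2)` corners and `boxes_num` counts boxes per image. A hedged usage sketch through the public wrapper, assuming the `paddle.vision.ops.roi_align` signature shown above:

```python
import paddle
from paddle.vision.ops import roi_align

x = paddle.rand([1, 16, 32, 32])                  # [N, C, H, W] feature map
boxes = paddle.to_tensor([[4., 4., 12., 12.],
                          [6., 6., 20., 20.]])    # two RoIs on image 0
boxes_num = paddle.to_tensor([2], dtype='int32')  # boxes per image
out = roi_align(x, boxes, boxes_num, output_size=7)
print(out.shape)  # [2, 16, 7, 7]
```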
@@ -22,6 +22,7 @@ import pydoc
 import hashlib
 import functools
 import platform
+from paddle import _C_ops

 __all__ = ['get_apis_with_and_without_core_ops', ]
@@ -207,7 +208,7 @@ if __name__ == "__main__":
     else:
         print("""Usage:
-    1. Count and list all operator-raleated APIs that contains append_op but not core.ops.xx.
+    1. Count and list all operator-raleated APIs that contains append_op but not _C_ops.xx.
     python ./count_api_without_core_ops.py -c paddle
     2. Print api and the md5 of source code of the api.
     python ./count_api_without_core_ops.py -p paddle
......