Unverified · Commit 273783b3 authored by LoneRanger, committed by GitHub

remove op.py in fluid (#52248)

* remove op.py

* [Zero-Dim] change Tensor.numpy() usage to other equivalent usage, avoid hack (#52197)
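
  For context, the Zero-Dim commit replaces indexing hacks such as `x.numpy()[0]` (which assume a shape-`[1]` tensor) with usage that is also valid for true 0-D tensors. An illustrative sketch of the kind of rewrite involved, not code from this PR:

  ```python
  import paddle

  loss = paddle.to_tensor(3.14)  # a 0-D tensor under zero-dim semantics

  # Old hack: assumes shape [1]; raises IndexError once loss is genuinely 0-D.
  # value = loss.numpy()[0]

  # Equivalent usage that works for both 0-D and shape-[1] tensors:
  value = float(loss)   # scalar conversion
  arr = loss.numpy()    # NumPy array; use arr.item() for a Python scalar
  ```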

* [BugFix] fix compute error in fused_dropout_add (#52261)

* fix bug

* add utest

* add utest

* [CodeStyle][UP034] remove (()) cases (#52060)

* add up34

* modify var name in loop

* revert changes in test_slice

* Revert "modify var name in loop"

This reverts commit 6d748e371afb417054ed0c6b36fd11e87959a90d.

* temporarily ignore test_slice.py

* add comment

* empty commit, re-trigger all ci

* fix inc

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
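
For reference, UP034 is the pyupgrade/Ruff rule against extraneous (doubled) parentheses. A minimal illustrative example, not taken from this diff:

```python
# Flagged by UP034: the inner parentheses are redundant.
print(("Hello, world"))

# After the cleanup:
print("Hello, world")
```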

* [AMP OP&Test] add unittest for log_softmax (#52264)

* Fix Linux [-Wterminate] warning (#52186)

* [CustomOP Inplace] Automap inplace dtype and shape, prepare for vector<Tensor> output (#52214)

* [CustomOP Inplace] Automap inplace dtype and shape, prepare for vector<Tensor> output

* delete dtype,shape func of multi_inplace op

* [CustomOP Inplace] Automap inplace dtype and shape, support vector<Tensor> output

* [CustomOP Inplace] Auto-generate python API for inplace vector<Tensor> output

* [AMP OP&Test] add float16 optest for reshape_op (#51678)

* [AMP OP&Test] add float16 optest for reshape_op

* add public_python_api

* [AMP OP&Test] Add fp16/bf16 to clip op (#52158)

* add fp16/bf16 to clip op

* fix as reviewed

* update test_clip_op.py

* update test_clip_op.py

* fix bug

* fix code style

* fix bug

* fix bug
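
The AMP OP&Test commits above share a pattern: register a low-precision variant of an existing OpTest case. A minimal sketch assuming the `eager_op_test.OpTest` conventions visible in this diff; the class name, shapes, and tolerance are illustrative, not the PR's actual tests:

```python
import numpy as np
from eager_op_test import OpTest


class TestClipFP16Op(OpTest):
    def setUp(self):
        self.op_type = 'clip'
        self.dtype = np.float16
        x = np.random.random((4, 8)).astype(self.dtype)
        # Nudge values off the clip boundaries so the numeric
        # gradient check stays stable in half precision.
        x[np.abs(x - 0.2) < 0.05] = 0.5
        x[np.abs(x - 0.8) < 0.05] = 0.5
        self.inputs = {'X': x}
        self.attrs = {'min': 0.2, 'max': 0.8}
        self.outputs = {'Out': np.clip(x, 0.2, 0.8)}

    def test_check_output(self):
        self.check_output(atol=1e-3)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
```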

---------
Co-authored-by: Zhou Wei <1183042833@qq.com>
Co-authored-by: ShenLiang <1422485404@qq.com>
Co-authored-by: 张春乔 <83450930+Liyulingyue@users.noreply.github.com>
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Co-authored-by: Ccc <52520497+juncaipeng@users.noreply.github.com>
Co-authored-by: Galaxy1458 <55453380+Galaxy1458@users.noreply.github.com>
Co-authored-by: HongyuJia <jiahongyu@baidu.com>
Co-authored-by: zhaoyingli <86812880+zhaoyinglia@users.noreply.github.com>
Co-authored-by: wuyefeilin <30919197+wuyefeilin@users.noreply.github.com>
Parent c85a0c5c
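
Every hunk below retargets the same import: `python/paddle/fluid/op.py` is removed and the test-only `Operator` helper now lives beside the unit tests, so `from paddle.fluid.op import Operator` becomes `from op import Operator` inside the unittests directory, or `from paddle.fluid.tests.unittests.op import Operator` elsewhere. A sketch of the helper's usage after the move, pieced together from the `clip_by_norm` and `sum` call sites in this diff (the scope/place setup is illustrative):

```python
import numpy as np

from paddle.fluid import core
from paddle.fluid.tests.unittests.op import Operator

place = core.CPUPlace()
scope = core.Scope()

# Feed an input variable and pre-create the output variable.
x_tensor = scope.var('X').get_tensor()
x_tensor.set(np.random.random((10, 10)).astype('float32'), place)
scope.var('Out').get_tensor()

# Build the raw operator through the relocated helper and run it.
clip_by_norm_op = Operator('clip_by_norm', max_norm=1.0, X='X', Out='Out')
clip_by_norm_op.run(scope, place)
```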
......@@ -16,8 +16,7 @@ import time
import numpy as np
from eager_op_test import OpTest
-from paddle.fluid.op import Operator
+from op import Operator
class BenchmarkSuite(OpTest):
......
......@@ -18,7 +18,7 @@ import numpy as np
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
class TestDGCMomentumOp1(unittest.TestCase):
......
......@@ -18,7 +18,7 @@ import numpy as np
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
g_array_size = 102400
......
......@@ -25,7 +25,7 @@ from dist_test_utils import remove_ps_flag
from paddle import fluid
from paddle.fluid import core
from paddle.fluid.framework import Program, program_guard
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
from paddle.incubate.distributed.fleet.parameter_server.mode import (
DistributedMode,
)
......
......@@ -23,6 +23,7 @@ from collections import defaultdict
from copy import copy
import numpy as np
+from op import Operator
import paddle
from paddle import fluid
......@@ -35,7 +36,6 @@ from paddle.fluid.framework import (
_current_expected_place,
canonicalize_attrs,
)
-from paddle.fluid.op import Operator
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from prim_op_test import OpTestUtils, PrimForwardChecker, PrimGradChecker
......
......@@ -14,8 +14,9 @@
import numpy as np
-import paddle.fluid.core as core
-import paddle.fluid.proto.framework_pb2 as framework_pb2
+from paddle.fluid import core
+from paddle.fluid.proto import framework_pb2
# NOTE: this is added to support creating a Scalar message
# from a python number
......@@ -256,13 +257,13 @@ def create_op_creation_method(op_proto):
inputs=[(var.name, var.duplicable) for var in op_proto.inputs],
outputs=[(var.name, var.duplicable) for var in op_proto.outputs],
attrs=[attr.name for attr in op_proto.attrs],
-extra_attrs=[item for item in extra_attrs_map.keys()],
+extra_attrs=list(extra_attrs_map.keys()),
)
class OperatorFactory:
def __init__(self):
-self.op_methods = dict()
+self.op_methods = {}
for op_proto in get_all_op_protos():
method = create_op_creation_method(op_proto)
......@@ -313,70 +314,4 @@ class OperatorFactory:
return self.get_op_info(type).extra_attrs
-class __RecurrentOp__:
-__proto__ = None
-type = "recurrent"
-def __init__(self):
-# cache recurrent_op's proto
-if self.__proto__ is None:
-for op_proto in get_all_op_protos():
-if op_proto.type == self.type:
-self.__proto__ = op_proto
-def __call__(self, *args, **kwargs):
-if self.type not in args and "type" not in kwargs:
-kwargs["type"] = self.type
-# create proto
-create_method = OpDescCreationMethod(self.__proto__)
-proto = create_method(*args, **kwargs)
-# create rnnop
-return core.RecurrentOp.create(proto.SerializeToString())
-class __DynamicRecurrentOp__:
-__proto__ = None
-type = "dynamic_recurrent"
-def __init__(self):
-# cache recurrent_op's proto
-if self.__proto__ is None:
-for op_proto in get_all_op_protos():
-if op_proto.type == self.type:
-self.__proto__ = op_proto
-def __call__(self, *args, **kwargs):
-if self.type not in args and "type" not in kwargs:
-kwargs["type"] = self.type
-# create proto
-create_method = OpDescCreationMethod(self.__proto__)
-proto = create_method(*args, **kwargs)
-# create rnnop
-return core.DynamicRecurrentOp.create(proto.SerializeToString())
-class __CondOp__:
-__proto__ = None
-type = "cond"
-def __init__(self):
-# cache recurrent_op's proto
-if self.__proto__ is None:
-for op_proto in get_all_op_protos():
-if op_proto.type == self.type:
-self.__proto__ = op_proto
-def __call__(self, *args, **kwargs):
-if self.type not in args and "type" not in kwargs:
-kwargs["type"] = self.type
-# create proto
-create_method = OpDescCreationMethod(self.__proto__)
-proto = create_method(*args, **kwargs)
-# create condop
-return core.CondOp.create(proto.SerializeToString())
Operator = OperatorFactory() # The default global factory
-RecurrentOp = __RecurrentOp__()
-DynamicRecurrentOp = __DynamicRecurrentOp__()
-CondOp = __CondOp__()
......@@ -15,10 +15,10 @@
import unittest
import numpy as np
+from op import Operator
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestSparseSquareOp(unittest.TestCase):
......
......@@ -17,10 +17,10 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
def adamgrad_wrapper(
......
......@@ -16,11 +16,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
def adam_wrapper(
......
......@@ -17,12 +17,12 @@ import unittest
import numpy as np
from eager_op_test import OpTest, _set_use_system_allocator
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import grad_var_name
-from paddle.fluid.op import Operator
_set_use_system_allocator(True)
......
......@@ -15,9 +15,9 @@
import unittest
import numpy as np
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestBeamSearchDecodeOp(unittest.TestCase):
......
......@@ -15,9 +15,9 @@
import unittest
import numpy as np
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
def create_tensor(scope, name, np_data):
......
......@@ -16,9 +16,9 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
from paddle.nn import clip
......@@ -119,7 +119,7 @@ class TestClipByNormOpWithSelectedRows(unittest.TestCase):
out_selected_rows = scope.var('Out').get_selected_rows()
# run clip_by_norm_op
-clip_by_norm_op = fluid.op.Operator(
+clip_by_norm_op = Operator(
"clip_by_norm", max_norm=self.max_norm, X='X', Out='Out'
)
clip_by_norm_op.run(scope, place)
......
......@@ -17,11 +17,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
-from paddle.fluid.op import Operator
def _reference_testing(x, batch_size, batch_sum, batch_square_sum, slot_dim=-1):
......
......@@ -14,8 +14,9 @@
import unittest
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestFakeInitOpSelectedRows(unittest.TestCase):
......
......@@ -16,11 +16,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
-from paddle.fluid.op import Operator
def fill_wrapper(shape, value=0.0):
......
......@@ -16,9 +16,9 @@ import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestFillOp1(OpTest):
......
......@@ -16,9 +16,9 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
def ftrl_step(param, grad, rows, sq_accum, lin_accum, lr, l1, l2, lr_power):
......
......@@ -15,10 +15,10 @@
import unittest
import numpy as np
+from op import Operator
import paddle
from paddle.fluid import Program, core, program_guard
-from paddle.fluid.op import Operator
from paddle.nn import clip
......
......@@ -16,10 +16,10 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
paddle.enable_static()
......
......@@ -15,11 +15,11 @@
import unittest
import numpy as np
+from op import Operator
import paddle
from paddle import enable_static, fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
convert_float_to_uint16,
......
......@@ -21,12 +21,12 @@ from eager_op_test import (
paddle_static_guard,
skip_check_grad_ci,
)
+from op import Operator
import paddle
import paddle.nn.functional as F
from paddle import fluid
from paddle.fluid import Program, core, program_guard
-from paddle.fluid.op import Operator
class TestLookupTableOp(OpTest):
......
......@@ -16,11 +16,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest, skip_check_grad_ci
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
-from paddle.fluid.op import Operator
class TestStaticGraphSupportMultipleInt(unittest.TestCase):
......
......@@ -15,9 +15,9 @@
import unittest
import numpy as np
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestMergeSelectedRows(unittest.TestCase):
......
......@@ -17,11 +17,11 @@ import unittest
import numpy
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
def calculate_momentum_by_numpy(
......
......@@ -15,8 +15,8 @@
import unittest
import numpy as np
+import op
-from paddle.fluid import op
from paddle.fluid.proto import framework_pb2
......
......@@ -15,11 +15,11 @@
import unittest
import numpy as np
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
def create_selected_rows_and_tensor(
......
......@@ -15,8 +15,9 @@
import unittest
import numpy as np
+import op
-from paddle.fluid import framework, op
+from paddle.fluid import framework
class TestWarpAsScalar(unittest.TestCase):
......
......@@ -18,11 +18,11 @@ import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from eager_op_test import OpTest, convert_float_to_uint16
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
from paddle.static import Program, program_guard
......
......@@ -16,11 +16,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
paddle.enable_static()
......
......@@ -16,11 +16,11 @@ import struct
import unittest
import numpy as np
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.eager_op_test import (
OpTest,
OpTestTool,
......
......@@ -16,10 +16,10 @@ import unittest
import numpy as np
from eager_op_test import OpTest, convert_float_to_uint16
+from op import Operator
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestShapeOp(OpTest):
......
......@@ -16,9 +16,9 @@ import unittest
import numpy as np
from eager_op_test import OpTest
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
class TestShareDataOp(OpTest):
......
......@@ -25,13 +25,13 @@ from eager_op_test import (
convert_float_to_uint16,
convert_uint16_to_float,
)
+from op import Operator
import paddle
import paddle.inference as paddle_infer
from paddle import enable_static, fluid
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.op import Operator
def sum_wrapper(X, use_mkldnn=False):
......
......@@ -16,11 +16,11 @@ import unittest
import numpy as np
from eager_op_test import OpTest, convert_uint16_to_float
+from op import Operator
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
from paddle.fluid.tests.unittests.test_uniform_random_op import (
output_hist,
output_hist_diag,
......
......@@ -17,13 +17,13 @@ import unittest
import numpy as np
from eager_op_test import OpTest, convert_uint16_to_float
+from op import Operator
from test_attribute_var import UnittestBase
import paddle
from paddle import fluid
from paddle.fluid import Program, core, program_guard
from paddle.fluid.framework import convert_np_dtype_to_dtype_
-from paddle.fluid.op import Operator
from paddle.tensor import random
......
......@@ -13,9 +13,9 @@
# limitations under the License.
import numpy as np
+from op import Operator
from paddle.fluid import core
-from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None):
......
......@@ -27,7 +27,7 @@ from xpu.get_test_cover_info import (
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
class XPUTestAdamOp(XPUOpTestWrapper):
......
......@@ -27,7 +27,7 @@ from xpu.get_test_cover_info import (
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
paddle.enable_static()
......
......@@ -29,7 +29,7 @@ from xpu.get_test_cover_info import (
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
paddle.enable_static()
......
......@@ -28,7 +28,7 @@ from xpu.get_test_cover_info import (
import paddle
from paddle import fluid
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
class XPUTestSgdOp(XPUOpTestWrapper):
......
......@@ -27,7 +27,7 @@ from xpu.get_test_cover_info import (
import paddle
from paddle.fluid import core
-from paddle.fluid.op import Operator
+from paddle.fluid.tests.unittests.op import Operator
paddle.enable_static()
......
......@@ -16,8 +16,8 @@ import unittest
import numpy as np
-import paddle.fluid.op as fluid_op
from paddle.fluid import core
+from paddle.fluid.tests.unittests.op import Operator
from paddle.fluid.tests.unittests.test_sum_op import TestSumOp
......@@ -70,7 +70,7 @@ class TestMKLDNNSumInplaceOp(unittest.TestCase):
tensor = var.get_tensor()
tensor.set(var_value, place)
-sum_op = fluid_op.Operator(
+sum_op = Operator(
"sum", X=["x0", "x1"], Out=out_var_name, use_mkldnn=True
)
expected_out = np.array(self.x0 + self.x1)
......