Unverified commit f85def97, authored by Nyakku Shigure, committed via GitHub

[CodeStyle][isort] introduce isort (part4) (#48402)

* isort all files

* revert conflicting files

* revert conflicting files

* revert conflicting files
Parent 485de16a
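For readers unfamiliar with isort, the reordering in the hunks below is what the tool produces automatically: imports are grouped (standard library, third party, local), each group is alphabetized, and repeated `from x import ...` lines for the same module are merged into one parenthesized block. The project's actual isort configuration and invocation are not visible in this diff, so the following is only a hedged sketch of how such a pass can be reproduced with isort's Python API (isort >= 5):

    # Illustrative sketch only: the exact options used by the repository are
    # assumptions, not taken from this commit.
    import isort

    messy = "import sys\nimport os\nimport shutil\nimport glob\n"
    print(isort.code(messy))
    # Expected with default settings: glob, os, shutil, sys -- alphabetized
    # within the standard-library group, matching the first hunk below.

On the command line the equivalent is simply running `isort` over the affected paths (for example `isort <target-dirs>`); the precise invocation used for this part of the series is not recorded in the diff.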
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import sys
import shutil
import glob
import sys
def main():
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import sys
def GenerateFileStructureForFinalDygraph(eager_dir):
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import re
import yaml
####################
# Global Variables #
####################
......
......@@ -12,30 +12,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import argparse
import os
import re
from codegen_utils import (
core_ops_returns_info,
core_ops_args_info,
core_ops_args_type_info,
)
from codegen_utils import ReadBwdFile
from codegen_utils import FindForwardName, GetGradNodeName, GetSavedName
from codegen_utils import IsPlainTensorType, IsVectorTensorType
from codegen_utils import GetConstReference, RemoveConstAndReference
from codegen_utils import (
AssertMessage,
FindForwardName,
FunctionGeneratorBase,
GeneratorBase,
GetAutoGradMetaName,
GetAutoGradMetaVectorName,
GetConstReference,
GetDygraphForwardFunctionName,
GetGradNodeName,
GetIndent,
GetInplacedFunctionName,
GetIntermediateAPIFunctionName,
GetSavedName,
IsPlainTensorType,
IsVectorTensorType,
ParseYamlBackward,
ParseYamlForwardFromBackward,
ParseYamlInplaceInfo,
ReadBwdFile,
RemoveConstAndReference,
core_ops_args_info,
core_ops_args_type_info,
core_ops_returns_info,
ops_to_fill_zero_for_empty_grads,
)
from codegen_utils import GetAutoGradMetaName, GetAutoGradMetaVectorName
from codegen_utils import GetInplacedFunctionName
from codegen_utils import ParseYamlForwardFromBackward
from codegen_utils import ParseYamlBackward
from codegen_utils import ParseYamlInplaceInfo
from codegen_utils import FunctionGeneratorBase, GeneratorBase
from codegen_utils import ops_to_fill_zero_for_empty_grads
from codegen_utils import AssertMessage, GetIndent
# Note: assign is a inplace api when parameter(output) isn't none,
# so we should check parameter(output) with rule of inplace.
......
......@@ -12,11 +12,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
from codegen_utils import FunctionGeneratorBase, GeneratorBase
from codegen_utils import GetForwardFunctionName, IsVectorTensorType
from codegen_utils import GetInplacedFunctionName
import os
from codegen_utils import (
FunctionGeneratorBase,
GeneratorBase,
GetForwardFunctionName,
GetInplacedFunctionName,
IsVectorTensorType,
)
#########################
# Global Configurations #
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
import sys
import tarfile
def untar(fname, dirs):
......
......@@ -10,15 +10,17 @@
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import os
import argparse
import io
import numpy as np
import os
import shutil
import sys
import tarfile
import numpy as np
from PIL import Image
from paddle.dataset.common import download
import tarfile
import argparse
import shutil
np.random.seed(0)
......
......@@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree
from PIL import Image
import numpy as np
import argparse
import hashlib
import os
import sys
from paddle.dataset.common import download
import tarfile
import xml.etree.ElementTree
from io import StringIO
import hashlib
import tarfile
import argparse
import numpy as np
from PIL import Image
from paddle.dataset.common import download
DATA_URL = (
"http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar"
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import unittest
class Test_Preprocess(unittest.TestCase):
......
......@@ -16,18 +16,16 @@ import itertools
import re
from type_mapping import (
input_types_map,
optional_input_types_map,
attr_types_map,
opmaker_attr_types_map,
output_type_map,
)
from type_mapping import (
dense_input_types_map,
dense_optional_input_types_map,
dense_output_types_map,
sr_output_types_map,
input_types_map,
opmaker_attr_types_map,
optional_input_types_map,
output_type_map,
phi_attr_types_map,
sr_output_types_map,
)
......
......@@ -17,24 +17,24 @@ import os
from pathlib import Path
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from filters import (
cartesian_prod_mapping,
to_input_name,
to_op_attr_type,
to_opmaker_name,
to_opmaker_name_cstr,
to_pascal_case,
)
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from parse_utils import to_named_dict
from tests import (
is_base_op,
is_vec,
is_scalar,
is_initializer_list,
is_scalar,
is_vec,
supports_inplace,
supports_no_need_buffer,
)
from filters import to_input_name, cartesian_prod_mapping
from parse_utils import to_named_dict
file_loader = FileSystemLoader(Path(__file__).parent / "templates")
env = Environment(
......
......@@ -17,25 +17,25 @@ import os
from pathlib import Path
import yaml
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from filters import (
cartesian_prod_mapping,
to_input_name,
to_op_attr_type,
to_opmaker_name,
to_opmaker_name_cstr,
to_pascal_case,
)
from generate_op import process_invoke_op
from jinja2 import Environment, FileSystemLoader, StrictUndefined
from parse_utils import to_named_dict
from tests import (
is_base_op,
is_vec,
is_scalar,
is_initializer_list,
is_scalar,
is_vec,
supports_inplace,
supports_no_need_buffer,
)
from filters import to_input_name, cartesian_prod_mapping
from parse_utils import to_named_dict
from generate_op import process_invoke_op
file_loader = FileSystemLoader(Path(__file__).parent / "templates")
env = Environment(
......
......@@ -15,7 +15,6 @@
import argparse
import yaml
from parse_utils import parse_op_entry
......
......@@ -14,7 +14,8 @@
import re
from copy import copy
from typing import Dict, Any, List, Tuple
from typing import Any, Dict, List, Tuple
from tests import is_attr, is_input, is_output, is_vec
......
......@@ -13,7 +13,8 @@
# limitations under the License.
import re
from type_mapping import input_types_map, attr_types_map, output_type_map
from type_mapping import attr_types_map, input_types_map, output_type_map
# tests for typename
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import sys
if __name__ == "__main__":
assert len(sys.argv) == 3
......
......@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
from paddle.static import InputSpec
from paddle.jit import to_static
import sys
from paddle.static import InputSpec
class AbsNet(paddle.nn.Layer):
......
......@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# url: https://aistudio.baidu.com/aistudio/projectdetail/3756986?forkThirdPart=1
from net import EfficientNet
import paddle
from paddle.jit import to_static
from paddle.static import InputSpec
import paddle
import sys
model = EfficientNet.from_name('efficientnet-b4')
net = to_static(
......
......@@ -17,13 +17,13 @@ import paddle.nn as nn
import paddle.nn.functional as F
from .utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
get_model_params,
get_same_padding_conv2d,
load_pretrained_weights,
round_filters,
round_repeats,
)
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import collections
import math
import re
from functools import partial
import collections
import paddle
import paddle.nn as nn
......
......@@ -14,6 +14,7 @@
# example 1: save layer
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
......
......@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle
from paddle.vision.models import resnet50
from paddle.jit import to_static
from paddle.static import InputSpec
import sys
from paddle.vision.models import resnet50
model = resnet50(True)
net = to_static(
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import collections
import re
PREFIX_TENSOR_NAME = 'input_'
PREFIX_META_TENSOR_NAME = 'meta_'
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import re
from api_base import BaseAPI, PREFIX_TENSOR_NAME
import yaml
from api_base import PREFIX_TENSOR_NAME, BaseAPI
inplace_out_type_map = {
"Tensor": "Tensor&",
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import re
import yaml
from api_base import BaseAPI
......
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import yaml
from api_gen import ForwardAPI
from sparse_api_gen import SparseAPI
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import re
import argparse
import re
import yaml
def map_code_template(attrs_str, attrs_checker_str):
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
from api_gen import ForwardAPI
import yaml
from api_base import PREFIX_TENSOR_NAME
from api_gen import ForwardAPI
class SparseAPI(ForwardAPI):
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
from sparse_api_gen import SparseAPI
import yaml
from backward_api_gen import BackwardAPI
from sparse_api_gen import SparseAPI
class SparseBackwardAPI(SparseAPI, BackwardAPI):
......
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import yaml
from api_gen import ForwardAPI
PREFIX_TENSOR_NAME = 'input_'
......
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import argparse
import yaml
from api_gen import ForwardAPI
kernel_func_set = set()
......
......@@ -14,11 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import platform
import argparse
import os
#
import platform
def parse_args():
parser = argparse.ArgumentParser("conda build for paddlepaddle version")
......
......@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import paddle as pd
import paddle.fluid as fluid
fluid.install_check.run_check()
print(pd.__version__)
......@@ -13,6 +13,7 @@
# limitations under the License.
from paddle.fluid import core
from .fluid import framework
__all__ = []
......
......@@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.amp import amp_guard
from paddle.fluid.dygraph.amp import amp_decorate
from paddle.fluid.dygraph.amp import amp_decorate, amp_guard
__all__ = []
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.dygraph.amp import AmpScaler
from paddle.fluid.dygraph.amp import OptimizerState
from collections import defaultdict
from paddle.fluid.dygraph.amp import AmpScaler, OptimizerState
__all__ = []
......
......@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License
import paddle
from pathlib import Path
from typing import Optional, Tuple, Union
import paddle
class AudioInfo:
"""Audio info, return type of backend info function"""
......
......@@ -14,12 +14,12 @@
import sys
import warnings
from . import wave_backend
from . import backend
from typing import List
import paddle
from . import backend, wave_backend
def _check_version(version: str) -> bool:
# require paddleaudio >= 1.0.2
......
......@@ -12,13 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import wave
import numpy as np
from pathlib import Path
from typing import Optional, Tuple, Union
import numpy as np
import paddle
from .backend import AudioInfo
......
......@@ -15,10 +15,7 @@ from typing import List
import paddle
from ..features import MelSpectrogram
from ..features import Spectrogram
from ..features import MFCC
from ..features import LogMelSpectrogram
from ..features import MFCC, LogMelSpectrogram, MelSpectrogram, Spectrogram
feat_funcs = {
'raw': None,
......
......@@ -13,11 +13,11 @@
# limitations under the License.
import collections
import os
from typing import List
from typing import Tuple
from typing import List, Tuple
from paddle.utils import download
from paddle.dataset.common import DATA_HOME
from paddle.utils import download
from .dataset import AudioClassificationDataset
__all__ = []
......
......@@ -13,11 +13,11 @@
# limitations under the License.
import collections
import os
from typing import List
from typing import Tuple
from typing import List, Tuple
from paddle.utils import download
from paddle.dataset.common import DATA_HOME
from paddle.utils import download
from .dataset import AudioClassificationDataset
__all__ = []
......
......@@ -12,16 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Optional
from typing import Union
from typing import Optional, Union
import paddle
import paddle.nn as nn
from paddle import Tensor
from ..functional import compute_fbank_matrix
from ..functional import create_dct
from ..functional import power_to_db
from ..functional import compute_fbank_matrix, create_dct, power_to_db
from ..functional.window import get_window
......
......@@ -13,8 +13,7 @@
# limitations under the License.
# Modified from librosa(https://github.com/librosa/librosa)
import math
from typing import Optional
from typing import Union
from typing import Optional, Union
import paddle
from paddle import Tensor
......
......@@ -11,9 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import math
from typing import List
from typing import Tuple
from typing import Union
from typing import List, Tuple, Union
import paddle
from paddle import Tensor
......
......@@ -12,10 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core
from paddle.fluid import framework
from paddle.fluid.backward import gradients_with_optimizer # noqa: F401
import paddle
from paddle.fluid import core, framework
from paddle.fluid.backward import gradients_with_optimizer # noqa: F401
__all__ = []
......
......@@ -13,10 +13,10 @@
# limitations under the License.
import paddle
from paddle.fluid.framework import dygraph_only
from paddle.fluid.dygraph.amp.auto_cast import amp_state
from paddle.amp.auto_cast import auto_cast
from paddle.fluid import core
from paddle.fluid.dygraph.amp.auto_cast import amp_state
from paddle.fluid.framework import dygraph_only
__all__ = []
......
......@@ -13,12 +13,12 @@
# limitations under the License.
from .hapi.callbacks import Callback # noqa: F401
from .hapi.callbacks import ProgBarLogger # noqa: F401
from .hapi.callbacks import ModelCheckpoint # noqa: F401
from .hapi.callbacks import VisualDL # noqa: F401
from .hapi.callbacks import LRScheduler # noqa: F401
from .hapi.callbacks import EarlyStopping # noqa: F401
from .hapi.callbacks import LRScheduler # noqa: F401
from .hapi.callbacks import ModelCheckpoint # noqa: F401
from .hapi.callbacks import ProgBarLogger # noqa: F401
from .hapi.callbacks import ReduceLROnPlateau # noqa: F401
from .hapi.callbacks import VisualDL # noqa: F401
from .hapi.callbacks import WandbCallback # noqa: F401
__all__ = [ # noqa
......
......@@ -12,11 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.static as static
import numpy as np
import json
import os
import numpy as np
import paddle
import paddle.static as static
from paddle.fluid import core
......
......@@ -27,11 +27,13 @@ images per class.
"""
import pickle
import tarfile
import numpy
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import tarfile
import pickle
__all__ = []
......
......@@ -12,18 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import errno
import glob
import hashlib
import importlib
import os
import errno
import pickle
import shutil
import sys
import importlib
import paddle.dataset
import pickle
import tempfile
import glob
import requests
import paddle
import paddle.dataset
__all__ = []
......
......@@ -20,8 +20,9 @@ dataset. And a pre-trained word vector model based on Wikipedia corpus is used
to initialize SRL model.
"""
import tarfile
import gzip
import tarfile
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
......
......@@ -30,17 +30,16 @@ http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}.
"""
import functools
from .common import download
import tarfile
from multiprocessing import cpu_count
from paddle.dataset.image import load_image_bytes
from paddle.dataset.image import simple_transform
from paddle.reader import map_readers, xmap_readers
import paddle.utils.deprecated as deprecated
from multiprocessing import cpu_count
from paddle.dataset.image import load_image_bytes, simple_transform
from paddle.reader import map_readers, xmap_readers
from paddle.utils import try_import
from .common import download
__all__ = []
DATA_URL = 'http://paddlemodels.bj.bcebos.com/flowers/102flowers.tgz'
......
......@@ -30,13 +30,14 @@ the image layout as follows.
be keep consistent between the training and inference period.
"""
import numpy as np
import os
# FIXME(minqiyang): this is an ugly fix for the numpy bug reported here
# https://github.com/numpy/numpy/issues/12497
import subprocess
import sys
import os
import numpy as np
interpreter = sys.executable
# Note(zhouwei): if use Python/C 'PyRun_SimpleString', 'sys.executable'
......@@ -59,8 +60,8 @@ else:
cv2 = None
import os
import tarfile
import pickle
import tarfile
__all__ = []
......
......@@ -20,12 +20,13 @@ of 25,000 highly polar movie reviews for training, and 25,000 for testing.
Besides, this module also provides API for building dictionary.
"""
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import collections
import tarfile
import re
import string
import tarfile
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
__all__ = []
......
......@@ -19,11 +19,12 @@ http://www.fit.vutbr.cz/~imikolov/rnnlm/ and parse training set and test set
into paddle reader creators.
"""
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import collections
import tarfile
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
__all__ = []
# URL = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz'
......
......@@ -18,12 +18,14 @@ This module will download dataset from http://yann.lecun.com/exdb/mnist/ and
parse training set and test set into paddle reader creators.
"""
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import gzip
import numpy
import struct
import numpy
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
__all__ = []
URL_PREFIX = 'https://dataset.bj.bcebos.com/mnist/'
......
......@@ -22,12 +22,14 @@ set and test set into paddle reader creators.
"""
import numpy as np
import functools
import re
import zipfile
import numpy as np
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
import re
import functools
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.cifar
import unittest
import paddle.dataset.cifar
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.flowers
import unittest
import paddle.dataset.flowers
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.imdb
import unittest
import re
import unittest
import paddle.dataset.imdb
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.imikolov
import unittest
import paddle.dataset.imikolov
WORD_DICT = paddle.dataset.imikolov.build_dict()
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.mnist
import unittest
import paddle.dataset.mnist
__all__ = []
......
......@@ -20,7 +20,9 @@ Description:
import sys
import unittest
import numpy as np
from paddle.dataset import image
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.voc2012
import unittest
import paddle.dataset.voc2012
__all__ = []
......
......@@ -12,9 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.wmt16
import unittest
import paddle.dataset.wmt16
__all__ = []
......
......@@ -19,10 +19,12 @@ https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators.
"""
import numpy as np
import tempfile
import tarfile
import os
import tarfile
import tempfile
import numpy as np
import paddle.dataset.common
import paddle.utils.deprecated as deprecated
......
......@@ -19,13 +19,15 @@ to training/test sets has been maintained. The total number of images
with segmentation has been increased from 7,062 to 9,993.
"""
import tarfile
import io
import tarfile
import numpy as np
from paddle.dataset.common import download
import paddle.utils.deprecated as deprecated
from PIL import Image
import paddle.utils.deprecated as deprecated
from paddle.dataset.common import download
__all__ = []
VOC_URL = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/\
......
......@@ -13,15 +13,16 @@
# limitations under the License.
import os
import warnings
import paddle
from paddle.fluid import core
from paddle.fluid.layers.utils import _hash_with_id
from paddle.fluid.core import (
CUDAPlace,
is_compiled_with_cuda,
is_compiled_with_rocm,
CUDAPlace,
)
import warnings
from paddle.fluid.layers.utils import _hash_with_id
if is_compiled_with_cuda() and not is_compiled_with_rocm():
from paddle.fluid.core import CUDAGraph as CoreCUDAGraph
......
......@@ -12,5 +12,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid.core import CUDAStream as Stream # noqa: F401
from paddle.fluid.core import CUDAEvent as Event # noqa: F401
from paddle.fluid.core import CUDAStream as Stream # noqa: F401
......@@ -17,12 +17,13 @@ import time
import paddle
from paddle.hapi.callbacks import (
ProgBarLogger,
ModelCheckpoint,
LRScheduler,
CallbackList,
Callback,
CallbackList,
LRScheduler,
ModelCheckpoint,
ProgBarLogger,
)
from .interface import CollectionNames, get_collection
......
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
from enum import IntEnum
from enum import unique
import os
from enum import IntEnum, unique
import paddle
......
......@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import IntEnum, unique
import numpy as np
from enum import IntEnum
from enum import unique
from paddle.fluid import core
from paddle.fluid.core import Device # noqa: F401
......
......@@ -15,17 +15,23 @@
import copy
import logging
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.fluid import core
from .utils import is_naive_data_parallel, get_logger
from .utils import is_gradient_clip_op, __no_shape_var_type__
from .operators import find_compatible_distributed_operator_impls
from .dist_attribute import (
OperatorDistributedAttribute,
TensorDistributedAttribute,
)
from .dist_context import _node_id
from .dist_attribute import TensorDistributedAttribute
from .dist_attribute import OperatorDistributedAttribute
from .process_mesh import ProcessMesh
from .operators import find_compatible_distributed_operator_impls
from .process_group import get_world_process_group
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from .process_mesh import ProcessMesh
from .utils import (
__no_shape_var_type__,
get_logger,
is_gradient_clip_op,
is_naive_data_parallel,
)
def compute_compatible_process_mesh(process_mesh_list):
......
......@@ -12,10 +12,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import warnings
import logging
import warnings
import numpy as np
import paddle
from ..utils.log_utils import get_logger
......
......@@ -17,12 +17,10 @@ from functools import reduce
import paddle
from ..utils import _get_comm_group
from ..process_group import get_process_group
from ..cluster import LinkType
from ..dist_tensor import DistributedTensor
from ..utils import _get_idx_in_axis
from ..dist_tensor import DistributedTensor
from ..process_group import get_process_group
from ..utils import _get_comm_group, _get_idx_in_axis
COMM_OP_TYPE = [
"send_v2",
......
......@@ -18,9 +18,9 @@ from functools import reduce
import paddle
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from .base_cost import Cost
from ..operators.common import get_distributed_operator_impl_container
from ..dist_tensor import DistributedTensor
from ..operators.common import get_distributed_operator_impl_container
from .base_cost import Cost
class CostEstimator:
......@@ -544,9 +544,10 @@ class CostEstimator:
def get_cost_from_engine(engine, mode):
from ..utils import to_list
import copy
from ..utils import to_list
# Construct cost estimator by original main program
serial_main_prog = (
engine._fwd_main_progs[mode].clone()
......
......@@ -15,8 +15,8 @@
from functools import reduce
import paddle
from paddle.fluid.framework import Variable
from paddle.distributed.auto_parallel.dist_tensor import DistributedTensor
from paddle.fluid.framework import Variable
from .base_cost import Cost
......
......@@ -12,15 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import copy
import queue
from enum import Enum
import numpy as np
import paddle
from paddle.fluid import core
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.fluid import core
SUCC = 0 # successor
PRED = 1 # predecessor
......
......@@ -13,7 +13,9 @@
# limitations under the License
import copy
from paddle.fluid.framework import Variable
from .process_mesh import ProcessMesh
_g_tensor_dist_attr_field_keys = [
......
......@@ -14,16 +14,19 @@
import copy
from collections import defaultdict
from paddle.fluid import framework
from paddle.fluid.framework import set_flags
from paddle.fluid import core
from paddle.distributed.passes import PassContext
from .dist_tensor import DistributedTensor
from paddle.fluid import core, framework
from paddle.fluid.framework import set_flags
from .dist_op import DistributedOperator
from .dist_tensor import DistributedTensor
from .process_mesh import ProcessMesh
from .utils import _copy_dist_attr_to_cpp
from .utils import is_loss_grad_op, __no_shape_var_type__
from .utils import (
__no_shape_var_type__,
_copy_dist_attr_to_cpp,
is_loss_grad_op,
)
# There always exists a default context for user. And user can set it to another one.
_g_default_distributed_context = None
......
......@@ -13,19 +13,20 @@
# limitations under the License
import abc
import numpy as np
import paddle
from paddle.io import BatchSampler, IterableDataset
from paddle.fluid.dataloader.batch_sampler import (
_InfiniteIterableSampler,
DistributedBatchSampler,
_InfiniteIterableSampler,
)
from paddle.fluid.dataloader.dataloader_iter import (
_DatasetKind,
default_collate_fn,
default_convert_fn,
)
from paddle.io import BatchSampler, IterableDataset
class DistributedDataLoaderBase(metaclass=abc.ABCMeta):
......
......@@ -13,15 +13,19 @@
# limitations under the License
import copy
import paddle
from paddle.fluid.framework import Variable
from .dist_attribute import OperatorDistributedAttribute
from .dist_attribute import append_op_input_suffix
from .dist_attribute import append_op_output_suffix
from .dist_attribute import (
OperatorDistributedAttribute,
append_op_input_suffix,
append_op_output_suffix,
)
from .utils import (
__no_shape_var_type__,
convert_to_shard_spec,
verify_shard_spec,
__no_shape_var_type__,
)
......
......@@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License
import re
import os
import errno
import pickle
import logging
import os
import pickle
import re
import numpy as np
import paddle
import paddle
from paddle import fluid
from paddle.fluid import core
from .utils import get_dist_attr
from .process_group import _g_process_group_map
from ..utils.log_utils import get_logger
from .process_group import _g_process_group_map
from .utils import get_dist_attr
def check_filename(re_exp, filename):
......
......@@ -16,9 +16,10 @@ import copy
import inspect
import paddle
from paddle.fluid.framework import Parameter, Block, Variable
from paddle.fluid.framework import Block, Parameter, Variable
from .dist_attribute import TensorDistributedAttribute
from .utils import _linear_idx2coordinate, __no_shape_var_type__
from .utils import __no_shape_var_type__, _linear_idx2coordinate
class DistributedTensor:
......
......@@ -12,49 +12,48 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import copy
import logging
import random
import numbers
import numpy as np
import os
import random
from collections import defaultdict
import numpy as np
import paddle
import paddle.utils as utils
import paddle.distributed.auto_parallel.utils as auto_utils
import paddle.utils as utils
from paddle import fluid, static
from paddle.distributed import fleet
from paddle.fluid import Variable, core
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.fluid.executor import _to_name_str, global_scope
from paddle.fluid.framework import Operator
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.layers.utils import flatten
from paddle.metric import Metric
from paddle.static import InputSpec
from paddle.fluid import core
from paddle.fluid import Variable
from paddle.fluid.layers.utils import flatten
from paddle.fluid.executor import global_scope, _to_name_str
from paddle.fluid.framework import Operator, _non_static_mode
from paddle.fluid.framework import _current_expected_place as _get_device
from paddle.fluid.dygraph.parallel import ParallelEnv
from paddle.distributed import fleet
from ..utils.log_utils import get_logger
from .callbacks import config_callbacks
from .converter import Converter
from .helper import ProgramHelper
from .cluster import Cluster, get_default_cluster
from .planner_v2 import Planner
from .parallelizer_v2 import Parallelizer
from .dist_op import DistributedOperator
from .dist_saver import DistributedSaver
from .converter import Converter
from .cost.estimate_cost import get_cost_from_engine
from .dist_context import DistributedContext, get_default_distributed_context
from .dist_loader import (
DistributedDataLoaderFromGenerator,
DistributedDataLoader,
DistributedDataLoaderFromGenerator,
)
from .strategy import Strategy
from .process_group import new_process_group, get_all_process_groups
from .dist_context import DistributedContext, get_default_distributed_context
from .dist_op import DistributedOperator
from .dist_saver import DistributedSaver
from .helper import ProgramHelper
from .interface import CollectionNames, get_collection
from .cost.estimate_cost import get_cost_from_engine
from ..utils.log_utils import get_logger
from .parallelizer_v2 import Parallelizer
from .planner_v2 import Planner
from .process_group import get_all_process_groups, new_process_group
from .strategy import Strategy
class Engine:
......
......@@ -15,18 +15,14 @@
import logging
from collections import defaultdict
from paddle.nn import Layer
from paddle.jit import to_static, not_to_static
from paddle.fluid.framework import Parameter
from paddle.fluid.framework import program_guard
from paddle.fluid.executor import global_scope
from paddle.jit.dy2static.program_translator import (
StaticFunction,
)
from paddle.fluid.framework import Parameter, program_guard
from paddle.jit import not_to_static, to_static
from paddle.jit.dy2static.program_translator import StaticFunction
from paddle.nn import Layer
from .utils import to_list
from .utils import get_logger
from .converter import Converter
from .utils import get_logger, to_list
class ProxyLayer(Layer):
......
......@@ -13,15 +13,15 @@
# limitations under the License.
import paddle
from .process_mesh import ProcessMesh
from .process_mesh import get_current_process_mesh
from .dist_context import get_default_distributed_context
from .dist_tensor import DistributedTensor
from .dist_op import DistributedOperatorHelper
from .dist_tensor import DistributedTensor
from .process_mesh import ProcessMesh, get_current_process_mesh
from .utils import (
verify_shard_spec,
convert_to_dims_mapping,
__no_shape_var_type__,
convert_to_dims_mapping,
verify_shard_spec,
)
......
......@@ -12,13 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License
import os
import operator
import functools
import paddle
import operator
import os
from collections import deque
from .graph import Graph
import paddle
from .cluster import DeviceType
from .graph import Graph
from .process_group import get_process_group
......
......@@ -13,10 +13,12 @@
# limitations under the License
import abc
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from ..dist_attribute import OperatorDistributedAttribute
from ..utils import _get_comm_group, _get_corresponding_rank, is_optimize_op
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank, is_optimize_op
_g_distributed_operator_impl_containers = {}
......
......@@ -12,12 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .dist_default import DistributedDefaultImpl0
from ..utils import compute_compatible_and_update_dim_mapping
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0
class DistributedAssign(DistributedOperatorImplContainer):
......
......@@ -12,19 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from paddle.fluid import core
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from ..utils import set_var_dist_attr
from ..utils import set_dist_op_desc_original_id
from ..process_group import new_process_group
from ..dist_attribute import OperatorDistributedAttribute
from paddle.distributed.auto_parallel.process_group import (
get_world_process_group,
)
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from paddle.fluid import core
from ..dist_attribute import OperatorDistributedAttribute
from ..process_group import new_process_group
from ..utils import set_dist_op_desc_original_id, set_var_dist_attr
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
world_process_group = get_world_process_group()
......
......@@ -12,21 +12,31 @@
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import gradient_synchronization
from .common import register_distributed_operator_impl, is_parameter_related
from ..utils import is_prim_op
from ..utils import compute_compatible_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from ..cost import (
_g_op_cost_factory,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
build_dp_costs,
)
from ..dist_attribute import OperatorDistributedAttribute
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
from ..cost import _g_op_cost_factory
from ..cost import build_comp_desc_from_dist_op, build_dp_costs
from ..cost import build_comp_costs_from_descs
from ..utils import (
_get_comm_group,
_get_corresponding_rank,
compute_compatible_dim_mapping,
is_prim_op,
set_dist_op_desc_original_id,
)
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
gradient_synchronization,
is_parameter_related,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
__op_not_need_param_init__ = ["while", "cond"]
......
......@@ -12,18 +12,27 @@
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl, is_parameter_related
from .common import is_elementwise_op
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from ..cost import (
_g_op_cost_factory,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
build_dp_costs,
)
from ..utils import (
compute_compatible_dim_mapping,
compute_compatible_dims_mapping,
)
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
is_elementwise_op,
is_parameter_related,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0
from ..cost import _g_op_cost_factory
from ..cost import build_comp_desc_from_dist_op, build_dp_costs
from ..cost import build_comp_costs_from_descs
class DistributedElementwise(DistributedOperatorImplContainer):
......
......@@ -12,40 +12,43 @@
# See the License for the specific language governing permissions and
# limitations under the License
from .common import infer_shape
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import gradient_synchronization
from .common import (
naive_copy_op_dist_attr_for_program,
register_distributed_operator_impl,
set_comm_op_dist_attr_for_program,
from paddle.distributed.auto_parallel.cost.comm_op_cost import (
AllreduceSumOpCost,
IdentityOpCost,
)
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import compute_compatible_and_update_dim_mapping
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from paddle.fluid import core, unique_name
from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
from ..cost import (
EmbeddingGradOpCost,
EmbeddingOpCost,
build_comm_costs_from_descs,
build_comm_desc_from_dist_op,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
build_dp_costs,
)
from ..dist_attribute import OperatorDistributedAttribute
from ..process_group import new_process_group
from ..utils import (
_get_comm_group,
_get_idx_in_axis,
_get_corresponding_rank,
_get_idx_in_axis,
compute_compatible_and_update_dim_mapping,
is_dim_replicate,
is_dim_shard,
set_var_dist_attr,
)
from ..cost import build_comp_desc_from_dist_op, build_comm_desc_from_dist_op
from ..cost import (
build_comm_costs_from_descs,
build_comp_costs_from_descs,
build_dp_costs,
)
from ..cost import EmbeddingOpCost, EmbeddingGradOpCost
from paddle.distributed.auto_parallel.cost.comm_op_cost import (
AllreduceSumOpCost,
IdentityOpCost,
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
gradient_synchronization,
infer_shape,
naive_copy_op_dist_attr_for_program,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
set_comm_op_dist_attr_for_program,
)
......
......@@ -12,16 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from ..utils import compute_compatible_and_update_dim_mapping
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from ..cost import (
FillConstantBatchSizeLikeOpCost,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
)
from ..utils import compute_compatible_and_update_dim_mapping
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0
from ..cost import FillConstantBatchSizeLikeOpCost
from ..cost import build_comp_desc_from_dist_op
from ..cost import build_comp_costs_from_descs
class DistributedFillConstantBatchSizeLike(DistributedOperatorImplContainer):
......
......@@ -12,15 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard, is_dim_replicate
from ..utils import compute_compatible_and_update_dim_mapping
from .dist_default import DistributedDefaultImpl0
from ..utils import _get_comm_group, _get_corresponding_rank
from ..process_group import new_process_group
from ..utils import (
_get_comm_group,
_get_corresponding_rank,
compute_compatible_and_update_dim_mapping,
is_dim_replicate,
is_dim_shard,
)
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0
class DistributedFusedAttention(DistributedOperatorImplContainer):
......
......@@ -12,15 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard, is_dim_replicate
from ..utils import compute_compatible_and_update_dim_mapping
from .dist_default import DistributedDefaultImpl0
from ..utils import _get_comm_group, _get_corresponding_rank
from ..process_group import new_process_group
from ..utils import (
_get_comm_group,
_get_corresponding_rank,
compute_compatible_and_update_dim_mapping,
is_dim_replicate,
is_dim_shard,
)
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from .dist_default import DistributedDefaultImpl0
class DistributedFusedFeedForward(DistributedOperatorImplContainer):
......
......@@ -14,38 +14,50 @@
import copy
from .common import infer_shape
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .common import gradient_synchronization
from .common import is_parameter_related, set_comm_op_dist_attr_for_program
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..utils import set_dist_op_desc_original_id
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.auto_parallel.cost.comm_op_cost import (
AllreduceSumOpCost,
IdentityOpCost,
)
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
from .dist_default import DistributedDefaultImpl0
from paddle.fluid import core, unique_name
from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
from ..cost import (
build_comp_desc_from_dist_op,
MatmulGradOpCost,
MatmulOpCost,
MatmulV2GradOpCost,
MatmulV2OpCost,
MulGradOpCost,
MulOpCost,
build_comm_costs_from_descs,
build_comm_desc_from_dist_op,
build_comp_costs_from_descs,
build_comp_desc_from_dist_op,
build_dp_costs,
)
from ..cost import build_comm_costs_from_descs, build_comp_costs_from_descs
from ..cost import MatmulV2OpCost, MatmulOpCost, MulOpCost
from ..cost import MatmulV2GradOpCost, MatmulGradOpCost, MulGradOpCost
from paddle.distributed.auto_parallel.cost.comm_op_cost import (
AllreduceSumOpCost,
IdentityOpCost,
from ..dist_attribute import OperatorDistributedAttribute
from ..process_group import new_process_group
from ..utils import (
_get_comm_group,
_get_corresponding_rank,
compute_compatible_and_update_dim_mapping,
compute_compatible_dims_mapping,
is_dim_replicate,
is_dim_shard,
is_valid_list_index,
set_dist_op_desc_original_id,
)
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
gradient_synchronization,
infer_shape,
is_parameter_related,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
set_comm_op_dist_attr_for_program,
)
from .dist_default import DistributedDefaultImpl0
def trans_x_y_dims_mapping(trans_x, trans_y, x_dims_mapping, y_dims_mapping):
......
......@@ -14,26 +14,30 @@
import copy
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from paddle.fluid import core
from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype
from paddle.fluid.framework import Operator
from ..dist_attribute import (
OperatorDistributedAttribute,
TensorDistributedAttribute,
)
from ..process_group import new_process_group
from ..utils import is_dim_shard, is_dim_replicate, _get_corresponding_rank
from ..utils import (
_get_comm_group,
_get_corresponding_rank,
compute_compatible_dim_mapping,
is_dim_replicate,
is_dim_shard,
set_dist_op_desc_original_id,
_get_comm_group,
)
from ..dist_attribute import (
TensorDistributedAttribute,
OperatorDistributedAttribute,
from .common import (
DistributedOperatorImpl,
DistributedOperatorImplContainer,
register_distributed_operator_impl,
register_distributed_operator_impl_container,
)
from paddle.fluid import core
from paddle.fluid.framework import Operator
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
class DistributedPNorm(DistributedOperatorImplContainer):
def __init__(self, op_type):
......
(Diffs for 98 additional files are collapsed and not shown here.)