From adca3654bbb1e8d536bfee03949c1d6964322b79 Mon Sep 17 00:00:00 2001
From: co63oc
Date: Tue, 23 May 2023 15:12:45 +0800
Subject: [PATCH] Fix typos (#54015)

---
 paddle/.common_test_util.sh                      |  2 +-
 paddle/CMakeLists.txt                            |  4 ++--
 paddle/fluid/dialect/pd_type_storage.h           |  4 ++--
 paddle/fluid/imperative/amp_auto_cast.cc         |  4 ++--
 paddle/phi/backends/dynload/CMakeLists.txt       |  2 +-
 paddle/scripts/README.md                         |  2 +-
 paddle/scripts/musl_build/README.md              | 16 ++++++++--------
 .../fluid/tests/unittests/dist_fleet_ctr.py      |  2 +-
 .../fluid/tests/unittests/dist_sharding_save.py  |  2 +-
 .../fluid/tests/unittests/eager_op_test.py       | 12 ++++++------
 .../fluid/tests/unittests/gradient_checker.py    |  2 +-
 .../paddle/fluid/tests/unittests/prim_op_test.py |  6 +++---
 .../fluid/tests/unittests/seresnext_net.py       |  2 +-
 .../tests/unittests/test_activation_nn_grad.py   |  2 +-
 python/paddle/nn/__init__.py                     |  4 ++--
 python/paddle/utils/download.py                  |  4 ++--
 python/paddle/utils/flops.py                     | 10 +++++-----
 python/paddle/utils/install_check.py             |  6 +++---
 python/paddle/utils/layers_utils.py              |  2 +-
 python/paddle/vision/datasets/folder.py          |  6 +++---
 20 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/paddle/.common_test_util.sh b/paddle/.common_test_util.sh
index 4681e49a0f5..b27ee3a9b48 100644
--- a/paddle/.common_test_util.sh
+++ b/paddle/.common_test_util.sh
@@ -20,7 +20,7 @@ PORT_LOCK_FILE=/tmp/paddle_test_ports.lock
 touch $PORT_FILE $PORT_LOCK_FILE 2>/dev/null
 chmod a+rw $PORT_FILE $PORT_LOCK_FILE 2>/dev/null

-# acquire a range of ports that not used by other runtests.sh currentlly.
+# acquire a range of ports that are not used by other runtests.sh currently.
 # return 1 if ports is used by other, otherwise return 0.
 # NOTE: the acquire_ports/release_ports is interprocess mutexed.
 #
diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt
index 35556347cb3..924d0c2cb8c 100644
--- a/paddle/CMakeLists.txt
+++ b/paddle/CMakeLists.txt
@@ -17,7 +17,7 @@ add_subdirectory(ir)
 # (4) the tests binaries are generated in different directories, as the same as the
 #     folder of source file.

-# Now, we want to make all cc tests dynamically linked to the main paddle labrary,
+# Now, we want to make all cc tests dynamically linked to the main paddle library,
 # i.e., `libpaddle.so`, so we changes the logic of (2), (3), (4):
 # (2) calling `cc_test()` in each `CMakeLists.txt` will not `exactly` add test, but
 #     record all tests and its source files, the action of add tests is defered to HERE.
@@ -26,7 +26,7 @@ add_subdirectory(ir)
 # (3) the tests links dynamic libraries, `libpaddle.so`
 # (4) the tests are generated to the same directory, i.e., `CC_TESTS_DIR` defined above.

-# Next, (to be discusssed)
+# Next, (to be discussed)
 # (1) move all source files to same folder,
 # (2) naturally, and configure tests in only one `CMakeLists.txt`,
 # (3) cc tests support linking pre-built dynamic libraries. For example, use the dynamic
diff --git a/paddle/fluid/dialect/pd_type_storage.h b/paddle/fluid/dialect/pd_type_storage.h
index 1ea44eccc73..e52d81db1dd 100644
--- a/paddle/fluid/dialect/pd_type_storage.h
+++ b/paddle/fluid/dialect/pd_type_storage.h
@@ -39,7 +39,7 @@ struct hash> {
 namespace paddle {
 namespace dialect {
 ///
-/// \brief Define Parameteric TypeStorage for DenseTensorType.
+/// \brief Define Parametric TypeStorage for DenseTensorType.
 ///
 /// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
 /// following methods: (1)declare ParamKey, (2)define Construction method,
@@ -93,7 +93,7 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
         offset_(offset) {}

   ///
-  /// \brief Each derived TypeStorage must define a Construc method, which
+  /// \brief Each derived TypeStorage must define a Construct method, which
   /// StorageManager uses to construct a derived TypeStorage.
   ///
   static DenseTensorTypeStorage *Construct(ParamKey key) {
diff --git a/paddle/fluid/imperative/amp_auto_cast.cc b/paddle/fluid/imperative/amp_auto_cast.cc
index 85633f1b9cc..3c643024cfa 100644
--- a/paddle/fluid/imperative/amp_auto_cast.cc
+++ b/paddle/fluid/imperative/amp_auto_cast.cc
@@ -147,7 +147,7 @@ AmpOperators::AmpOperators()
       OpSupportedInfos("GPU", paddle::framework::proto::VarType::BF16));
   unsupported_bf16_ops_->insert(unsupported_ops_gpu_bf16.begin(),
                                 unsupported_ops_gpu_bf16.end());
-// NOTE: GPU/XPU is compiled seperatly.
+// NOTE: GPU/XPU is compiled separately.
 #elif defined(PADDLE_WITH_XPU)
   auto unsupported_ops_xpu_fp16 = std::get<2>(
       OpSupportedInfos("XPU", paddle::framework::proto::VarType::FP16));
@@ -244,7 +244,7 @@ inline bool NeedCast(const std::shared_ptr& var) {
       paddle::platform::is_cuda_pinned_place(place) ||
       paddle::platform::is_xpu_place(place) ||
       paddle::platform::is_custom_place(place)) {
-    // CudaPinndePlace is added for varbase created by dataloader
+    // CudaPinnedPlace is added for varbase created by dataloader
     if (data_type == paddle::framework::proto::VarType::FP32 ||
         data_type == paddle::framework::proto::VarType::FP16 ||
         data_type == paddle::framework::proto::VarType::BF16) {
diff --git a/paddle/phi/backends/dynload/CMakeLists.txt b/paddle/phi/backends/dynload/CMakeLists.txt
index a96af96adac..2d4e84beb69 100644
--- a/paddle/phi/backends/dynload/CMakeLists.txt
+++ b/paddle/phi/backends/dynload/CMakeLists.txt
@@ -31,7 +31,7 @@ if(WITH_ROCM)
 endif()

 # There is no macOS version of NCCL.
-# Disable nvrtc and cuda_driver api on MacOS, and only do a early test on Linux and Windows.
+# Disable nvrtc and cuda_driver api on macOS, and only do an early test on Linux and Windows.
 if(NOT APPLE)
   list(APPEND CUDA_SRCS nvrtc.cc cuda_driver.cc)
   if(WITH_NCCL)
diff --git a/paddle/scripts/README.md b/paddle/scripts/README.md
index 2c92d0ee1ae..fadad81d514 100644
--- a/paddle/scripts/README.md
+++ b/paddle/scripts/README.md
@@ -69,7 +69,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
 | `WITH_PYTHON` | ON | Build with python support. Turn this off if build is only for capi. |
 | `WITH_STYLE_CHECK` | ON | Check the code style when building. |
 | `PYTHON_ABI` | "" | Build for different python ABI support, can be cp27-cp27m or cp27-cp27mu |
-| `RUN_TEST` | OFF | Run unit test immediently after the build. |
+| `RUN_TEST` | OFF | Run unit tests immediately after the build. |

 ## Docker Images
diff --git a/paddle/scripts/musl_build/README.md b/paddle/scripts/musl_build/README.md
index 762e1225817..f4b48c2eb1d 100644
--- a/paddle/scripts/musl_build/README.md
+++ b/paddle/scripts/musl_build/README.md
@@ -37,12 +37,12 @@ mkdir -p build && cd build

 ```bash
-# setup proxy addresss, when the speed of internet is not good.
+# set up a proxy address when the internet connection is slow.
 # export HTTP_PROXY='http://127.0.0.1:8080'
 # export HTTPS_PROXY='https://127.0.0.1:8080'

 # invoke build paddle script
-# all arguments, such as -j8 optinal, is past to make procedure.
+# all arguments, such as the optional -j8, are passed to the make procedure.
 ../paddle/scripts/musl_build/build_paddle.sh -j8

 # find output wheel package
 ls ./output/*.whl
@@ -57,7 +57,7 @@ ls ./output/*.whl

 # checkout paddle source code
 git clone https://github.com/PaddlePaddle/Paddle.git

-# entery paddle directory
+# enter paddle directory
 cd ./Paddle

 # build docker image
@@ -87,7 +87,7 @@ make -j8

 # Scripts

 1. **build_docker.sh**

-   compiling docker building script. it use alpine linux 3.10 as musl linux build enironment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.
+   compiling docker building script. it uses alpine linux 3.10 as the musl linux build environment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.

    environment variables:
    - PYTHON_VERSION: the version of python used for image building, default=3.7.
@@ -104,10 +104,10 @@ make -j8

    environment variables:
    - BUILD_MAN: build the paddle manually, default=0.
-   - WITH_TEST: build with unitest, and run unitest check, default=0.
+   - WITH_TEST: build with unit tests and run the unit test check, default=0.
    - WITH_PRUNE_CONTAINER: remove the container after building, default=1.
-   - CTEST_*: CTEST flages used for unit test.
-   - FLAGS_*: build flages used for paddle building.
+   - CTEST_*: CTEST flags used for unit tests.
+   - FLAGS_*: build flags used for paddle building.
    - HTTP_PROXY: use http proxy.
    - HTTPS_PROXY: use https proxy.
@@ -118,4 +118,4 @@ make -j8
 - **config.sh**: build config script for configure compiling option setting.
 - **Dockerfile**: build docker definition file.
 - **package.txt**: build required develop packages for alpine linux.
--- **REAME.md**: this file.
+- **README.md**: this file.
diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
index 76b246a0987..7ac56dfff58 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
@@ -395,7 +395,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         fleet.save_persistables(exe, patch_dirname, None, 5)
         fleet.check_save_pre_patch_done()

-        # add for gpugrahp
+        # add for gpu graph
         fleet.save_cache_table(0, 0)
         fleet.shrink()

diff --git a/python/paddle/fluid/tests/unittests/dist_sharding_save.py b/python/paddle/fluid/tests/unittests/dist_sharding_save.py
index 34287e8b7f4..9466e325a73 100755
--- a/python/paddle/fluid/tests/unittests/dist_sharding_save.py
+++ b/python/paddle/fluid/tests/unittests/dist_sharding_save.py
@@ -87,7 +87,7 @@ def runtime_main():

 if __name__ == "__main__":
-    # NOTE(liangjianzhong): dist unittest should be imlpement using runtime_main in test_dist_base.py
+    # NOTE(liangjianzhong): dist unittest should be implemented using runtime_main in test_dist_base.py
     # but the runtime_main in test_dist_base.py use the fleet, DistributedStrategy from
     # paddle.incubate.distributed.fleet.collective which is not support by sharding (paddle.distributed.fleet).
     # this should be update in future.
diff --git a/python/paddle/fluid/tests/unittests/eager_op_test.py b/python/paddle/fluid/tests/unittests/eager_op_test.py
index ca16e71a8ea..20195a1bf16 100644
--- a/python/paddle/fluid/tests/unittests/eager_op_test.py
+++ b/python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -67,7 +67,7 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):
     Args:
         api_fn(callable):  paddle api function
         in_specs(list[tuple]): list of shape and dtype information for constructing input tensor of api_fn, such as [(shape, dtype), (shape, dtype)].
-        expected_dtype(list[str]): expected dtype of output tensor.
+        expect_dtypes(list[str]): expected dtype of output tensor.
         target_index(int): indicate which one from in_specs to infer the dtype of output.
         config(dict): other arguments of paddle api function
@@ -1364,14 +1364,14 @@ class OpTest(unittest.TestCase):
     def _get_need_run_ops(self, op_desc, fwd_op_desc=None):
         """Postorder traversal of the 'grad' tree to get all ops that need to run during inplace test.
-        An op needs to run druing inplace check if,
+        An op needs to run during inplace check if,
         (1) it has infer_inplace,
         (2) it has infer_inplace in its grad descendants. (since we need its outputs as to construct its grad's inputs)

         Args:
             op_desc (OpDesc): The op_desc of current op.
             fwd_op_desc (OpDesc): The op_desc of current op's forward op, None if current op has no forward op.
-                Eg. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
+                E.g. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
         Returns:
             need_run_ops (list[(op_desc, fwd_op_desc)]): The ops that need to run during inplace test.
@@ -1540,7 +1540,7 @@ class OpTest(unittest.TestCase):
     def check_inplace_output_with_place(
         self, place, no_check_set=None, inplace_atol=None
     ):
-        """Chech the inplace correctness of given op, its grad op, its grad_grad op, etc.
+        """Check the inplace correctness of given op, its grad op, its grad_grad op, etc.
         (1) Get all ops need to run. (see conditions in _get_need_run_ops())
         (2) Run op in need_run_ops, and do inplace check if it has infer_inplace.
@@ -2115,7 +2115,7 @@ class OpTest(unittest.TestCase):
             for var_name in var_names:
                 i = find_fetch_index(var_name, fetch_list)
                 if i == -1:
-                    # The output is dispensiable or intermediate.
+                    # The output is dispensable or intermediate.
                     break
                 out = fetch_outs[i]
                 if isinstance(out, core.LoDTensor):
@@ -2818,7 +2818,7 @@ class OpTest(unittest.TestCase):
                 user_defined_grad_outputs = [user_defined_grad_outputs]
             grad_outputs = []
             for grad_out_value in user_defined_grad_outputs:
-                # `presistable` is used to avoid executor create new var in local scope
+                # `persistable` is used to prevent the executor from creating a new var in the local scope
                 var = block.create_var(
                     shape=grad_out_value.shape,
                     dtype=grad_out_value.dtype,
diff --git a/python/paddle/fluid/tests/unittests/gradient_checker.py b/python/paddle/fluid/tests/unittests/gradient_checker.py
index 022d6372ef9..68a3e040a09 100644
--- a/python/paddle/fluid/tests/unittests/gradient_checker.py
+++ b/python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -549,7 +549,7 @@ def get_static_double_grad(
         program (Program|None): a Program with forward pass.
             If None, use fluid.default_main_program().
     Returns:
-        A list of numpy array that stores second derivative result calulated by static graph.
+        A list of numpy arrays that stores the second derivative results calculated by the static graph.
""" if program is None: diff --git a/python/paddle/fluid/tests/unittests/prim_op_test.py b/python/paddle/fluid/tests/unittests/prim_op_test.py index 22f050fc8f0..0ecad992594 100644 --- a/python/paddle/fluid/tests/unittests/prim_op_test.py +++ b/python/paddle/fluid/tests/unittests/prim_op_test.py @@ -734,7 +734,7 @@ class PrimForwardChecker: def check_jit_comp_with_cinn(self): if self.prim_op_type == "prim": return - # cinn doesn't suppoort cpu place + # cinn doesn't support cpu place if ( type(self.place) == paddle.fluid.libpaddle.CPUPlace and self.enable_cinn @@ -869,7 +869,7 @@ class PrimGradChecker(PrimForwardChecker): def get_output_dict(self, np_outputs, api_outputs, outputs_sig): assert len(api_outputs) <= len(outputs_sig), ( - "forward api outputs length must be the less than or equal to KernelSignature outputs,but recive %s and %s" + "forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s" ) % (len(api_outputs), len(outputs_sig)) output_dict = {} for i in range(len(api_outputs)): @@ -1249,7 +1249,7 @@ class PrimGradChecker(PrimForwardChecker): net.forward.program_cache.clear() def check_jit_comp_with_cinn(self): - # cinn doesen't support cpu place + # cinn doesn't support cpu place if ( type(self.place) is paddle.fluid.libpaddle.CPUPlace and self.enable_cinn diff --git a/python/paddle/fluid/tests/unittests/seresnext_net.py b/python/paddle/fluid/tests/unittests/seresnext_net.py index a2d296a1981..614a1d030d1 100644 --- a/python/paddle/fluid/tests/unittests/seresnext_net.py +++ b/python/paddle/fluid/tests/unittests/seresnext_net.py @@ -100,7 +100,7 @@ def shortcut(input, ch_out, stride): def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): # The number of first 1x1 convolutional channels for each bottleneck build block - # was halved to reduce the compution cost. + # was halved to reduce the computation cost. conv0 = conv_bn_layer( input=input, num_filters=num_filters, filter_size=1, act='relu' ) diff --git a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py index cddeb3daeed..58a2085a3a8 100644 --- a/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_activation_nn_grad.py @@ -409,7 +409,7 @@ class TestSquareDoubleGradCheck(unittest.TestCase): @prog_scope() def func(self, place): - # the shape of input variable should be clearly specified, not inlcude -1. + # the shape of input variable should be clearly specified, not include -1. shape = [2, 3, 7, 9] eps = 0.005 dtype = np.float64 diff --git a/python/paddle/nn/__init__.py b/python/paddle/nn/__init__.py index d682255a343..2cad4950fde 100644 --- a/python/paddle/nn/__init__.py +++ b/python/paddle/nn/__init__.py @@ -157,7 +157,7 @@ from .layer.layers import Layer # noqa: F401 from .utils.spectral_norm_hook import spectral_norm -# TODO: remove loss, keep it for too many used in unitests +# TODO: remove loss, keep it for too many used in unittests from .layer import loss # noqa: F401 from . 
@@ -171,7 +171,7 @@ from paddle.utils import deprecated

 @deprecated(
     since="2.0.0",
-    update_to="paddle.nn.funcitional.diag_embed",
+    update_to="paddle.nn.functional.diag_embed",
     level=1,
     reason="diag_embed in paddle.nn will be removed in future",
 )
diff --git a/python/paddle/utils/download.py b/python/paddle/utils/download.py
index d03a7fe5330..c7551945f5c 100644
--- a/python/paddle/utils/download.py
+++ b/python/paddle/utils/download.py
@@ -182,7 +182,7 @@ def _get_download(url, fullname):
                 "{}!".format(url, req.status_code)
             )

-        # For protecting download interupted, download to
+        # To protect against interrupted downloads, download to
         # tmp_fullname firstly, move tmp_fullname to fullname
         # after download finished
         tmp_fullname = fullname + "_tmp"
@@ -295,7 +295,7 @@ def _decompress(fname):
     """
     logger.info(f"Decompressing {fname}...")

-    # For protecting decompressing interupted,
+    # To protect against interrupted decompression,
     # decompress to fpath_tmp directory firstly, if decompress
     # successed, move decompress files to fpath and delete
     # fpath_tmp and remove download compress file.
diff --git a/python/paddle/utils/flops.py b/python/paddle/utils/flops.py
index 2ff1d582cfc..de451508a65 100644
--- a/python/paddle/utils/flops.py
+++ b/python/paddle/utils/flops.py
@@ -176,7 +176,7 @@ def _elementwise_flops_compute(input_shapes, attrs):

 def _elementwise_add_flops(input_shapes, attrs):
     """FLOPs computation for elementwise_add op.
     For elementwise_add(input,other):
-        input_shapes = [shape_of_input, shape_of_ohther]
+        input_shapes = [shape_of_input, shape_of_other]
         shape_of_input = [dim1, dim2, dim3 ...]
         shape_of_other = [odim1, odim2, odim3...]
         equation: flops = max(dim1, odim1) * max(dim2, odim2) * max()...
@@ -188,7 +188,7 @@ def _elementwise_mul_flops(input_shapes, attrs):
     """FLOPs computation for elementwise_mul op.
     For elementwise_mul(input,other):
-        input_shapes = [shape_of_input, shape_of_ohther]
+        input_shapes = [shape_of_input, shape_of_other]
         shape_of_input = [dim1, dim2, dim3 ...]
         shape_of_other = [odim1, odim2, odim3...]
         equation: flops = max(dim1, odim1) * max(dim2, odim2)* max()...
@@ -200,7 +200,7 @@ def _elementwise_div_flops(input_shapes, attrs):
     """FLOPs computation for elementwise_div op.
     For elementwise_div(input,other):
-        input_shapes = [shape_of_input, shape_of_ohther]
+        input_shapes = [shape_of_input, shape_of_other]
         shape_of_input = [dim1, dim2, dim3 ...]
         shape_of_other = [odim1, odim2, odim3...]
         equation: flops = max(dim1,odim1)*max(dim2,odim2)*max()...
@@ -237,7 +237,7 @@ def _layer_norm_flops(input_shapes, attrs):

 def _matmul_flops(input_shapes, attrs):
     """FLOPs computation for matmul op.
     For matmul(input,other):
-        input_shapes = [shape_of_input, shape_of_ohther]
+        input_shapes = [shape_of_input, shape_of_other]
         shape_of_input = [dim1,dim2 ...dim_n_1,dim_n] length:n
         shape_of_other = [odim1,odim2 ... odim(n-m)... odim_m_1,dim_m] length:m
         suppose n > m and dim_n = odim_m_1:
@@ -274,7 +274,7 @@ def _matmul_flops(input_shapes, attrs):

 def _matmul_v2_flops(input_shapes, attrs):
     """FLOPs computation for matmul_v2 op.
     For matmul_v2(input,other):
-        input_shapes = [shape_of_input, shape_of_ohther]
+        input_shapes = [shape_of_input, shape_of_other]
         shape_of_input = [dim1, dim2 ...dim_n_1, dim_n] length:n
         shape_of_other = [odim1, odim2 ... odim(n-m) ... odim_m_1, dim_m] length:m
         suppose n > m and dim_n = odim_m_1:
diff --git a/python/paddle/utils/install_check.py b/python/paddle/utils/install_check.py
index 548d16e8a96..9fd42947e0a 100644
--- a/python/paddle/utils/install_check.py
+++ b/python/paddle/utils/install_check.py
@@ -51,7 +51,7 @@ def _prepare_data():

 def _is_cuda_available():
     """
-    Check whether CUDA is avaiable.
+    Check whether CUDA is available.
     """
     try:
         assert len(paddle.static.cuda_places()) > 0
@@ -67,7 +67,7 @@ def _is_cuda_available():

 def _is_xpu_available():
     """
-    Check whether XPU is avaiable.
+    Check whether XPU is available.
     """
     try:
         assert len(paddle.static.xpu_places()) > 0
@@ -154,7 +154,7 @@ def _run_static_single(use_cuda, use_xpu):

 def train_for_run_parallel():
     """
-    train script for parallel traning check
+    train script for parallel training check
     """

     # to avoid cyclic import
diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py
index fbb0781db03..3d8b55dd2dc 100644
--- a/python/paddle/utils/layers_utils.py
+++ b/python/paddle/utils/layers_utils.py
@@ -27,7 +27,7 @@ from ..fluid.framework import Block, Variable, in_dygraph_mode
 def convert_to_list(value, n, name, dtype=int):
     """
     Converts a single numerical type or iterable of numerical
-    types into an numerical type list.
+    types into a numerical type list.

     Arguments:
       value: The value to validate and convert. Could an int, or any iterable
diff --git a/python/paddle/vision/datasets/folder.py b/python/paddle/vision/datasets/folder.py
index 650114cd19d..aecc298fbba 100644
--- a/python/paddle/vision/datasets/folder.py
+++ b/python/paddle/vision/datasets/folder.py
@@ -24,7 +24,7 @@ __all__ = []

 def has_valid_extension(filename, extensions):
-    """Checks if a file is a vilid extension.
+    """Checks if a file has a valid extension.

     Args:
         filename (str): path to a file
@@ -363,7 +363,7 @@ class ImageFolder(Dataset):
                     dirname = list(subpath.keys())[0]
                     make_directory(root / dirname, subpath[dirname])

-            directory_hirerarchy = [
+            directory_hierarchy = [
                 "abc.jpg",
                 "def.png",
                 {"ghi": [
@@ -376,7 +376,7 @@ class ImageFolder(Dataset):
             # You can replace this with any directory to explore the structure
             # of generated data. e.g. fake_data_dir = "./temp_dir"
             fake_data_dir = tempfile.mkdtemp()
-            make_directory(fake_data_dir, directory_hirerarchy)
+            make_directory(fake_data_dir, directory_hierarchy)
             image_folder_1 = ImageFolder(fake_data_dir)
             print(image_folder_1.samples)
             # ['./temp_dir/abc.jpg', './temp_dir/def.png',
--
GitLab
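
Note on the FLOPs docstrings touched in `python/paddle/utils/flops.py` above: the elementwise ones all state the same broadcast rule, flops = max(dim1, odim1) * max(dim2, odim2) * ... The minimal sketch below illustrates only that rule; it is not Paddle's actual `_elementwise_flops_compute`, the helper name `elementwise_flops` is hypothetical, and it assumes both shapes have the same rank, matching the positional pairing in the docstrings' equation.

```python
# A minimal sketch of the broadcast FLOPs rule quoted in the flops.py
# docstrings above -- NOT Paddle's implementation. Assumes both shapes
# have the same rank, as the docstrings' positional equation does.
def elementwise_flops(shape_of_input, shape_of_other):
    # flops = max(dim1, odim1) * max(dim2, odim2) * ...
    flops = 1
    for dim, odim in zip(shape_of_input, shape_of_other):
        flops *= max(dim, odim)
    return flops

# e.g. broadcasting a [4, 1, 8] tensor against a [4, 16, 8] tensor
# touches max(4,4) * max(1,16) * max(8,8) = 512 output elements.
assert elementwise_flops([4, 1, 8], [4, 16, 8]) == 512
```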