Unverified commit adca3654, authored by co63oc and committed by GitHub

Fix typos (#54015)

Parent 11aa5edd
@@ -20,7 +20,7 @@ PORT_LOCK_FILE=/tmp/paddle_test_ports.lock
touch $PORT_FILE $PORT_LOCK_FILE 2>/dev/null
chmod a+rw $PORT_FILE $PORT_LOCK_FILE 2>/dev/null
-# acquire a range of ports that not used by other runtests.sh currentlly.
+# acquire a range of ports that not used by other runtests.sh currently.
# return 1 if ports is used by other, otherwise return 0.
# NOTE: the acquire_ports/release_ports is interprocess mutexed.
#
......
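The shell comment above describes the acquire/release pattern: concurrent runtests.sh invocations coordinate through $PORT_FILE, serialized by an exclusive lock on $PORT_LOCK_FILE, so no two runs grab overlapping port ranges. Below is a minimal Python sketch of that interprocess-mutex idea; the one-"start count"-pair-per-line file format is assumed for illustration and is not taken from the script.

```python
import fcntl
from pathlib import Path

PORT_FILE = "/tmp/paddle_test_ports"            # shared record of ranges in use
PORT_LOCK_FILE = "/tmp/paddle_test_ports.lock"  # lock file guarding PORT_FILE

def acquire_ports(start: int, count: int) -> bool:
    """Return True if [start, start+count) was free and is now recorded as used."""
    with open(PORT_LOCK_FILE, "w") as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)  # interprocess mutex; released when closed
        used = set()
        for line in Path(PORT_FILE).read_text().splitlines():
            lo, n = map(int, line.split())
            used.update(range(lo, lo + n))
        if used & set(range(start, start + count)):
            return False  # another run currently owns part of this range
        with open(PORT_FILE, "a") as f:
            f.write(f"{start} {count}\n")
        return True
```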
@@ -17,7 +17,7 @@ add_subdirectory(ir)
# (4) the tests binaries are generated in different directories, as the same as the
# folder of source file.
-# Now, we want to make all cc tests dynamically linked to the main paddle labrary,
+# Now, we want to make all cc tests dynamically linked to the main paddle library,
# i.e., `libpaddle.so`, so we changes the logic of (2), (3), (4):
# (2) calling `cc_test()` in each `CMakeLists.txt` will not `exactly` add test, but
# record all tests and its source files, the action of add tests is defered to HERE.
@@ -26,7 +26,7 @@ add_subdirectory(ir)
# (3) the tests links dynamic libraries, `libpaddle.so`
# (4) the tests are generated to the same directory, i.e., `CC_TESTS_DIR` defined above.
-# Next, (to be discusssed)
+# Next, (to be discussed)
# (1) move all source files to same folder,
# (2) naturally, and configure tests in only one `CMakeLists.txt`,
# (3) cc tests support linking pre-built dynamic libraries. For example, use the dynamic
......
@@ -39,7 +39,7 @@ struct hash<std::vector<T>> {
namespace paddle {
namespace dialect {
///
-/// \brief Define Parameteric TypeStorage for DenseTensorType.
+/// \brief Define Parametric TypeStorage for DenseTensorType.
///
/// NOTE(zhangbo9674): The derived TypeStorage class needs to implement the
/// following methods: (1)declare ParamKey, (2)define Construction method,
@@ -93,7 +93,7 @@ struct DenseTensorTypeStorage : public ir::TypeStorage {
offset_(offset) {}
///
-/// \brief Each derived TypeStorage must define a Construc method, which
+/// \brief Each derived TypeStorage must define a Construct method, which
/// StorageManager uses to construct a derived TypeStorage.
///
static DenseTensorTypeStorage *Construct(ParamKey key) {
......
@@ -147,7 +147,7 @@ AmpOperators::AmpOperators()
OpSupportedInfos("GPU", paddle::framework::proto::VarType::BF16));
unsupported_bf16_ops_->insert(unsupported_ops_gpu_bf16.begin(),
unsupported_ops_gpu_bf16.end());
-// NOTE: GPU/XPU is compiled seperatly.
+// NOTE: GPU/XPU is compiled separately.
#elif defined(PADDLE_WITH_XPU)
auto unsupported_ops_xpu_fp16 = std::get<2>(
OpSupportedInfos("XPU", paddle::framework::proto::VarType::FP16));
@@ -244,7 +244,7 @@ inline bool NeedCast(const std::shared_ptr<VarType>& var) {
paddle::platform::is_cuda_pinned_place(place) ||
paddle::platform::is_xpu_place(place) ||
paddle::platform::is_custom_place(place)) {
-// CudaPinndePlace is added for varbase created by dataloader
+// CudaPinnedPlace is added for varbase created by dataloader
if (data_type == paddle::framework::proto::VarType::FP32 ||
data_type == paddle::framework::proto::VarType::FP16 ||
data_type == paddle::framework::proto::VarType::BF16) {
......
@@ -31,7 +31,7 @@ if(WITH_ROCM)
endif()
# There is no macOS version of NCCL.
-# Disable nvrtc and cuda_driver api on MacOS, and only do a early test on Linux and Windows.
+# Disable nvrtc and cuda_driver api on macOS, and only do an early test on Linux and Windows.
if(NOT APPLE)
list(APPEND CUDA_SRCS nvrtc.cc cuda_driver.cc)
if(WITH_NCCL)
......
@@ -69,7 +69,7 @@ Users can specify the following Docker build arguments with either "ON" or "OFF"
| `WITH_PYTHON` | ON | Build with python support. Turn this off if build is only for capi. |
| `WITH_STYLE_CHECK` | ON | Check the code style when building. |
| `PYTHON_ABI` | "" | Build for different python ABI support, can be cp27-cp27m or cp27-cp27mu |
-| `RUN_TEST` | OFF | Run unit test immediently after the build. |
+| `RUN_TEST` | OFF | Run unit test immediately after the build. |
## Docker Images
......
@@ -37,12 +37,12 @@ mkdir -p build && cd build
```bash
-# setup proxy addresss, when the speed of internet is not good.
+# setup proxy address, when the speed of internet is not good.
# export HTTP_PROXY='http://127.0.0.1:8080'
# export HTTPS_PROXY='https://127.0.0.1:8080'
# invoke build paddle script
-# all arguments, such as -j8 optinal, is past to make procedure.
+# all arguments, such as -j8 optional, is past to make procedure.
../paddle/scripts/musl_build/build_paddle.sh -j8
# find output wheel package
@@ -57,7 +57,7 @@ ls ./output/*.whl
# checkout paddle source code
git clone https://github.com/PaddlePaddle/Paddle.git
-# entery paddle directory
+# enter paddle directory
cd ./Paddle
# build docker image
@@ -87,7 +87,7 @@ make -j8
# Scripts
1. **build_docker.sh**
-compiling docker building script. it use alpine linux 3.10 as musl linux build enironment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.
+compiling docker building script. it use alpine linux 3.10 as musl linux build environment. it will try to install all the compiling tools, development packages, and python requirements for paddle musl compiling.
environment variables:
- PYTHON_VERSION: the version of python used for image building, default=3.7.
@@ -104,10 +104,10 @@ make -j8
environment variables:
- BUILD_MAN: build the paddle manually, default=0.
-- WITH_TEST: build with unitest, and run unitest check, default=0.
+- WITH_TEST: build with unittest, and run unittest check, default=0.
- WITH_PRUNE_CONTAINER: remove the container after building, default=1.
-- CTEST_*: CTEST flages used for unit test.
+- CTEST_*: CTEST flags used for unit test.
-- FLAGS_*: build flages used for paddle building.
+- FLAGS_*: build flags used for paddle building.
- HTTP_PROXY: use http proxy.
- HTTPS_PROXY: use https proxy.
@@ -118,4 +118,4 @@ make -j8
- **config.sh**: build config script for configure compiling option setting.
- **Dockerfile**: build docker definition file.
- **package.txt**: build required develop packages for alpine linux.
-- **REAME.md**: this file.
+- **README.md**: this file.
@@ -395,7 +395,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
fleet.save_persistables(exe, patch_dirname, None, 5)
fleet.check_save_pre_patch_done()
-# add for gpugrahp
+# add for gpu graph
fleet.save_cache_table(0, 0)
fleet.shrink()
......
@@ -87,7 +87,7 @@ def runtime_main():
if __name__ == "__main__":
-# NOTE(liangjianzhong): dist unittest should be imlpement using runtime_main in test_dist_base.py
+# NOTE(liangjianzhong): dist unittest should be implemented using runtime_main in test_dist_base.py
# but the runtime_main in test_dist_base.py use the fleet, DistributedStrategy from
# paddle.incubate.distributed.fleet.collective which is not support by sharding (paddle.distributed.fleet).
# this should be update in future.
......
@@ -67,7 +67,7 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs):
Args:
api_fn(callable): paddle api function
in_specs(list[tuple]): list of shape and dtype information for constructing input tensor of api_fn, such as [(shape, dtype), (shape, dtype)].
-expected_dtype(list[str]): expected dtype of output tensor.
+expect_dtypes(list[str]): expected dtype of output tensor.
target_index(int): indicate which one from in_specs to infer the dtype of output.
config(dict): other arguments of paddle api function
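For readers unfamiliar with this helper, here is a simplified, self-contained sketch of what such a dtype check does in the single-output case. The sketch name, the relu example, and the dropped handling of `target_index` and multi-output APIs are illustrative assumptions, not taken from the diff.

```python
import paddle

def check_out_dtype_sketch(api_fn, in_specs, expect_dtypes, target_index=0, **configs):
    # Build a dummy tensor for each (shape, dtype) spec, call the API, and
    # verify the output dtype. target_index is kept only for signature parity.
    inputs = [paddle.zeros(list(shape), dtype=dtype) for shape, dtype in in_specs]
    out = api_fn(*inputs, **configs)
    expected = paddle.zeros([1], dtype=expect_dtypes[0]).dtype  # reference dtype enum
    assert out.dtype == expected, f"expected {expect_dtypes[0]}, got {out.dtype}"

# e.g. relu should preserve a float32 input dtype
check_out_dtype_sketch(paddle.nn.functional.relu, [((2, 3), "float32")], ["float32"])
```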
@@ -1364,14 +1364,14 @@ class OpTest(unittest.TestCase):
def _get_need_run_ops(self, op_desc, fwd_op_desc=None):
"""Postorder traversal of the 'grad' tree to get all ops that need to run during inplace test.
-An op needs to run druing inplace check if,
+An op needs to run during inplace check if,
(1) it has infer_inplace,
(2) it has infer_inplace in its grad descendants. (since we need its outputs as to construct its grad's inputs)
Args:
op_desc (OpDesc): The op_desc of current op.
fwd_op_desc (OpDesc): The op_desc of current op's forward op, None if current op has no forward op.
-Eg. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
+E.g. relu's fwd_op is None, relu_grad's fwd_op is relu, relu_grad_grad's fwd_op is relu_grad, etc.
Returns:
need_run_ops (list[(op_desc, fwd_op_desc)]): The ops that need to run during inplace test.
@@ -1540,7 +1540,7 @@ class OpTest(unittest.TestCase):
def check_inplace_output_with_place(
self, place, no_check_set=None, inplace_atol=None
):
-"""Chech the inplace correctness of given op, its grad op, its grad_grad op, etc.
+"""Check the inplace correctness of given op, its grad op, its grad_grad op, etc.
(1) Get all ops need to run. (see conditions in _get_need_run_ops())
(2) Run op in need_run_ops, and do inplace check if it has infer_inplace.
@@ -2115,7 +2115,7 @@ class OpTest(unittest.TestCase):
for var_name in var_names:
i = find_fetch_index(var_name, fetch_list)
if i == -1:
-# The output is dispensiable or intermediate.
+# The output is dispensable or intermediate.
break
out = fetch_outs[i]
if isinstance(out, core.LoDTensor):
@@ -2818,7 +2818,7 @@ class OpTest(unittest.TestCase):
user_defined_grad_outputs = [user_defined_grad_outputs]
grad_outputs = []
for grad_out_value in user_defined_grad_outputs:
-# `presistable` is used to avoid executor create new var in local scope
+# `persistable` is used to avoid executor create new var in local scope
var = block.create_var(
shape=grad_out_value.shape,
dtype=grad_out_value.dtype,
......
@@ -549,7 +549,7 @@ def get_static_double_grad(
program (Program|None): a Program with forward pass.
If None, use fluid.default_main_program().
Returns:
-A list of numpy array that stores second derivative result calulated by static graph.
+A list of numpy array that stores second derivative result calculated by static graph.
"""
if program is None:
......
@@ -734,7 +734,7 @@ class PrimForwardChecker:
def check_jit_comp_with_cinn(self):
if self.prim_op_type == "prim":
return
-# cinn doesn't suppoort cpu place
+# cinn doesn't support cpu place
if (
type(self.place) == paddle.fluid.libpaddle.CPUPlace
and self.enable_cinn
@@ -869,7 +869,7 @@ class PrimGradChecker(PrimForwardChecker):
def get_output_dict(self, np_outputs, api_outputs, outputs_sig):
assert len(api_outputs) <= len(outputs_sig), (
-"forward api outputs length must be the less than or equal to KernelSignature outputs,but recive %s and %s"
+"forward api outputs length must be the less than or equal to KernelSignature outputs,but receive %s and %s"
) % (len(api_outputs), len(outputs_sig))
output_dict = {}
for i in range(len(api_outputs)):
@@ -1249,7 +1249,7 @@ class PrimGradChecker(PrimForwardChecker):
net.forward.program_cache.clear()
def check_jit_comp_with_cinn(self):
-# cinn doesen't support cpu place
+# cinn doesn't support cpu place
if (
type(self.place) is paddle.fluid.libpaddle.CPUPlace
and self.enable_cinn
......
@@ -100,7 +100,7 @@ def shortcut(input, ch_out, stride):
def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio):
# The number of first 1x1 convolutional channels for each bottleneck build block
-# was halved to reduce the compution cost.
+# was halved to reduce the computation cost.
conv0 = conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu'
)
......
@@ -409,7 +409,7 @@ class TestSquareDoubleGradCheck(unittest.TestCase):
@prog_scope()
def func(self, place):
-# the shape of input variable should be clearly specified, not inlcude -1.
+# the shape of input variable should be clearly specified, not include -1.
shape = [2, 3, 7, 9]
eps = 0.005
dtype = np.float64
......
@@ -157,7 +157,7 @@ from .layer.layers import Layer # noqa: F401
from .utils.spectral_norm_hook import spectral_norm
-# TODO: remove loss, keep it for too many used in unitests
+# TODO: remove loss, keep it for too many used in unittests
from .layer import loss # noqa: F401
from . import utils # noqa: F401
@@ -171,7 +171,7 @@ from paddle.utils import deprecated
@deprecated(
since="2.0.0",
-update_to="paddle.nn.funcitional.diag_embed",
+update_to="paddle.nn.functional.diag_embed",
level=1,
reason="diag_embed in paddle.nn will be removed in future",
)
......
@@ -182,7 +182,7 @@ def _get_download(url, fullname):
"{}!".format(url, req.status_code)
)
-# For protecting download interupted, download to
+# For protecting download interrupted, download to
# tmp_fullname firstly, move tmp_fullname to fullname
# after download finished
tmp_fullname = fullname + "_tmp"
@@ -295,7 +295,7 @@ def _decompress(fname):
"""
logger.info(f"Decompressing {fname}...")
-# For protecting decompressing interupted,
+# For protecting decompressing interrupted,
# decompress to fpath_tmp directory firstly, if decompress
# successed, move decompress files to fpath and delete
# fpath_tmp and remove download compress file.
......
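Both comments above describe the same defensive pattern: do the work in a temporary location and move the result into place only after it completes, so an interrupted download or decompression never leaves a partial file under the final name. A minimal sketch of the download half, using `requests`; the function name and parameters are illustrative, not the library's own helper.

```python
import shutil
import requests

def download_atomically(url: str, fullname: str, chunk_size: int = 1 << 20) -> None:
    # Stream to "<fullname>_tmp" first; rename to fullname only on success.
    tmp_fullname = fullname + "_tmp"
    with requests.get(url, stream=True, timeout=60) as req:
        req.raise_for_status()
        with open(tmp_fullname, "wb") as f:
            for chunk in req.iter_content(chunk_size=chunk_size):
                if chunk:
                    f.write(chunk)
    shutil.move(tmp_fullname, fullname)  # a partial file is never visible as fullname
```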
@@ -176,7 +176,7 @@ def _elementwise_flops_compute(input_shapes, attrs):
def _elementwise_add_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_add op.
For elementwise_add(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1, odim1) * max(dim2, odim2) * max()...
@@ -188,7 +188,7 @@ def _elementwise_add_flops(input_shapes, attrs):
def _elementwise_mul_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_mul op.
For elementwise_mul(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1, odim1) * max(dim2, odim2)* max()...
@@ -200,7 +200,7 @@ def _elementwise_mul_flops(input_shapes, attrs):
def _elementwise_div_flops(input_shapes, attrs):
"""FLOPs computation for elementwise_div op.
For elementwise_div(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2, dim3 ...]
shape_of_other = [odim1, odim2, odim3...]
equation: flops = max(dim1,odim1)*max(dim2,odim2)*max()...
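The three docstrings above share one rule: the cost of a broadcast elementwise op is the element count of the broadcast output, i.e. the product of max(dim_i, odim_i), with missing dimensions treated as 1. A small sketch of that equation; the helper name is illustrative, and dimensions are paired from the front exactly as the docstrings write dim1 with odim1.

```python
from itertools import zip_longest

def elementwise_flops(shape_of_input, shape_of_other):
    # flops = max(dim1, odim1) * max(dim2, odim2) * ...; absent dims count as 1.
    flops = 1
    for dim, odim in zip_longest(shape_of_input, shape_of_other, fillvalue=1):
        flops *= max(dim, odim)
    return flops

# e.g. adding a [2, 3, 4] tensor and a [2, 1, 4] tensor costs 2 * 3 * 4 = 24 FLOPs
assert elementwise_flops([2, 3, 4], [2, 1, 4]) == 24
```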
@@ -237,7 +237,7 @@ def _layer_norm_flops(input_shapes, attrs):
def _matmul_flops(input_shapes, attrs):
"""FLOPs computation for matmul op.
For matmul(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1,dim2 ...dim_n_1,dim_n] length:n
shape_of_other = [odim1,odim2 ... odim(n-m)... odim_m_1,dim_m] length:m
suppose n > m and dim_n = odim_m_1:
@@ -274,7 +274,7 @@ def _matmul_flops(input_shapes, attrs):
def _matmul_v2_flops(input_shapes, attrs):
"""FLOPs computation for matmul_v2 op.
For matmul_v2(input,other):
-input_shapes = [shape_of_input, shape_of_ohther]
+input_shapes = [shape_of_input, shape_of_other]
shape_of_input = [dim1, dim2 ...dim_n_1, dim_n] length:n
shape_of_other = [odim1, odim2 ... odim(n-m) ... odim_m_1, dim_m] length:m
suppose n > m and dim_n = odim_m_1:
......
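The matmul docstrings above are truncated before their equations, so as a reference point here is the usual textbook estimate, which may differ from the exact convention this profiler uses: with input [..., M, K] and other [..., K, N], each output element costs K multiply-add pairs.

```python
def matmul_flops_estimate(shape_of_input, shape_of_other):
    # Assumed convention: input is [..., M, K], other ends in [K, N], and a
    # multiply-add pair counts as 2 FLOPs, giving 2 * K * numel(output).
    # Broadcast batch dims on `other` are ignored in this simplified sketch.
    *batch, m, k = shape_of_input
    k2, n = shape_of_other[-2:]
    assert k == k2, "inner dimensions must match"
    out_elems = m * n
    for d in batch:
        out_elems *= d
    return 2 * k * out_elems

# e.g. [8, 64, 128] x [128, 256] -> 2 * 128 * (8 * 64 * 256) FLOPs
assert matmul_flops_estimate([8, 64, 128], [128, 256]) == 2 * 128 * 8 * 64 * 256
```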
@@ -51,7 +51,7 @@ def _prepare_data():
def _is_cuda_available():
"""
-Check whether CUDA is avaiable.
+Check whether CUDA is available.
"""
try:
assert len(paddle.static.cuda_places()) > 0
@@ -67,7 +67,7 @@ def _is_cuda_available():
def _is_xpu_available():
"""
-Check whether XPU is avaiable.
+Check whether XPU is available.
"""
try:
assert len(paddle.static.xpu_places()) > 0
@@ -154,7 +154,7 @@ def _run_static_single(use_cuda, use_xpu):
def train_for_run_parallel():
"""
-train script for parallel traning check
+train script for parallel training check
"""
# to avoid cyclic import
......
@@ -27,7 +27,7 @@ from ..fluid.framework import Block, Variable, in_dygraph_mode
def convert_to_list(value, n, name, dtype=int):
"""
Converts a single numerical type or iterable of numerical
-types into an numerical type list.
+types into a numerical type list.
Arguments:
value: The value to validate and convert. Could an int, or any iterable
......
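The docstring above describes a small normalization helper: a single number is expanded to a list of length n, while an iterable is validated to contain exactly n entries. A minimal sketch of that behaviour, simplified; the real helper does stricter type checking and the sketch name is illustrative.

```python
def convert_to_list_sketch(value, n, name, dtype=int):
    # A single number becomes [value] * n; an iterable must have exactly n items.
    if isinstance(value, dtype):
        return [value] * n
    try:
        value_list = [dtype(v) for v in value]
    except TypeError:
        raise ValueError(f"{name} must be a {dtype.__name__} or an iterable of them")
    if len(value_list) != n:
        raise ValueError(f"{name} must contain exactly {n} values, got {len(value_list)}")
    return value_list

# e.g. kernel_size=3 for a 2-D op becomes [3, 3]; (3, 5) stays [3, 5]
assert convert_to_list_sketch(3, 2, "kernel_size") == [3, 3]
assert convert_to_list_sketch((3, 5), 2, "kernel_size") == [3, 5]
```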
@@ -24,7 +24,7 @@ __all__ = []
def has_valid_extension(filename, extensions):
-"""Checks if a file is a vilid extension.
+"""Checks if a file is a valid extension.
Args:
filename (str): path to a file
@@ -363,7 +363,7 @@ class ImageFolder(Dataset):
dirname = list(subpath.keys())[0]
make_directory(root / dirname, subpath[dirname])
-directory_hirerarchy = [
+directory_hierarchy = [
"abc.jpg",
"def.png",
{"ghi": [
@@ -376,7 +376,7 @@ class ImageFolder(Dataset):
# You can replace this with any directory to explore the structure
# of generated data. e.g. fake_data_dir = "./temp_dir"
fake_data_dir = tempfile.mkdtemp()
-make_directory(fake_data_dir, directory_hirerarchy)
+make_directory(fake_data_dir, directory_hierarchy)
image_folder_1 = ImageFolder(fake_data_dir)
print(image_folder_1.samples)
# ['./temp_dir/abc.jpg', './temp_dir/def.png',
......