diff --git a/paddle/phi/README.md b/paddle/phi/README.md
index 2827f0aa1619db29b9aab9d5bf07594ec300132f..e9cb953bc0510c2c0f235cb900ced3d2d600996f 100644
--- a/paddle/phi/README.md
+++ b/paddle/phi/README.md
@@ -34,7 +34,7 @@ The root cause of poor reusability is the inflexibility of the original Op archi
 
 After the release of Paddle 2.0, it has received many feedbacks from internal and external users that the performance of the dynamic graph is several times lower than that of competing products in the execution scenario of small model on CPU.
 
-The main reason for this problem is: the execution path of the C++ side of the Padddle dynamic graph is relatively long and the scheduling overhead is relatively heavy, which is related to the early design of the dynamic graph which is compatible with the static graph and inherits many object construction processes of the static graph Op.
+The main reason for this problem is: the execution path of the C++ side of the Paddle dynamic graph is relatively long and the scheduling overhead is relatively heavy, which is related to the early design of the dynamic graph which is compatible with the static graph and inherits many object construction processes of the static graph Op.
 
 Therefore, the dynamic graph needs to be upgraded to a function-based scheduling architecture, and this problem can be solved by abandoning the original complex Op architecture, which depends on the OpKernel being changed to a functional writing method.
 
@@ -213,7 +213,7 @@
 
 ##### 2.3.1.3 IntArray
 
-IntArray is an integer type array that can be constructed from `vector`, `Tensor` and `vector`. Currently, it is mainly used to represent dimension index variables such as `shape`, `index` and `aixs`.
+IntArray is an integer type array that can be constructed from `vector`, `Tensor` and `vector`. Currently, it is mainly used to represent dimension index variables such as `shape`, `index` and `axis`.
 
 Taking `FullKernel` as an example, the shape parameter is used to indicate the dimension information of the returned Tensor (e.g. [2, 8, 8]). When calling `FullKernel`, the parameters of `vector`, `Tensor` and `vector` type variables can be used to complete the call. Using `IntArray` avoids the problem of writing a separate overloaded function for each shape type.
 
@@ -701,7 +701,7 @@ PD_DECLARE_KERNEL(as_real, CPU, ALL_LAYOUT);
 ...
 ```
 
-For the specific implementation of `kernel_declare`, please refer to the function implementation in `camke/phi.cmake`, which will not be introduced here.
+For the specific implementation of `kernel_declare`, please refer to the function implementation in `cmake/phi.cmake`, which will not be introduced here.
 
 ##### 2.3.5.2 Kernel dependencies
 
diff --git a/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
index 7519399e0e4d197c94cae3c7fffa9c60c2349d4f..03f99133360121e6aee636a6c787bb82b3e0db87 100644
--- a/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
+++ b/paddle/phi/core/compat/get_kerneltype_forvar_utils.cc
@@ -22,7 +22,7 @@ const std::string& GetKernelTypeForVarContext::GetVarName(void) const {
       var_name_,
       nullptr,
       errors::InvalidArgument(
-          "Variablle name is null. The context hasn't been initialized. "));
+          "Variable name is null. The context hasn't been initialized. "));
   return *var_name_;
 }
 
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 2145d73cd9f37405d5e37b340ce15b0e9220c5b8..7e3d10bd56a7060e81b984938ffcdecaeded12e7 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -115,7 +115,7 @@ class DefaultKernelSignatureMap {
         Has(op_type),
         true,
         phi::errors::AlreadyExists(
-            "Operator (%s)'s Kernel Siginature has been registered.", op_type));
+            "Operator (%s)'s Kernel Signature has been registered.", op_type));
     map_.insert({std::move(op_type), std::move(signature)});
   }
 
@@ -160,7 +160,7 @@ class OpUtilsMap {
         arg_mapping_fn_map_.count(op_type),
         0UL,
         phi::errors::AlreadyExists(
-            "Operator (%s)'s argu,emt mapping function has been registered.",
+            "Operator (%s)'s argument mapping function has been registered.",
             op_type));
     arg_mapping_fn_map_.insert({std::move(op_type), std::move(fn)});
   }
diff --git a/paddle/phi/core/distributed/store/tcp_store.cc b/paddle/phi/core/distributed/store/tcp_store.cc
index f938a3cc06f663d0773392f948d326861bab3e59..dcf75042c104df0cba6f4113ab29bfc57574e903 100644
--- a/paddle/phi/core/distributed/store/tcp_store.cc
+++ b/paddle/phi/core/distributed/store/tcp_store.cc
@@ -87,7 +87,7 @@ void MasterDaemon::_notify_waiting_sockets(const std::string& key) {
   if (_waiting_sockets.find(key) != _waiting_sockets.end()) {
     for (auto waiting_socket : _waiting_sockets.at(key)) {
       auto reply = ReplyType::STOP_WAIT;
-      VLOG(3) << "TCPStore: nofify the socket: " << GetSockName(waiting_socket)
+      VLOG(3) << "TCPStore: notify the socket: " << GetSockName(waiting_socket)
              << " that key: " << key << " is ready.";
       tcputils::send_value(waiting_socket, reply);
     }
diff --git a/paddle/phi/core/distributed/store/tcp_utils.cc b/paddle/phi/core/distributed/store/tcp_utils.cc
index d7b1fd3b972edfa43b5af60e74d46d76c841de13..516c6437e0f568f9a0217a05ca6b259e8e238523 100644
--- a/paddle/phi/core/distributed/store/tcp_utils.cc
+++ b/paddle/phi/core/distributed/store/tcp_utils.cc
@@ -172,7 +172,7 @@ SocketType tcp_listen(const std::string host,
   PADDLE_ENFORCE_GT(sockfd,
                     0,
                     phi::errors::InvalidArgument(
-                        "Bind network on %s:%s failedd.", node, port));
+                        "Bind network on %s:%s failed.", node, port));
   ::listen(sockfd, LISTENQ);