diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 43a5b7a0bb9086e2491dd70754c36286be52f29d..c82976d197c27904bd6d195ac4caf86be5bdb5f3 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -680,8 +680,7 @@ class OperatorWithKernel : public OperatorBase {
 
   /**
    * Transfer data from scope to a transferred scope. If there is no data need
-   * to
-   * be tranfered, it returns nullptr.
+   * to be transferred, it returns nullptr.
    *
    * * transfered_inplace_vars is a output vector.
    */
diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index e1dd78cb9f228827b35b3caf4ab6c338f9419e92..c7c6eb104f5e6a1298fdd25609b3398955446166 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -517,7 +517,7 @@ static void OpBaseRunImpl(const framework::OperatorBase& op,
    * `transfer_scope` is created before PrepareData, the data after
    * transform is stored in the temporary scope, and then discarded
    * after the execution of op, but the original input is directly
-   * overwritten in the previous dynamic graph implemention.
+   * overwritten in the previous dynamic graph implementation.
    */
   auto prepared_op = PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs,
                                          default_attrs);
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index 686c367335326d45bf7f0c8037f90408e7814ef8..a9ede4bb25199369a73a84b7bc8eeae98a42fa94 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -139,7 +139,7 @@ class Tracer {
   // intermediate var both in imperative and static mode. But the
   // `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
   // the same auto-increment id. It will create a variable repeatedly with same
-  // name like `tmp_0` in some cases when transform dygraph into static layers. 
+  // name like `tmp_0` in some cases when transform dygraph into static layers.
   // So we modify the default prefix key into `eager_tmp` to distinguish with
   // static graph.
   std::string GenerateUniqueName(std::string key = "dygraph_tmp") {
diff --git a/paddle/fluid/operators/matmul_v2_op.cc b/paddle/fluid/operators/matmul_v2_op.cc
index 209bf6d1f6ccd39ac011493bec02b61bbd5cb012..8f233d7650dafa203bbf3df462662bb6dd260677 100644
--- a/paddle/fluid/operators/matmul_v2_op.cc
+++ b/paddle/fluid/operators/matmul_v2_op.cc
@@ -62,12 +62,12 @@ class MatMulV2Op : public framework::OperatorWithKernel {
                       0,
                       platform::errors::InvalidArgument(
                           "The Input(X) dims size must be greater than 0,"
-                          " but reviced dims size is 0. "));
+                          " but received dims size is 0. "));
     PADDLE_ENFORCE_GT(ndims_y,
                       0,
                       platform::errors::InvalidArgument(
                           "The Input(Y) dims size must be greater than 0,"
-                          " but reviced dims size is 0. "));
+                          " but received dims size is 0. "));
 
     bool x_broadcasted = false, y_broadcasted = false;
     if (ndims_x == 1) {
@@ -160,9 +160,7 @@ class MatMulV2Op : public framework::OperatorWithKernel {
     } else {
 #ifdef PADDLE_WITH_MKLDNN
       // When matmul_v2 is first oneDNN op in a chain (there was some non oneDNN
-      // op
-      // previously)
-      // then we also need to rotate shape NHWC -> NCWH
+      // op previously) then we also need to rotate shape NHWC -> NCWH
       if ((expected_kernel_type.data_layout_ ==
            framework::DataLayout::kMKLDNN) &&
          (tensor.layout() != framework::DataLayout::kMKLDNN) &&
diff --git a/paddle/phi/api/include/context_pool.h b/paddle/phi/api/include/context_pool.h
index b429252beb7fdeeebee693f8cb932eb9af5948f0..f2e797575fd576c0473782eed103d077195c3542 100644
--- a/paddle/phi/api/include/context_pool.h
+++ b/paddle/phi/api/include/context_pool.h
@@ -59,7 +59,7 @@ struct DefaultDeviceContextType {
  *
  * Note: DeviceContextPool is an experimental API and may be removed in the
  * future. From 2.3, we recommend directly using the C++ API to combine new
- * perators.
+ * operators.
  */
 class PADDLE_API DeviceContextPool {
  public:
diff --git a/paddle/phi/common/backend.h b/paddle/phi/common/backend.h
index 6f1774fe8e46ac35bb6fabe05350c1ba307d59c0..b740815305dedfced7001a37a513de6dcfc55a23 100644
--- a/paddle/phi/common/backend.h
+++ b/paddle/phi/common/backend.h
@@ -61,7 +61,7 @@ enum class Backend : uint8_t {
   NUM_BACKENDS,
 
   /**
-   * [ Why we need ALL in baisc kernel key member? ]
+   * [ Why we need ALL in basic kernel key member? ]
    *
    * For Tensor, ALL represents an illegal Backend, but for Kernel, some
    * kernels may be device-independent by nature, such as reshape; and when
diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h
index 59e91451fff750c0c0cd115259c354c410f9a0d6..8e98c276646d9cf387758417b53eaedba10e0564 100644
--- a/paddle/phi/core/kernel_factory.h
+++ b/paddle/phi/core/kernel_factory.h
@@ -210,7 +210,7 @@ class KernelArgsDef {
 
 class Kernel {
  public:
-  // for map element contruct
+  // for map element construct
   Kernel() = default;
 
   explicit Kernel(KernelFn fn, void* variadic_fn)