From 447d79da22095bfc18a6da90516a8d8d966dbfc2 Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Thu, 8 Sep 2022 16:34:01 +0800
Subject: [PATCH] polish code comment, test=doc (#45859)

---
 paddle/fluid/framework/operator.h      | 3 +--
 paddle/fluid/imperative/layer.cc       | 2 +-
 paddle/fluid/imperative/tracer.h       | 2 +-
 paddle/fluid/operators/matmul_v2_op.cc | 8 +++-----
 paddle/phi/api/include/context_pool.h  | 2 +-
 paddle/phi/common/backend.h            | 2 +-
 paddle/phi/core/kernel_factory.h       | 2 +-
 7 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 43a5b7a0bb9..c82976d197c 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -680,8 +680,7 @@ class OperatorWithKernel : public OperatorBase {
 
   /**
    * Transfer data from scope to a transferred scope. If there is no data need
-   * to
-   * be tranfered, it returns nullptr.
+   * to be transferred, it returns nullptr.
    *
    * * transfered_inplace_vars is a output vector.
    */
diff --git a/paddle/fluid/imperative/layer.cc b/paddle/fluid/imperative/layer.cc
index e1dd78cb9f2..c7c6eb104f5 100644
--- a/paddle/fluid/imperative/layer.cc
+++ b/paddle/fluid/imperative/layer.cc
@@ -517,7 +517,7 @@ static void OpBaseRunImpl(const framework::OperatorBase& op,
    * `transfer_scope` is created before PrepareData, the data after
    * transform is stored in the temporary scope, and then discarded
    * after the execution of op, but the original input is directly
-   * overwritten in the previous dynamic graph implemention.
+   * overwritten in the previous dynamic graph implementation.
    */
   auto prepared_op =
       PreparedOp::Prepare(ins, outs, *op_kernel, place, attrs, default_attrs);
diff --git a/paddle/fluid/imperative/tracer.h b/paddle/fluid/imperative/tracer.h
index 686c3673353..a9ede4bb251 100644
--- a/paddle/fluid/imperative/tracer.h
+++ b/paddle/fluid/imperative/tracer.h
@@ -139,7 +139,7 @@ class Tracer {
   // intermediate var both in imperative and static mode. But the
   // `UniqueNameGenerator` in C++ and `unique_name.py` in Python doesn't share
   // the same auto-increment id. It will create a variable repeatedly with same
-  // name like `tmp_0` in some cases when transform dygraph into static layers. 
+  // name like `tmp_0` in some cases when transform dygraph into static layers.
   // So we modify the default prefix key into `eager_tmp` to distinguish with
   // static graph.
   std::string GenerateUniqueName(std::string key = "dygraph_tmp") {
diff --git a/paddle/fluid/operators/matmul_v2_op.cc b/paddle/fluid/operators/matmul_v2_op.cc
index 209bf6d1f6c..8f233d7650d 100644
--- a/paddle/fluid/operators/matmul_v2_op.cc
+++ b/paddle/fluid/operators/matmul_v2_op.cc
@@ -62,12 +62,12 @@ class MatMulV2Op : public framework::OperatorWithKernel {
                       0,
                       platform::errors::InvalidArgument(
                           "The Input(X) dims size must be greater than 0,"
-                          " but reviced dims size is 0. "));
+                          " but received dims size is 0. "));
     PADDLE_ENFORCE_GT(ndims_y,
                       0,
                       platform::errors::InvalidArgument(
                           "The Input(Y) dims size must be greater than 0,"
-                          " but reviced dims size is 0. "));
+                          " but received dims size is 0. "));
 
     bool x_broadcasted = false, y_broadcasted = false;
     if (ndims_x == 1) {
@@ -160,9 +160,7 @@ class MatMulV2Op : public framework::OperatorWithKernel {
   } else {
 #ifdef PADDLE_WITH_MKLDNN
     // When matmul_v2 is first oneDNN op in a chain (there was some non oneDNN
-    // op
-    // previously)
-    // then we also need to rotate shape NHWC -> NCWH
+    // op previously) then we also need to rotate shape NHWC -> NCWH
     if ((expected_kernel_type.data_layout_ ==
          framework::DataLayout::kMKLDNN) &&
         (tensor.layout() != framework::DataLayout::kMKLDNN) &&
diff --git a/paddle/phi/api/include/context_pool.h b/paddle/phi/api/include/context_pool.h
index b429252beb7..f2e797575fd 100644
--- a/paddle/phi/api/include/context_pool.h
+++ b/paddle/phi/api/include/context_pool.h
@@ -59,7 +59,7 @@ struct DefaultDeviceContextType {
  *
  * Note: DeviceContextPool is an experimental API and may be removed in the
  * future. From 2.3, we recommend directly using the C++ API to combine new
- * perators.
+ * operators.
  */
 class PADDLE_API DeviceContextPool {
  public:
diff --git a/paddle/phi/common/backend.h b/paddle/phi/common/backend.h
index 6f1774fe8e4..b740815305d 100644
--- a/paddle/phi/common/backend.h
+++ b/paddle/phi/common/backend.h
@@ -61,7 +61,7 @@ enum class Backend : uint8_t {
   NUM_BACKENDS,
 
   /**
-   * [ Why we need ALL in baisc kernel key member? ]
+   * [ Why we need ALL in basic kernel key member? ]
    *
    * For Tensor, ALL represents an illegal Backend, but for Kernel, some
    * kernels may be device-independent by nature, such as reshape; and when
diff --git a/paddle/phi/core/kernel_factory.h b/paddle/phi/core/kernel_factory.h
index 59e91451fff..8e98c276646 100644
--- a/paddle/phi/core/kernel_factory.h
+++ b/paddle/phi/core/kernel_factory.h
@@ -210,7 +210,7 @@ class KernelArgsDef {
 
 class Kernel {
  public:
-  // for map element contruct
+  // for map element construct
  Kernel() = default;
 
  explicit Kernel(KernelFn fn, void* variadic_fn)
--
GitLab