From 0cdca6763224350385496c74e0680d14adc9c58c Mon Sep 17 00:00:00 2001
From: HongyuJia
Date: Tue, 22 Nov 2022 19:14:47 +0800
Subject: [PATCH] fix typo error (#48156)

---
 paddle/fluid/framework/custom_operator.cc   | 12 +++++-------
 paddle/fluid/framework/grad_op_desc_maker.h |  2 +-
 paddle/fluid/framework/op_desc.h            |  2 +-
 paddle/fluid/framework/op_proto_maker.cc    |  4 ++--
 paddle/fluid/operators/ops_extra_info.h     |  2 +-
 paddle/phi/api/ext/op_meta_info.h           |  5 ++---
 6 files changed, 12 insertions(+), 15 deletions(-)

diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc
index a76b1644671..1ca2f4e56dd 100644
--- a/paddle/fluid/framework/custom_operator.cc
+++ b/paddle/fluid/framework/custom_operator.cc
@@ -513,8 +513,8 @@ Custom Operator.
 
 According to the phi::DenseTensor operation function implemented by the user
 independently of the framework, it is encapsulated into a framework
-operator to adapt to various execution scenarios such as dynamic graph,
-mode static graph mode, and inference mode.
+operator to adapt to various execution scenarios such as dynamic graph
+mode, static graph mode, and inference mode.
 
 )DOC");
 }
@@ -979,11 +979,9 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
             "Custom grad operator infershape error. "
             "If a custom grad operator contains only one input and "
             "only one output, the input shape will be directly set "
-            "to "
-            "the output shape. Otherwise, Please set the forward "
-            "input "
-            "as the grad operator's input or set the InferShapeFn "
-            "of custom grad operator by "
+            "to the output shape. Otherwise, Please set the forward "
+            "input as the grad operator's input or set the "
+            "InferShapeFn of custom grad operator by "
             ".SetInferShapeFn(PD_INFER_SHAPE(...))"));
         ctx->ShareDim(grad_op_inputs[0], out_name);
       }
diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h
index bb36742d475..dd795e190bd 100644
--- a/paddle/fluid/framework/grad_op_desc_maker.h
+++ b/paddle/fluid/framework/grad_op_desc_maker.h
@@ -56,7 +56,7 @@ using GradOpPtr = typename details::GradOpPtrTrait<T>::Type;
 operator fwd_op. After it is called (through operator()), the pairs of
 (gradient variable, corresponding input variable of fwd_op) will be added to
 grad_to_var. If an input variable of fwd_op is contained in no_grad_set, its
-gradient varialbe will be ignored or kEmptyVarName depending on the template
+gradient variable will be ignored or kEmptyVarName depending on the template
 argument DropEmptyIG in the derived classes.
 */
 class GradOpDescMakerBase {
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 6c6f13d7c92..33460d08f72 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -217,7 +217,7 @@ class OpDesc {
     return ret_val;
   }
 
-  // it it really needed? or just mantain a ptr from block?
+  // it it really needed? or just maintain a ptr from block?
   proto::OpDesc desc_;
   BlockDesc *block_{nullptr};  // not_own
   // input arg name => input variable names
diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc
index 5f75991b506..fbad45e8891 100644
--- a/paddle/fluid/framework/op_proto_maker.cc
+++ b/paddle/fluid/framework/op_proto_maker.cc
@@ -98,12 +98,12 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
       .SetDefault({})
       .AsExtra();
 
-  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
+  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namescope.")
       .SetDefault("")
       .AsExtra();
 
   AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
-                                    "Callstack for Op Creatation.")
+                                    "Callstack for Op Creation.")
       .SetDefault({})
       .AsExtra();
   AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
diff --git a/paddle/fluid/operators/ops_extra_info.h b/paddle/fluid/operators/ops_extra_info.h
index 33f8c8ddb9c..b16e4ed58f3 100644
--- a/paddle/fluid/operators/ops_extra_info.h
+++ b/paddle/fluid/operators/ops_extra_info.h
@@ -37,7 +37,7 @@ enum class ExtraAttrProperty : uint8_t {
   SCHEDULE,
   // The attributes for ONEDNN only, can be saved in OneDNNContext
   ONEDNN,
-  // The attributes for ONEDNN only, can be saved in GPUContext
+  // The attributes for GPUDNN only, can be saved in GPUContext
   GPUDNN,
   // Add necessary properties as needed
 };
diff --git a/paddle/phi/api/ext/op_meta_info.h b/paddle/phi/api/ext/op_meta_info.h
index 546b0accf8b..7d2be9c90d7 100644
--- a/paddle/phi/api/ext/op_meta_info.h
+++ b/paddle/phi/api/ext/op_meta_info.h
@@ -271,7 +271,7 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
   static void Compute(CustomOpKernelContext* ctx, const Args&... args) {
     static_assert(out_idx == 0,
                   "If return std::vector<Tensor> in Custom OpKernel, "
-                  "you cannot pass output by kernel funciton argument.");
+                  "you cannot pass output by kernel function argument.");
     auto outs = impl_fn(args...);
     auto* orig_outs = ctx->AllMutableOutput();
     PD_CHECK(orig_outs->size() == outs.size(),
@@ -626,8 +626,7 @@ class PADDLE_API OpMetaInfoBuilder {
 void RegisterAllCustomOperator();
 
 // Using this api to load compiled custom operator's dynamic library and
-// register Custom
-// Operator into it
+// register Custom Operator into it
 void LoadCustomOperatorLib(const std::string& dso_name);
 
 /////////////////////// Op register Macro /////////////////////////
-- 
GitLab