diff --git a/paddle/fluid/framework/custom_operator.cc b/paddle/fluid/framework/custom_operator.cc index a76b164467149305224bbcedf502ae9f63ab4e19..1ca2f4e56dd7176ee466da295d0f3b04a4baf51b 100644 --- a/paddle/fluid/framework/custom_operator.cc +++ b/paddle/fluid/framework/custom_operator.cc @@ -513,8 +513,8 @@ Custom Operator. According to the phi::DenseTensor operation function implemented by the user independently of the framework, it is encapsulated into a framework -operator to adapt to various execution scenarios such as dynamic graph, -mode static graph mode, and inference mode. +operator to adapt to various execution scenarios such as dynamic graph +mode, static graph mode, and inference mode. )DOC"); } @@ -979,11 +979,9 @@ void RegisterOperatorWithMetaInfo(const std::vector& op_meta_infos, "Custom grad operator infershape error. " "If a custom grad operator contains only one input and " "only one output, the input shape will be directly set " - "to " - "the output shape. Otherwise, Please set the forward " - "input " - "as the grad operator's input or set the InferShapeFn " - "of custom grad operator by " + "to the output shape. Otherwise, please set the forward " + "input as the grad operator's input or set the " + "InferShapeFn of custom grad operator by " ".SetInferShapeFn(PD_INFER_SHAPE(...))")); ctx->ShareDim(grad_op_inputs[0], out_name); } diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h index bb36742d475ad6b72edd11fa1deccbdd3da517d8..dd795e190bdd20bf0ce84338abeb330377bf17bb 100644 --- a/paddle/fluid/framework/grad_op_desc_maker.h +++ b/paddle/fluid/framework/grad_op_desc_maker.h @@ -56,7 +56,7 @@ using GradOpPtr = typename details::GradOpPtrTrait::Type; operator fwd_op. After it is called (through operator()), the pairs of (gradient variable, corresponding input variable of fwd_op) will be added to grad_to_var. 
If an input variable of fwd_op is contained in no_grad_set, its - gradient varialbe will be ignored or kEmptyVarName depending on the template + gradient variable will be ignored or kEmptyVarName depending on the template argument DropEmptyIG in the derived classes. */ class GradOpDescMakerBase { diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h index 6c6f13d7c929b4346bd323aa85d4229e3679c0cf..33460d08f729b0a7a3d80bdedbc2e6bd43e36cce 100644 --- a/paddle/fluid/framework/op_desc.h +++ b/paddle/fluid/framework/op_desc.h @@ -217,7 +217,7 @@ class OpDesc { return ret_val; } - // it it really needed? or just mantain a ptr from block? + // is it really needed? or just maintain a ptr from block? proto::OpDesc desc_; BlockDesc *block_{nullptr}; // not_own // input arg name => input variable names diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc index 5f75991b50671bd5293a89ff0020c103830f35a8..fbad45e889156347dcaf454331c8850ddea55e99 100644 --- a/paddle/fluid/framework/op_proto_maker.cc +++ b/paddle/fluid/framework/op_proto_maker.cc @@ -98,12 +98,12 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto, .SetDefault({}) .AsExtra(); - AddAttr(OpNamescopeAttrName(), "Operator name with namesope.") + AddAttr(OpNamescopeAttrName(), "Operator name with namescope.") .SetDefault("") .AsExtra(); AddAttr>(OpCreationCallstackAttrName(), - "Callstack for Op Creatation.") + "Callstack for Op Creation.") .SetDefault({}) .AsExtra(); AddAttr(OpDeviceAttrName(), "Device type of this operator.") diff --git a/paddle/fluid/operators/ops_extra_info.h b/paddle/fluid/operators/ops_extra_info.h index 33f8c8ddb9c8ef89f57e6487659272a5bf9f0961..b16e4ed58f3fe24057325b1520cf6c342c52f180 100644 --- a/paddle/fluid/operators/ops_extra_info.h +++ b/paddle/fluid/operators/ops_extra_info.h @@ -37,7 +37,7 @@ enum class ExtraAttrProperty : uint8_t { SCHEDULE, // The attributes for ONEDNN only, can be saved in OneDNNContext 
ONEDNN, - // The attributes for ONEDNN only, can be saved in GPUContext + // The attributes for GPUDNN only, can be saved in GPUContext GPUDNN, // Add necessary properties as needed }; diff --git a/paddle/phi/api/ext/op_meta_info.h b/paddle/phi/api/ext/op_meta_info.h index 546b0accf8ba7e41b71f672807257ffa783de0c2..7d2be9c90d79ed5b950d3d23866833e8511a98fc 100644 --- a/paddle/phi/api/ext/op_meta_info.h +++ b/paddle/phi/api/ext/op_meta_info.h @@ -271,7 +271,7 @@ struct KernelFuncImpl { static void Compute(CustomOpKernelContext* ctx, const Args&... args) { static_assert(out_idx == 0, "If return std::vector in Custom OpKernel, " - "you cannot pass output by kernel funciton argument."); + "you cannot pass output by kernel function argument."); auto outs = impl_fn(args...); auto* orig_outs = ctx->AllMutableOutput(); PD_CHECK(orig_outs->size() == outs.size(), @@ -626,8 +626,7 @@ class PADDLE_API OpMetaInfoBuilder { void RegisterAllCustomOperator(); // Using this api to load compiled custom operator's dynamic library and -// register Custom -// Operator into it +// register Custom Operator into it void LoadCustomOperatorLib(const std::string& dso_name); /////////////////////// Op register Macro /////////////////////////