Unverified commit 0cdca676, authored by HongyuJia, committed by GitHub

fix typo error (#48156)

Parent 91f4d1ce
@@ -513,8 +513,8 @@ Custom Operator.
 According to the phi::DenseTensor operation function implemented by the user
 independently of the framework, it is encapsulated into a framework
-operator to adapt to various execution scenarios such as dynamic graph,
-mode static graph mode, and inference mode.
+operator to adapt to various execution scenarios such as dynamic graph
+mode, static graph mode, and inference mode.
 )DOC");
 }
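
For context, the DOC string above summarizes Paddle's custom-operator flow: a plain tensor function is wrapped into a framework operator. A minimal sketch of the user-side registration it describes, assuming the public paddle/extension.h API; the custom_identity operator and its trivial kernel are hypothetical, purely illustrative:

    #include "paddle/extension.h"

    // A plain tensor function written independently of the framework.
    std::vector<paddle::Tensor> IdentityForward(const paddle::Tensor& x) {
      return {x};  // hand the input through unchanged
    }

    // PD_BUILD_OP encapsulates the function into a framework operator that
    // can run in dynamic graph mode, static graph mode, and inference mode.
    PD_BUILD_OP(custom_identity)
        .Inputs({"X"})
        .Outputs({"Out"})
        .SetKernelFn(PD_KERNEL(IdentityForward));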
@@ -979,11 +979,9 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
             "Custom grad operator infershape error. "
             "If a custom grad operator contains only one input and "
             "only one output, the input shape will be directly set "
-            "to "
-            "the output shape. Otherwise, Please set the forward "
-            "input "
-            "as the grad operator's input or set the InferShapeFn "
-            "of custom grad operator by "
+            "to the output shape. Otherwise, Please set the forward "
+            "input as the grad operator's input or set the "
+            "InferShapeFn of custom grad operator by "
             ".SetInferShapeFn(PD_INFER_SHAPE(...))"));
       ctx->ShareDim(grad_op_inputs[0], out_name);
     }
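
The message above encodes a rule: a custom grad operator with exactly one input and one output gets its output shape copied from that input; anything else needs an explicit InferShapeFn. A hedged sketch of the .SetInferShapeFn escape hatch, continuing the hypothetical custom_identity example (signatures follow the public custom-op documentation, but treat the details as assumptions):

    // Two inputs, so the one-in/one-out shortcut does not apply.
    std::vector<paddle::Tensor> IdentityBackward(const paddle::Tensor& x,
                                                 const paddle::Tensor& out_grad) {
      return {out_grad};
    }

    // The gradient of X has the same shape as the forward input X.
    std::vector<std::vector<int64_t>> IdentityGradInferShape(
        const std::vector<int64_t>& x_shape,
        const std::vector<int64_t>& out_grad_shape) {
      return {x_shape};
    }

    PD_BUILD_GRAD_OP(custom_identity)
        .Inputs({"X", paddle::Grad("Out")})
        .Outputs({paddle::Grad("X")})
        .SetKernelFn(PD_KERNEL(IdentityBackward))
        .SetInferShapeFn(PD_INFER_SHAPE(IdentityGradInferShape));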
...
@@ -56,7 +56,7 @@ using GradOpPtr = typename details::GradOpPtrTrait<T>::Type;
   operator fwd_op. After it is called (through operator()), the pairs of
   (gradient variable, corresponding input variable of fwd_op) will be added to
   grad_to_var. If an input variable of fwd_op is contained in no_grad_set, its
-  gradient varialbe will be ignored or kEmptyVarName depending on the template
+  gradient variable will be ignored or kEmptyVarName depending on the template
   argument DropEmptyIG in the derived classes.
 */
 class GradOpDescMakerBase {
...
@@ -217,7 +217,7 @@ class OpDesc {
     return ret_val;
   }
-  // it it really needed? or just mantain a ptr from block?
+  // it it really needed? or just maintain a ptr from block?
   proto::OpDesc desc_;
   BlockDesc *block_{nullptr};  // not_own
   // input arg name => input variable names
...
@@ -98,12 +98,12 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
       .SetDefault({})
       .AsExtra();
-  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
+  AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namescope.")
       .SetDefault("")
       .AsExtra();
   AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
-                                    "Callstack for Op Creatation.")
+                                    "Callstack for Op Creation.")
       .SetDefault({})
       .AsExtra();
   AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
...
@@ -37,7 +37,7 @@ enum class ExtraAttrProperty : uint8_t {
   SCHEDULE,
   // The attributes for ONEDNN only, can be saved in OneDNNContext
   ONEDNN,
-  // The attributes for ONEDNN only, can be saved in GPUContext
+  // The attributes for GPUDNN only, can be saved in GPUContext
   GPUDNN,
   // Add necessary properties as needed
 };
...
@@ -271,7 +271,7 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
   static void Compute(CustomOpKernelContext* ctx, const Args&... args) {
     static_assert(out_idx == 0,
                   "If return std::vector<Tensor> in Custom OpKernel, "
-                  "you cannot pass output by kernel funciton argument.");
+                  "you cannot pass output by kernel function argument.");
     auto outs = impl_fn(args...);
     auto* orig_outs = ctx->AllMutableOutput();
     PD_CHECK(orig_outs->size() == outs.size(),
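
The static_assert rejects kernels that mix the two output styles. A hedged illustration, declarations only, all names hypothetical (mutable output arguments are assumed to be supported by this API version):

    // Allowed: outputs come back as the return value.
    std::vector<paddle::Tensor> KernelByReturn(const paddle::Tensor& x);

    // Also allowed: outputs are passed as mutable arguments, nothing returned.
    void KernelByArgument(const paddle::Tensor& x, paddle::Tensor* out);

    // Rejected by the assert above (out_idx != 0): returning a vector of
    // tensors while also declaring an output argument.
    std::vector<paddle::Tensor> KernelMixed(const paddle::Tensor& x,
                                            paddle::Tensor* out);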
@@ -626,8 +626,7 @@ class PADDLE_API OpMetaInfoBuilder {
 void RegisterAllCustomOperator();

 // Using this api to load compiled custom operator's dynamic library and
-// register Custom
-// Operator into it
+// register Custom Operator into it
 void LoadCustomOperatorLib(const std::string& dso_name);

 /////////////////////// Op register Macro /////////////////////////
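
A short usage sketch for the declaration above; the library path is hypothetical, and the enclosing paddle namespace is an assumption based on this header:

    // Load a compiled custom-operator shared library and register every
    // operator it contains with the framework.
    paddle::LoadCustomOperatorLib("/path/to/libcustom_identity.so");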
...