未验证 提交 0cdca676 编写于 作者: H HongyuJia 提交者: GitHub

fix typo error (#48156)

上级 91f4d1ce
......@@ -513,8 +513,8 @@ Custom Operator.
According to the phi::DenseTensor operation function implemented by the user
independently of the framework, it is encapsulated into a framework
operator to adapt to various execution scenarios such as dynamic graph,
mode static graph mode, and inference mode.
operator to adapt to various execution scenarios such as dynamic graph
mode, static graph mode, and inference mode.
)DOC");
}
......@@ -979,11 +979,9 @@ void RegisterOperatorWithMetaInfo(const std::vector<OpMetaInfo>& op_meta_infos,
"Custom grad operator infershape error. "
"If a custom grad operator contains only one input and "
"only one output, the input shape will be directly set "
"to "
"the output shape. Otherwise, Please set the forward "
"input "
"as the grad operator's input or set the InferShapeFn "
"of custom grad operator by "
"to the output shape. Otherwise, please set the forward "
"input as the grad operator's input or set the "
"InferShapeFn of custom grad operator by "
".SetInferShapeFn(PD_INFER_SHAPE(...))"));
ctx->ShareDim(grad_op_inputs[0], out_name);
}
......
......@@ -56,7 +56,7 @@ using GradOpPtr = typename details::GradOpPtrTrait<T>::Type;
operator fwd_op. After it is called (through operator()), the pairs of
(gradient variable, corresponding input variable of fwd_op) will be added to
grad_to_var. If an input variable of fwd_op is contained in no_grad_set, its
gradient varialbe will be ignored or kEmptyVarName depending on the template
gradient variable will be ignored or kEmptyVarName depending on the template
argument DropEmptyIG in the derived classes.
*/
class GradOpDescMakerBase {
......
......@@ -217,7 +217,7 @@ class OpDesc {
return ret_val;
}
// it it really needed? or just mantain a ptr from block?
// is it really needed? or just maintain a ptr from block?
proto::OpDesc desc_;
BlockDesc *block_{nullptr}; // not_own
// input arg name => input variable names
......
......@@ -98,12 +98,12 @@ void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
.SetDefault({})
.AsExtra();
AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namesope.")
AddAttr<std::string>(OpNamescopeAttrName(), "Operator name with namescope.")
.SetDefault("")
.AsExtra();
AddAttr<std::vector<std::string>>(OpCreationCallstackAttrName(),
"Callstack for Op Creatation.")
"Callstack for Op Creation.")
.SetDefault({})
.AsExtra();
AddAttr<std::string>(OpDeviceAttrName(), "Device type of this operator.")
......
......@@ -37,7 +37,7 @@ enum class ExtraAttrProperty : uint8_t {
SCHEDULE,
// The attributes for ONEDNN only, can be saved in OneDNNContext
ONEDNN,
// The attributes for ONEDNN only, can be saved in GPUContext
// The attributes for GPUDNN only, can be saved in GPUContext
GPUDNN,
// Add necessary properties as needed
};
......
......@@ -271,7 +271,7 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
static void Compute(CustomOpKernelContext* ctx, const Args&... args) {
static_assert(out_idx == 0,
"If return std::vector<Tensor> in Custom OpKernel, "
"you cannot pass output by kernel funciton argument.");
"you cannot pass output by kernel function argument.");
auto outs = impl_fn(args...);
auto* orig_outs = ctx->AllMutableOutput();
PD_CHECK(orig_outs->size() == outs.size(),
......@@ -626,8 +626,7 @@ class PADDLE_API OpMetaInfoBuilder {
void RegisterAllCustomOperator();
// Using this api to load compiled custom operator's dynamic library and
// register Custom
// Operator into it
// register Custom Operator into it
void LoadCustomOperatorLib(const std::string& dso_name);
/////////////////////// Op register Macro /////////////////////////
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册