未验证 提交 ee4eecef 编写于 作者: C co63oc 提交者: GitHub

Fix typos: "Betweeen" → "Between" (#53952)

* Fix typos

* Fix
上级 c36a000d
......@@ -1839,14 +1839,14 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
" };\n";
std::string view_strategy_str = "";
std::string viwe_input_name = view_op_map[op_type].first;
std::string viwe_output_name = view_op_map[op_type].second;
std::string view_input_name = view_op_map[op_type].first;
std::string view_output_name = view_op_map[op_type].second;
view_strategy_str +=
paddle::string::Sprintf(HANDLE_VIEW_BETWEEN_INPUT_AND_OUTPUT,
viwe_input_name,
viwe_output_name,
viwe_input_name,
viwe_output_name);
view_input_name,
view_output_name,
view_input_name,
view_output_name);
generated_function_body += view_strategy_str;
generated_function_body += "\n";
......
......@@ -316,7 +316,7 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
for (size_t i = 0; i < slot_outs_num; i++) {
const auto& size_pair = ctx.OutputRangeAt(i);
const std::vector<paddle::Tensor>& out_tensors =
ctx.OutputsBetweeen(size_pair.first, size_pair.second);
ctx.OutputsBetween(size_pair.first, size_pair.second);
for (size_t j = size_pair.first; j < size_pair.second; j++) {
// SetOutRankWithSlot: slot_id = i, rank = j - size_pair.first
outs_auto_grad_metas[j]->SetSingleOutRankWithSlot(i,
......@@ -332,8 +332,8 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
<< " to grad_inputs: " << it->second;
grad_node->fwd_outs[it->second] =
egr::RunCustomOpNode::ConstructTensorWrapper(
ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
ctx.OutputRangeAt(it->first).second));
ctx.OutputsBetween(ctx.OutputRangeAt(it->first).first,
ctx.OutputRangeAt(it->first).second));
}
// Prepare Grad inputs with fwd inputs
......
......@@ -747,7 +747,7 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
for (size_t i = 0; i < slot_outs_num; i++) {
const auto& size_pair = ctx.OutputRangeAt(i);
const std::vector<paddle::Tensor>& out_tensors =
ctx.OutputsBetweeen(size_pair.first, size_pair.second);
ctx.OutputsBetween(size_pair.first, size_pair.second);
for (size_t j = size_pair.first; j < size_pair.second; j++) {
// SetOutRankWithSlot: slot_id = i, rank = j - size_pair.first
outs_auto_grad_metas[j]->SetSingleOutRankWithSlot(
......@@ -763,8 +763,8 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
<< " to grad_inputs: " << it->second;
grad_node->fwd_outs[it->second] =
egr::RunCustomOpNode::ConstructTensorWrapper(
ctx.OutputsBetweeen(ctx.OutputRangeAt(it->first).first,
ctx.OutputRangeAt(it->first).second));
ctx.OutputsBetween(ctx.OutputRangeAt(it->first).first,
ctx.OutputRangeAt(it->first).second));
}
// Prepare Grad inputs with fwd inputs
......
......@@ -299,14 +299,14 @@ std::string GenerateOpFunctionsBody(
}
outs_initializer += "}";
if (FindViewOpMap(op_type)) {
std::string viwe_input_name = view_op_map[op_type].first;
std::string viwe_output_name = view_op_map[op_type].second;
std::string view_input_name = view_op_map[op_type].first;
std::string view_output_name = view_op_map[op_type].second;
view_strategy_str +=
paddle::string::Sprintf(HANDLE_VIEW_BETWEEN_INPUT_AND_OUTPUT,
viwe_input_name,
viwe_output_name,
viwe_input_name,
viwe_output_name);
view_input_name,
view_output_name,
view_input_name,
view_output_name);
}
if (!inplace_map.empty()) {
// For inplace op, Use the input PyObject directly.
......
......@@ -133,8 +133,8 @@ class PADDLE_API CustomOpKernelContext {
return output_range_;
}
Tensor* MutableOutputAt(size_t idx);
std::vector<Tensor*> MutableOutputBetweeen(size_t start, size_t end);
std::vector<Tensor> OutputsBetweeen(size_t start, size_t end);
std::vector<Tensor*> MutableOutputBetween(size_t start, size_t end);
std::vector<Tensor> OutputsBetween(size_t start, size_t end);
std::vector<Tensor>* AllMutableOutput();
template <typename AttrType>
......@@ -391,7 +391,7 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->OutputRangeAt(out_idx);
auto arg = ctx->MutableOutputBetweeen(range.first, range.second);
auto arg = ctx->MutableOutputBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx, attr_idx, out_idx + 1>(ctx,
pargs...,
......
......@@ -162,8 +162,8 @@ CustomOpKernelContext::OptionalInputsBetween(size_t start, size_t end) {
Tensor* CustomOpKernelContext::MutableOutputAt(size_t idx) {
return &(outputs_.at(idx));
}
std::vector<Tensor*> CustomOpKernelContext::MutableOutputBetweeen(size_t start,
size_t end) {
std::vector<Tensor*> CustomOpKernelContext::MutableOutputBetween(size_t start,
size_t end) {
std::vector<Tensor*> rlt;
for (size_t i = start; i < end; ++i) {
rlt.emplace_back(&(outputs_.at(i)));
......@@ -171,8 +171,8 @@ std::vector<Tensor*> CustomOpKernelContext::MutableOutputBetweeen(size_t start,
return rlt;
}
std::vector<Tensor> CustomOpKernelContext::OutputsBetweeen(size_t start,
size_t end) {
std::vector<Tensor> CustomOpKernelContext::OutputsBetween(size_t start,
size_t end) {
std::vector<Tensor> rlt;
for (size_t i = start; i < end; ++i) {
rlt.emplace_back(outputs_.at(i));
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册