Unverified. Commit 2265d63c authored by hong, committed by GitHub

[NewIR]fix new ir shadow typo (#55706)

* fix new ir shadow typo

* update
Parent ae93930f
@@ -402,7 +402,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
   }
   auto op_desc = local_program.MutableBlock(0)->AppendOp();
-  op_desc->SetType("shaddow_output");
+  op_desc->SetType("shadow_output");
   op_desc->SetAttr("name", name);
   op_desc->SetInput("x", {name});
   op_desc->SetOutput("out", {"@EMPTY@"});
@@ -458,7 +458,7 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
     continue;
   }
   auto op_desc = local_program.MutableBlock(0)->AppendOp();
-  op_desc->SetType("shaddow_output");
+  op_desc->SetType("shadow_output");
   op_desc->SetAttr("name", name);
   op_desc->SetInput("x", {name});
   op_desc->SetOutput("out", {"@EMPTY@"});
......
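Note: the forward and backward builders above append the renamed op with the same four OpDesc calls. A minimal sketch of factoring that pattern into one helper, assuming only the BlockDesc/OpDesc API already visible in the diff; the helper name AppendShadowOutputOp is hypothetical and not part of this commit:

#include <string>

#include "paddle/fluid/framework/block_desc.h"

// Hypothetical helper: append a shadow_output op that forwards the
// variable "name" out of the program, as both hunks above do inline.
void AppendShadowOutputOp(paddle::framework::BlockDesc* block,
                          const std::string& name) {
  auto* op_desc = block->AppendOp();
  op_desc->SetType("shadow_output");
  op_desc->SetAttr("name", name);
  op_desc->SetInput("x", {name});
  op_desc->SetOutput("out", {"@EMPTY@"});
}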
@@ -980,7 +980,7 @@ void BuildOpFuncList(
   if (op_name == "builtin.combine" || op_name == "pd.feed" ||
       op_name == "builtin.set_parameter" ||
       op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
-      op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") {
+      op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
     VLOG(6) << "skip process " << op_name;
     continue;
   }
......
@@ -277,7 +277,7 @@
   backward: null
-- name: shaddow_feed
+- name: shadow_feed
   inputs:
   - typename: Tensor
     name: x
@@ -293,7 +293,7 @@
     func: UnchangedInferMeta
     param: [x]
   kernel:
-    func: [shaddow_feed]
+    func: [shadow_feed]
     param: [x]
     backend: null
     layout: null
......
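In this yaml entry, func: UnchangedInferMeta with param: [x] says the output of shadow_feed keeps x's meta (dims, dtype, layout) unchanged. For orientation, a sketch of that infer-meta function as recalled from paddle/phi/infermeta/unary.h; treat the exact body as an assumption:

#include "paddle/phi/core/meta_tensor.h"

namespace phi {
// Recalled from infermeta/unary.cc: the output simply shares x's meta.
void UnchangedInferMeta(const MetaTensor& x, MetaTensor* out) {
  out->share_meta(x);
}
}  // namespace phi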
@@ -278,8 +278,8 @@ void HandleForSpecialOp(
     (*value_2_var_name)[value] = param_name;
   }
-  if (op_name == "pd.shaddow_output") {
-    VLOG(6) << "Handle for pd.shaddow_ouptut";
+  if (op_name == "pd.shadow_output") {
+    VLOG(6) << "Handle for pd.shadow_output";
     auto var_name =
         op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString();
@@ -408,7 +408,7 @@ void BuildScope(const ir::Block& block,
   if (op_name == "pd.feed" || op_name == "pd.fetch" ||
       op_name == "builtin.combine" || op_name == "builtin.set_parameter" ||
       op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
-      op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") {
+      op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
     HandleForSpecialOp(op,
                        inner_scope,
                        var_name_prefix,
......
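BuildOpFuncList and BuildScope each hard-code a list of special op names, which is why this rename has to touch both. A hedged sketch of collapsing the two lists (their union as shown in this diff) into one predicate; the helper name IsSpecialNewIrOp is hypothetical:

#include <string>
#include <unordered_set>

// Hypothetical shared predicate covering the special new-IR ops that
// BuildOpFuncList skips and BuildScope routes to HandleForSpecialOp.
static bool IsSpecialNewIrOp(const std::string& op_name) {
  static const std::unordered_set<std::string> kSpecialOps = {
      "pd.feed",
      "pd.fetch",
      "pd.feed_with_place",
      "pd.shadow_output",
      "builtin.combine",
      "builtin.set_parameter",
      "builtin.get_parameter",
      "builtin.slice"};
  return kSpecialOps.count(op_name) > 0;
}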
@@ -437,30 +437,30 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
   program->block()->push_back(op);
   if ((*it)->name() == "pd.feed" && platform::is_gpu_place(place)) {
-    // add shaddow feed op
-    phi::KernelKey shaddow_key{
+    // add shadow feed op
+    phi::KernelKey shadow_key{
         phi::Backend::GPU,
         phi::DataLayout::ANY,
         TransToPhiDataType(
             (*it)->result(0).type().dyn_cast<DenseTensorType>().dtype())};
     std::unordered_map<std::string, ir::Attribute> attr_map{
-        {"op_name", ir::StrAttribute::get(ctx, "pd.shaddow_feed")},
-        {"kernel_name", ir::StrAttribute::get(ctx, "shaddow_feed")},
-        {"kernel_key", dialect::KernelAttribute::get(ctx, shaddow_key)}};
+        {"op_name", ir::StrAttribute::get(ctx, "pd.shadow_feed")},
+        {"kernel_name", ir::StrAttribute::get(ctx, "shadow_feed")},
+        {"kernel_key", dialect::KernelAttribute::get(ctx, shadow_key)}};
     auto out_type = paddle::dialect::AllocatedDenseTensorType::get(
         ctx,
-        phi::TransToPhiPlace(shaddow_key.backend()),
+        phi::TransToPhiPlace(shadow_key.backend()),
         (*it)->result(0).type().dyn_cast<dialect::DenseTensorType>());
-    ir::Operation* shaddow_op =
+    ir::Operation* shadow_op =
         ir::Operation::Create({op->result(0)}, attr_map, {out_type}, op_info);
-    map_op_pair[*it] = shaddow_op;
-    program->block()->push_back(shaddow_op);
+    map_op_pair[*it] = shadow_op;
+    program->block()->push_back(shadow_op);
     if ((*it)->num_results() > 0) {
-      for (size_t i = 0; i < shaddow_op->num_results(); ++i) {
-        map_value_pair[(*it)->result(i)] = shaddow_op->result(i);
+      for (size_t i = 0; i < shadow_op->num_results(); ++i) {
+        map_value_pair[(*it)->result(i)] = shadow_op->result(i);
       }
     }
   }
......
@@ -1136,7 +1136,7 @@ struct FetchOpTranscriber : public OpTranscriber {
   }
 };
-struct ShaddowOutputOpTranscriber : public OpTranscriber {
+struct ShadowOutputOpTranscriber : public OpTranscriber {
   ir::Operation* operator()(ir::IrContext* ctx,
                             TranslationContext* param_map,
                             const OpDesc& op_desc,
@@ -1463,7 +1463,7 @@ OpTranslator::OpTranslator() {
   special_handlers["reduce_all"] = ReduceOpTranscriber();
   special_handlers["reduce_any"] = ReduceOpTranscriber();
   special_handlers["rnn"] = RnnOpTranscriber();
-  special_handlers["shaddow_output"] = ShaddowOutputOpTranscriber();
+  special_handlers["shadow_output"] = ShadowOutputOpTranscriber();
   special_handlers["split"] = SplitOpTranscriber();
   special_handlers["sum"] = AddNOpTranscriber();
......
@@ -2412,6 +2412,10 @@
   extra :
     attrs : [bool use_mkldnn=false]
+- op : shadow_output
+  inputs: {x: x}
+  outputs: {out: out}
 - op : shape
   inputs :
     input : Input
......
@@ -2256,14 +2256,14 @@
   optional : master_param, master_param_out
   inplace : (param -> param_out), (master_param -> master_param_out)
-- op : shaddow_output
+- op : shadow_output
   args : (Tensor x, str name)
   output : Tensor(out)
   infer_meta :
     func : UnchangedInferMeta
     param : [x]
   kernel:
-    func : shaddow_output
+    func : shadow_output
     param : [x]
 - op : shape
......
@@ -27,18 +27,19 @@ void FeedWithPlaceKernel(const Context& ctx,
                          DenseTensor* out) {}
 template <typename T, typename Context>
-void ShaddowOutputKernel(const Context& ctx,
+void ShadowOutputKernel(const Context& ctx,
                          const DenseTensor& x,
                          DenseTensor* out) {}
 }  // namespace phi
 PD_REGISTER_KERNEL(
     feed_with_place, CPU, ALL_LAYOUT, phi::FeedWithPlaceKernel, float) {}
-PD_REGISTER_KERNEL(shaddow_feed,
+PD_REGISTER_KERNEL(shadow_feed,
                    CPU,
                    ALL_LAYOUT,
-                   phi::ShaddowFeedKernel,
+                   phi::ShadowFeedKernel,
                    bool,
                    float,
                    int32_t,
@@ -50,4 +51,4 @@ PD_REGISTER_KERNEL(shaddow_feed,
                    phi::complex128) {}
 PD_REGISTER_KERNEL(
-    shaddow_output, CPU, ALL_LAYOUT, phi::ShaddowOutputKernel, float) {}
+    shadow_output, CPU, ALL_LAYOUT, phi::ShadowOutputKernel, float) {}
@@ -26,12 +26,12 @@ void FeedWithPlaceKernel(const Context& ctx,
                          DenseTensor* out);
 template <typename T, typename Context>
-void ShaddowOutputKernel(const Context& ctx,
+void ShadowOutputKernel(const Context& ctx,
                          const DenseTensor& x,
                          DenseTensor* out);
 template <typename T, typename Context>
-void ShaddowFeedKernel(const Context& ctx,
+void ShadowFeedKernel(const Context& ctx,
                        const DenseTensor& x,
                        DenseTensor* out);
......
@@ -18,10 +18,10 @@
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/impl/feed_with_place_impl.h"
-PD_REGISTER_KERNEL(shaddow_feed,
+PD_REGISTER_KERNEL(shadow_feed,
                    GPU,
                    ALL_LAYOUT,
-                   phi::ShaddowFeedKernel,
+                   phi::ShadowFeedKernel,
                    bool,
                    float,
                    int32_t,
......
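The first argument of PD_REGISTER_KERNEL is the string key used for runtime lookup, so "shadow_feed" must match the yaml entry (kernel: func: [shadow_feed]) and the kernel_name attribute built in PdOpLowerToKernelPass; the old spelling only worked because it was misspelled consistently everywhere. A hedged sketch of checking such a registration through phi::KernelFactory, assuming its HasKernel API:

#include "paddle/phi/core/kernel_factory.h"

// Returns true if a float32 CPU kernel is registered under "shadow_feed".
bool HasShadowFeedOnCpu() {
  phi::KernelKey key(
      phi::Backend::CPU, phi::DataLayout::ANY, phi::DataType::FLOAT32);
  return phi::KernelFactory::Instance().HasKernel("shadow_feed", key);
}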
@@ -20,7 +20,7 @@
 namespace phi {
 template <typename T, typename Context>
-void ShaddowFeedKernel(const Context& ctx,
+void ShadowFeedKernel(const Context& ctx,
                        const DenseTensor& x,
                        DenseTensor* out) {
   ctx.template Alloc<T>(out);
......
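The kernel body above is cut off after ctx.template Alloc<T>(out). For orientation only, a hedged sketch of what a complete pass-through feed kernel with this signature typically looks like, assuming phi::Copy from paddle/phi/core/tensor_utils.h; this is not the commit's actual body:

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/tensor_utils.h"

namespace phi {
// Illustrative sketch: allocate out on the kernel's place, then copy x
// into it, so fed data lands on the device the kernel was selected for.
template <typename T, typename Context>
void ShadowFeedKernelSketch(const Context& ctx,
                            const DenseTensor& x,
                            DenseTensor* out) {
  ctx.template Alloc<T>(out);
  Copy(ctx, x, ctx.GetPlace(), /*blocking=*/false, out);
}
}  // namespace phi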