diff --git a/paddle/fluid/framework/executor_cache.cc b/paddle/fluid/framework/executor_cache.cc index 506ce36e47242dbc5b9ff9ceebbc510d902cb853..bb079937329c2524d976f0700cfd44a702f46a40 100644 --- a/paddle/fluid/framework/executor_cache.cc +++ b/paddle/fluid/framework/executor_cache.cc @@ -402,7 +402,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram( } auto op_desc = local_program.MutableBlock(0)->AppendOp(); - op_desc->SetType("shaddow_output"); + op_desc->SetType("shadow_output"); op_desc->SetAttr("name", name); op_desc->SetInput("x", {name}); op_desc->SetOutput("out", {"@EMPTY@"}); @@ -458,7 +458,7 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram( continue; } auto op_desc = local_program.MutableBlock(0)->AppendOp(); - op_desc->SetType("shaddow_output"); + op_desc->SetType("shadow_output"); op_desc->SetAttr("name", name); op_desc->SetInput("x", {name}); op_desc->SetOutput("out", {"@EMPTY@"}); diff --git a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc index 66c09760ffff61b44bc0dfeb27656b2a6155b04d..c03d57c5f391112d2774e115a9f6a1b6a42d2fc6 100644 --- a/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc +++ b/paddle/fluid/framework/new_executor/interpreter/interpreter_util.cc @@ -980,7 +980,7 @@ void BuildOpFuncList( if (op_name == "builtin.combine" || op_name == "pd.feed" || op_name == "builtin.set_parameter" || op_name == "builtin.get_parameter" || op_name == "builtin.slice" || - op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") { + op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") { VLOG(6) << "skip process " << op_name; continue; } diff --git a/paddle/fluid/ir/dialect/pd_op.yaml b/paddle/fluid/ir/dialect/pd_op.yaml index 9b113c02cf953a00bf0a893b045025b57e25c0af..5796cea48b13aff752caedf48468e3208afbba53 100644 --- a/paddle/fluid/ir/dialect/pd_op.yaml +++ b/paddle/fluid/ir/dialect/pd_op.yaml @@ -277,7 
+277,7 @@ backward: null -- name: shaddow_feed +- name: shadow_feed inputs: - typename: Tensor name: x @@ -293,7 +293,7 @@ func: UnchangedInferMeta param: [x] kernel: - func: [shaddow_feed] + func: [shadow_feed] param: [x] backend: null layout: null diff --git a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc index 0388ee9791a35b7ff2e0c0bd026ba6c486e6c87a..d78a1291b1543045f1f2ca1f92523039a4be7bed 100644 --- a/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc +++ b/paddle/fluid/ir/phi_kernel_adaptor/phi_kernel_util.cc @@ -278,8 +278,8 @@ void HandleForSpecialOp( (*value_2_var_name)[value] = param_name; } - if (op_name == "pd.shaddow_output") { - VLOG(6) << "Handle for pd.shaddow_ouptut"; + if (op_name == "pd.shadow_output") { + VLOG(6) << "Handle for pd.shadow_output"; auto var_name = op->attributes().at("name").dyn_cast().AsString(); @@ -408,7 +408,7 @@ void BuildScope(const ir::Block& block, if (op_name == "pd.feed" || op_name == "pd.fetch" || op_name == "builtin.combine" || op_name == "builtin.set_parameter" || op_name == "builtin.get_parameter" || op_name == "builtin.slice" || - op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") { + op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") { HandleForSpecialOp(op, inner_scope, var_name_prefix, diff --git a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc index 15bc6d7c3d058c550db7339c524cceaff98076f8..f85192bada6d3933bea135fe5463d7fa931cbc95 100644 --- a/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/ir/transforms/pd_op_to_kernel_pass.cc @@ -437,30 +437,30 @@ std::unique_ptr PdOpLowerToKernelPass(ir::Program* prog, program->block()->push_back(op); if ((*it)->name() == "pd.feed" && platform::is_gpu_place(place)) { - // add shaddow feed op - phi::KernelKey shaddow_key{ + // add shadow feed op + phi::KernelKey shadow_key{ phi::Backend::GPU,
phi::DataLayout::ANY, TransToPhiDataType( (*it)->result(0).type().dyn_cast().dtype())}; std::unordered_map attr_map{ - {"op_name", ir::StrAttribute::get(ctx, "pd.shaddow_feed")}, - {"kernel_name", ir::StrAttribute::get(ctx, "shaddow_feed")}, - {"kernel_key", dialect::KernelAttribute::get(ctx, shaddow_key)}}; + {"op_name", ir::StrAttribute::get(ctx, "pd.shadow_feed")}, + {"kernel_name", ir::StrAttribute::get(ctx, "shadow_feed")}, + {"kernel_key", dialect::KernelAttribute::get(ctx, shadow_key)}}; auto out_type = paddle::dialect::AllocatedDenseTensorType::get( ctx, - phi::TransToPhiPlace(shaddow_key.backend()), + phi::TransToPhiPlace(shadow_key.backend()), (*it)->result(0).type().dyn_cast()); - ir::Operation* shaddow_op = + ir::Operation* shadow_op = ir::Operation::Create({op->result(0)}, attr_map, {out_type}, op_info); - map_op_pair[*it] = shaddow_op; - program->block()->push_back(shaddow_op); + map_op_pair[*it] = shadow_op; + program->block()->push_back(shadow_op); if ((*it)->num_results() > 0) { - for (size_t i = 0; i < shaddow_op->num_results(); ++i) { - map_value_pair[(*it)->result(i)] = shaddow_op->result(i); + for (size_t i = 0; i < shadow_op->num_results(); ++i) { + map_value_pair[(*it)->result(i)] = shadow_op->result(i); } } } diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 497b002f41ee94e8e6970751318444629de359c3..5b03badf25f9774f7baac3545e4b6d2568ee8cb6 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -1136,7 +1136,7 @@ struct FetchOpTranscriber : public OpTranscriber { } }; -struct ShaddowOutputOpTranscriber : public OpTranscriber { +struct ShadowOutputOpTranscriber : public OpTranscriber { ir::Operation* operator()(ir::IrContext* ctx, TranslationContext* param_map, const OpDesc& op_desc, @@ -1463,7 +1463,7 @@ OpTranslator::OpTranslator() { special_handlers["reduce_all"] = ReduceOpTranscriber(); 
special_handlers["reduce_any"] = ReduceOpTranscriber(); special_handlers["rnn"] = RnnOpTranscriber(); - special_handlers["shaddow_output"] = ShaddowOutputOpTranscriber(); + special_handlers["shadow_output"] = ShadowOutputOpTranscriber(); special_handlers["split"] = SplitOpTranscriber(); special_handlers["sum"] = AddNOpTranscriber(); diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 2a4cb3e52593518961340997fbc48d661810c2be..c566a371957977650ac22ea68717036f67ee23ad 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -2412,6 +2412,10 @@ extra : attrs : [bool use_mkldnn=false] +- op : shadow_output + inputs: {x: x} + outputs: {out: out} + - op : shape inputs : input : Input diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml index 167e19a0fb9ae37015658fdec7df830eb146c89f..78cbd7c65188d0129dd163aa10b7e8db148afd0c 100644 --- a/paddle/phi/api/yaml/ops.yaml +++ b/paddle/phi/api/yaml/ops.yaml @@ -2256,14 +2256,14 @@ optional : master_param, master_param_out inplace : (param -> param_out), (master_param -> master_param_out) -- op : shaddow_output +- op : shadow_output args : (Tensor x, str name) output : Tensor(out) infer_meta : func : UnchangedInferMeta param : [x] kernel: - func : shaddow_output + func : shadow_output param : [x] - op : shape diff --git a/paddle/phi/kernels/cpu/feed_with_place_kernel.cc b/paddle/phi/kernels/cpu/feed_with_place_kernel.cc index ce346472821afdc11264ed65edebf96f2261b265..aaafeb00d0ff5516f26ae096e0a3df99d17d92c7 100644 --- a/paddle/phi/kernels/cpu/feed_with_place_kernel.cc +++ b/paddle/phi/kernels/cpu/feed_with_place_kernel.cc @@ -27,18 +27,19 @@ void FeedWithPlaceKernel(const Context& ctx, DenseTensor* out) {} template -void ShaddowOutputKernel(const Context& ctx, - const DenseTensor& x, - DenseTensor* out) {} +void ShadowOutputKernel(const Context& ctx, + const DenseTensor& x, + DenseTensor* out) {} + } // namespace phi PD_REGISTER_KERNEL( 
feed_with_place, CPU, ALL_LAYOUT, phi::FeedWithPlaceKernel, float) {} -PD_REGISTER_KERNEL(shaddow_feed, +PD_REGISTER_KERNEL(shadow_feed, CPU, ALL_LAYOUT, - phi::ShaddowFeedKernel, + phi::ShadowFeedKernel, bool, float, int32_t, @@ -50,4 +51,4 @@ PD_REGISTER_KERNEL(shaddow_feed, phi::complex128) {} PD_REGISTER_KERNEL( - shaddow_output, CPU, ALL_LAYOUT, phi::ShaddowOutputKernel, float) {} + shadow_output, CPU, ALL_LAYOUT, phi::ShadowOutputKernel, float) {} diff --git a/paddle/phi/kernels/feed_with_place_kernel.h b/paddle/phi/kernels/feed_with_place_kernel.h index 725ec0c508af1df4e979fc827265eb7187a86f02..1d173797fd864cc3c9ee732a5f044a80ba00107c 100644 --- a/paddle/phi/kernels/feed_with_place_kernel.h +++ b/paddle/phi/kernels/feed_with_place_kernel.h @@ -26,13 +26,13 @@ void FeedWithPlaceKernel(const Context& ctx, DenseTensor* out); template -void ShaddowOutputKernel(const Context& ctx, - const DenseTensor& x, - DenseTensor* out); +void ShadowOutputKernel(const Context& ctx, + const DenseTensor& x, + DenseTensor* out); template -void ShaddowFeedKernel(const Context& ctx, - const DenseTensor& x, - DenseTensor* out); +void ShadowFeedKernel(const Context& ctx, + const DenseTensor& x, + DenseTensor* out); } // namespace phi diff --git a/paddle/phi/kernels/gpu/feed_with_place_kernel.cu b/paddle/phi/kernels/gpu/feed_with_place_kernel.cu index 07d4c8719da2c77190d6b18b1699400f97aa3ed0..f848ff0c2b174256515e753e7334cb07032798c5 100644 --- a/paddle/phi/kernels/gpu/feed_with_place_kernel.cu +++ b/paddle/phi/kernels/gpu/feed_with_place_kernel.cu @@ -18,10 +18,10 @@ #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/impl/feed_with_place_impl.h" -PD_REGISTER_KERNEL(shaddow_feed, +PD_REGISTER_KERNEL(shadow_feed, GPU, ALL_LAYOUT, - phi::ShaddowFeedKernel, + phi::ShadowFeedKernel, bool, float, int32_t, diff --git a/paddle/phi/kernels/impl/feed_with_place_impl.h b/paddle/phi/kernels/impl/feed_with_place_impl.h index 
a7602c2d37927ca28db332b4be2d2a5cc6249d72..269c4c886dfe89ae63ef6128d38c1a828de3dfc1 100644 --- a/paddle/phi/kernels/impl/feed_with_place_impl.h +++ b/paddle/phi/kernels/impl/feed_with_place_impl.h @@ -20,9 +20,9 @@ namespace phi { template -void ShaddowFeedKernel(const Context& ctx, - const DenseTensor& x, - DenseTensor* out) { +void ShadowFeedKernel(const Context& ctx, + const DenseTensor& x, + DenseTensor* out) { ctx.template Alloc(out); if (x.place() == out->place()) { out->ShareDataWith(x);