Unverified commit 274e5e54, authored by kangguangli, committed by GitHub

[NewIR] Rename feed with place to data (#55778)

* fix bug: feed_with_place should consider variable existence

* fix

* fix build scope

* change method to set feed var name

* remove feed_with_place to placeholder

* fix

* rename to data

* fix

* fix
Parent e3b6e02f
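
In short, this commit renames the placeholder op feed_with_place to data everywhere it surfaces: the legacy program-construction helpers, the new-IR interpreter and scope builder, kernel-key inference, the program translator, the ops YAML, the phi kernels, and the Python test. For orientation, below is a minimal sketch of how the renamed op is appended to a static program from Python, patterned on the updated test at the end of this diff; the attribute values are illustrative assumptions read off the op's YAML signature (index, dtype, name, place), not something this commit pins down.

    import paddle
    from paddle.fluid.layer_helper import LayerHelper

    paddle.enable_static()


    def build_data_op():
        # Append a bare 'data' op (formerly 'feed_with_place') to the current
        # static program; mirrors the helper in the updated test below.
        helper = LayerHelper('data', **locals())
        out = helper.create_variable_for_type_inference('float32')
        helper.append_op(
            type='data',
            inputs={},
            outputs={'out': out},
            attrs={
                # Assumed attributes, inferred from the YAML args
                # (int64_t index, DataType dtype, str name, Place place);
                # their exact encoding is not shown in this commit.
                'index': 0,
                'dtype': 0,  # placeholder dtype code, as on the C++ side
                'name': 'x',
            },
        )
        return out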
@@ -377,7 +377,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
     auto place = in_t.place().GetType();
     auto op_desc = block->PrependOp();
-    op_desc->SetType("feed_with_place");
+    op_desc->SetType("data");
     op_desc->SetAttr("index", 0);
     // TODO(phlrain) : using tensor dtype
     op_desc->SetAttr("dtype", 0);
@@ -391,7 +391,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
     auto place = param.place().GetType();
     auto op_desc = local_program.MutableBlock(0)->PrependOp();
-    op_desc->SetType("feed_with_place");
+    op_desc->SetType("data");
     op_desc->SetAttr("index", 0);
     // TODO(phlrain) : using tensor dtype
     op_desc->SetAttr("dtype", 0);
@@ -471,7 +471,7 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
       continue;
     }
     auto op_desc = local_program.MutableBlock(0)->PrependOp();
-    op_desc->SetType("feed_with_place");
+    op_desc->SetType("data");
     op_desc->SetAttr("index", 0);
     // TODO(phlrain) : using tensor dtype
     op_desc->SetAttr("dtype", 0);
...
@@ -980,7 +980,7 @@ void BuildOpFuncList(
     if (op_name == "builtin.combine" || op_name == "pd.feed" ||
         op_name == "builtin.set_parameter" ||
         op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
-        op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
+        op_name == "pd.data" || op_name == "pd.shadow_output") {
       VLOG(6) << "skip process " << op_name;
       continue;
     }
...
@@ -1619,7 +1619,7 @@ void NewIRInterpreter::BuildInstruction() {
     if (op_name == "builtin.combine" || op_name == "pd.feed" ||
         op_name == "builtin.set_parameter" ||
         op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
-        op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") {
+        op_name == "pd.data" || op_name == "pd.shaddow_output") {
       VLOG(6) << "skip process " << op_name;
       continue;
     }
...
@@ -264,8 +264,8 @@ void HandleForSpecialOp(
                     variable_list);
   }
 
-  if (op_name == "pd.feed_with_place") {
-    VLOG(6) << "Handle for pd.feed_with_place";
+  if (op_name == "pd.data") {
+    VLOG(6) << "Handle for pd.data";
     auto var_name =
         op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString();
@@ -492,7 +492,7 @@ void BuildScope(const ir::Block& block,
   if (op_name == "pd.feed" || op_name == "pd.fetch" ||
       op_name == "builtin.combine" || op_name == "builtin.set_parameter" ||
       op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
-      op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
+      op_name == "pd.data" || op_name == "pd.shadow_output") {
     HandleForSpecialOp(op,
                        inner_scope,
                        var_name_prefix,
...
@@ -172,7 +172,7 @@ phi::KernelKey GetKernelKey(
         op->result(0).type().dyn_cast<DenseTensorType>().dtype())};
   }
 
-  if (op->name() == "pd.feed_with_place") {
+  if (op->name() == "pd.data") {
     // NOTE, for now feed op don't need a kernel, so the data type from Op
     // Result the next op use base program datatype
     auto t =
...
@@ -986,7 +986,7 @@ struct FeedOpTranscriber : public OpTranscriber {
   }
 };
 
-struct FeedWithPlaceOpTranscriber : public OpTranscriber {
+struct DataOpTranscriber : public FeedOpTranscriber {
   ir::AttributeMap TranslateOpAttribute(
       ir::IrContext* ctx,
       const std::string& normalized_op_name,
@@ -1007,16 +1007,6 @@ struct FeedWithPlaceOpTranscriber : public OpTranscriber {
     return attribute_map;
   }
 
-  std::vector<ir::OpResult> GenerateOperationInput(
-      ir::IrContext* ctx,
-      TranslationContext* param_map,
-      const OpDesc& op_desc,
-      const std::string& normalized_op_name,
-      const OpInputInfoList& input_infos,
-      ir::Program* program) override {
-    return {};
-  }
 };
 
 struct SplitOpTranscriber : public OpTranscriber {
@@ -1473,7 +1463,7 @@ OpTranslator::OpTranslator() {
   special_handlers["assign_value"] = AssignValueOpTranscriber();
   special_handlers["cast"] = CastOpTranscriber();
   special_handlers["feed"] = FeedOpTranscriber();
-  special_handlers["feed_with_place"] = FeedWithPlaceOpTranscriber();
+  special_handlers["data"] = DataOpTranscriber();
   special_handlers["fetch_v2"] = FetchOpTranscriber();
   special_handlers["increment"] = IncrementOpTranscriber();
   special_handlers["lookup_table_v2"] = EmbeddingOpTranscriber();
...
@@ -630,6 +630,18 @@
     data_type : x
   backward : cumsum_grad
 
+- op : data
+  args : (int64_t index, DataType dtype, str name, Place place)
+  output : Tensor(out)
+  infer_meta :
+    func : FeedWithPlaceInferMeta
+    param : [index, dtype]
+  kernel:
+    func : data
+    param : [index, dtype]
+    data_type : dtype
+    backend : place
+
 - op : depthwise_conv2d
   args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
   output : Tensor(out)
@@ -838,18 +850,6 @@
   inplace: (x -> out)
   backward : expm1_grad
 
-- op : feed_with_place
-  args : (int64_t index, DataType dtype, str name, Place place)
-  output : Tensor(out)
-  infer_meta :
-    func : FeedWithPlaceInferMeta
-    param : [index, dtype]
-  kernel:
-    func : feed_with_place
-    param : [index, dtype]
-    data_type : dtype
-    backend : place
-
 - op : fft_c2c
   args : (Tensor x, int64_t[] axes, str normalization, bool forward)
   output : Tensor
...
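
Aside on the YAML entry above: it is the single source from which the op's C++ API and kernel binding are generated. kernel.func : data together with param : [index, dtype] means the generated kernel receives only index and dtype (matching the DataKernel signature in the phi files below), while data_type : dtype and backend : place pick the kernel's data type and device from the op's attributes rather than from an input tensor, since the op has no tensor inputs. A hedged end-to-end check, patterned on the updated test at the bottom of this diff (build_data_op is the illustrative helper sketched near the top of this page; the final assertion is an assumption, not part of the commit):

    import unittest

    import paddle

    paddle.enable_static()


    class TestDataOp(unittest.TestCase):
        def test_build(self):
            main_program = paddle.static.Program()
            new_scope = paddle.static.Scope()
            with paddle.static.scope_guard(new_scope):
                with paddle.static.program_guard(main_program):
                    out = build_data_op()  # helper sketched above
            # The program should record the op under its new name.
            self.assertEqual(main_program.block(0).ops[-1].type, 'data')


    if __name__ == "__main__":
        unittest.main()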
@@ -12,21 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/feed_with_place_kernel.h"
+#include "paddle/phi/kernels/data_kernel.h"
 
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
+#include "paddle/phi/kernels/impl/data_impl.h"
 #include "paddle/phi/kernels/funcs/tensor_formatter.h"
 
 namespace phi {
 
 template <typename T, typename Context>
-void FeedWithPlaceKernel(const Context& ctx,
-                         int64_t index,
-                         phi::DataType data_type,
-                         DenseTensor* out) {}
+void DataKernel(const Context& ctx,
+                int64_t index,
+                phi::DataType data_type,
+                DenseTensor* out) {}
 
 template <typename T, typename Context>
 void ShadowOutputKernel(const Context& ctx,
@@ -35,8 +35,7 @@ void ShadowOutputKernel(const Context& ctx,
 
 }  // namespace phi
 
-PD_REGISTER_KERNEL(
-    feed_with_place, CPU, ALL_LAYOUT, phi::FeedWithPlaceKernel, float) {}
+PD_REGISTER_KERNEL(data, CPU, ALL_LAYOUT, phi::DataKernel, float) {}
 
 PD_REGISTER_KERNEL(shadow_feed,
                    CPU,
...
@@ -19,11 +19,11 @@
 namespace phi {
 
 template <typename T, typename Context>
-void FeedWithPlaceKernel(const Context& ctx,
-                         int64_t index,
-                         phi::DataType data_type,
-                         // std::string name,
-                         DenseTensor* out);
+void DataKernel(const Context& ctx,
+                int64_t index,
+                phi::DataType data_type,
+                // std::string name,
+                DenseTensor* out);
 
 template <typename T, typename Context>
 void ShadowOutputKernel(const Context& ctx,
...
@@ -12,11 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/phi/kernels/feed_with_place_kernel.h"
+#include "paddle/phi/kernels/data_kernel.h"
 
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
-#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
+#include "paddle/phi/kernels/impl/data_impl.h"
 
 PD_REGISTER_KERNEL(shadow_feed,
                    GPU,
...
@@ -18,12 +18,12 @@ import paddle
 from paddle.fluid.layer_helper import LayerHelper
 
 
-def feed_with_place():
-    helper = LayerHelper('feed_with_place', **locals())
+def data():
+    helper = LayerHelper('data', **locals())
     out = helper.create_variable_for_type_inference('float32')
     helper.append_op(
-        type='feed_with_place',
+        type='data',
         inputs={},
         outputs={'out': out},
         attrs={
@@ -46,7 +46,7 @@ class TestNewIr(unittest.TestCase):
         new_scope = paddle.static.Scope()
         with paddle.static.scope_guard(new_scope):
             with paddle.static.program_guard(main_program):
-                out = feed_with_place()
+                out = data()
 
 
 if __name__ == "__main__":
...