Unverified · Commit 274e5e54 · Authored by kangguangli · Committed by GitHub

[NewIR] Rename feed with place to data (#55778)

* fix bug: feed_with_place should consider variable existence

* fix

* fix build scope

* change method to set feed var name

* rename feed_with_place to placeholder

* fix

* rename to data

* fix

* fix
Parent: e3b6e02f
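The rename is mechanical, but it touches every place that treats the op as a kernel-less placeholder: forward/backward program construction, the new IR interpreter's skip lists, scope building, the program translator, the op YAML spec, and the phi kernels. As orientation, a minimal sketch of the skip-list check that recurs in the interpreter hunks below; the helper name and the set container are illustrative assumptions, while the op-name strings are taken verbatim from the diff:

#include <set>
#include <string>

// Hypothetical helper condensing the check repeated in BuildOpFuncList and
// NewIRInterpreter::BuildInstruction below: ops that merely declare data or
// wire variables are skipped when building kernel instructions.
bool IsNonKernelOp(const std::string& op_name) {
  static const std::set<std::string> kSkipped = {
      "builtin.combine",       "pd.feed",
      "builtin.set_parameter", "builtin.get_parameter",
      "builtin.slice",         "pd.data",  // was "pd.feed_with_place"
      "pd.shadow_output"};
  return kSkipped.count(op_name) > 0;
}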
@@ -377,7 +377,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
auto place = in_t.place().GetType();
auto op_desc = block->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
@@ -391,7 +391,7 @@ std::unique_ptr<::ir::Program> ConstructFowardIrProgram(
auto place = param.place().GetType();
auto op_desc = local_program.MutableBlock(0)->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
@@ -471,7 +471,7 @@ std::unique_ptr<::ir::Program> ConstructBackwardIrProgram(
continue;
}
auto op_desc = local_program.MutableBlock(0)->PrependOp();
op_desc->SetType("feed_with_place");
op_desc->SetType("data");
op_desc->SetAttr("index", 0);
// TODO(phlrain) : using tensor dtype
op_desc->SetAttr("dtype", 0);
......
@@ -980,7 +980,7 @@ void BuildOpFuncList(
if (op_name == "builtin.combine" || op_name == "pd.feed" ||
op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
op_name == "pd.data" || op_name == "pd.shadow_output") {
VLOG(6) << "skip process " << op_name;
continue;
}
......
@@ -1619,7 +1619,7 @@ void NewIRInterpreter::BuildInstruction() {
if (op_name == "builtin.combine" || op_name == "pd.feed" ||
op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shaddow_output") {
op_name == "pd.data" || op_name == "pd.shaddow_output") {
VLOG(6) << "skip process " << op_name;
continue;
}
......
@@ -264,8 +264,8 @@ void HandleForSpecialOp(
variable_list);
}
if (op_name == "pd.feed_with_place") {
VLOG(6) << "Handle for pd.feed_with_place";
if (op_name == "pd.data") {
VLOG(6) << "Handle for pd.data";
auto var_name =
op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString();
@@ -492,7 +492,7 @@ void BuildScope(const ir::Block& block,
if (op_name == "pd.feed" || op_name == "pd.fetch" ||
op_name == "builtin.combine" || op_name == "builtin.set_parameter" ||
op_name == "builtin.get_parameter" || op_name == "builtin.slice" ||
op_name == "pd.feed_with_place" || op_name == "pd.shadow_output") {
op_name == "pd.data" || op_name == "pd.shadow_output") {
HandleForSpecialOp(op,
inner_scope,
var_name_prefix,
......
@@ -172,7 +172,7 @@ phi::KernelKey GetKernelKey(
op->result(0).type().dyn_cast<DenseTensorType>().dtype())};
}
if (op->name() == "pd.feed_with_place") {
if (op->name() == "pd.data") {
// NOTE: for now the feed op doesn't need a kernel, so the data type is
// taken from the OpResult and the next op uses the base program's datatype
auto t =
......
@@ -986,7 +986,7 @@ struct FeedOpTranscriber : public OpTranscriber {
}
};
-struct FeedWithPlaceOpTranscriber : public OpTranscriber {
+struct DataOpTranscriber : public FeedOpTranscriber {
ir::AttributeMap TranslateOpAttribute(
ir::IrContext* ctx,
const std::string& normalized_op_name,
@@ -1007,16 +1007,6 @@ struct FeedWithPlaceOpTranscriber : public OpTranscriber {
return attribute_map;
}
-  std::vector<ir::OpResult> GenerateOperationInput(
-      ir::IrContext* ctx,
-      TranslationContext* param_map,
-      const OpDesc& op_desc,
-      const std::string& normalized_op_name,
-      const OpInputInfoList& input_infos,
-      ir::Program* program) override {
-    return {};
-  }
};
struct SplitOpTranscriber : public OpTranscriber {
@@ -1473,7 +1463,7 @@ OpTranslator::OpTranslator() {
special_handlers["assign_value"] = AssignValueOpTranscriber();
special_handlers["cast"] = CastOpTranscriber();
special_handlers["feed"] = FeedOpTranscriber();
special_handlers["feed_with_place"] = FeedWithPlaceOpTranscriber();
special_handlers["data"] = DataOpTranscriber();
special_handlers["fetch_v2"] = FetchOpTranscriber();
special_handlers["increment"] = IncrementOpTranscriber();
special_handlers["lookup_table_v2"] = EmbeddingOpTranscriber();
......
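Why the `GenerateOperationInput` override could be deleted in the hunk above: the old `FeedWithPlaceOpTranscriber` derived from the generic `OpTranscriber` and had to return an empty operand list explicitly, while the new `DataOpTranscriber` derives from `FeedOpTranscriber` and simply inherits feed's input-less translation, leaving only `TranslateOpAttribute` overridden. A schematic sketch with simplified stand-in types (not the real signatures):

#include <vector>

// Stand-in types only; the real transcribers live in op_translator.cc.
struct OpTranscriber {
  virtual ~OpTranscriber() = default;
  // General case: translate the op's input variables into IR operands.
  virtual std::vector<int> GenerateOperationInput() { return {1, 2, 3}; }
};

struct FeedOpTranscriber : OpTranscriber {
  // Feed-style ops take no inputs, so feed already returns an empty list.
  std::vector<int> GenerateOperationInput() override { return {}; }
};

// After this commit: inherits the empty-input behavior instead of
// re-implementing it, as FeedWithPlaceOpTranscriber used to.
struct DataOpTranscriber : FeedOpTranscriber {};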
@@ -630,6 +630,18 @@
     data_type : x
   backward : cumsum_grad
 
+- op : data
+  args : (int64_t index, DataType dtype, str name, Place place)
+  output : Tensor(out)
+  infer_meta :
+    func : FeedWithPlaceInferMeta
+    param : [index, dtype]
+  kernel:
+    func : data
+    param : [index, dtype]
+    data_type : dtype
+    backend : place
+
 - op : depthwise_conv2d
   args : (Tensor input, Tensor filter, int[] strides={1, 1}, int[] paddings={0, 0}, str padding_algorithm="EXPLICIT", int groups=1, int[] dilations={1, 1}, str data_format="NCHW")
   output : Tensor(out)
@@ -838,18 +850,6 @@
   inplace: (x -> out)
   backward : expm1_grad
 
-- op : feed_with_place
-  args : (int64_t index, DataType dtype, str name, Place place)
-  output : Tensor(out)
-  infer_meta :
-    func : FeedWithPlaceInferMeta
-    param : [index, dtype]
-  kernel:
-    func : feed_with_place
-    param : [index, dtype]
-    data_type : dtype
-    backend : place
-
 - op : fft_c2c
   args : (Tensor x, int64_t[] axes, str normalization, bool forward)
   output : Tensor
......
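The YAML entry above is what Paddle's op code generation consumes: `param : [index, dtype]` fixes the kernel's runtime arguments, `data_type : dtype` selects the kernel's dtype from the attribute rather than from an input tensor, and `backend : place` dispatches to the device named by the `place` argument. A sketch of the phi kernel declaration this maps onto, mirroring the data_kernel hunks further down in the diff:

#include <cstdint>

#include "paddle/phi/common/data_type.h"
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

// Declaration matching param : [index, dtype] plus the output; in this
// commit the body is still an empty stub, since the op only declares data.
template <typename T, typename Context>
void DataKernel(const Context& ctx,
                int64_t index,
                phi::DataType data_type,
                DenseTensor* out);

}  // namespace phi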
@@ -12,18 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/feed_with_place_kernel.h"
#include "paddle/phi/kernels/data_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
#include "paddle/phi/kernels/impl/data_impl.h"
#include "paddle/phi/kernels/funcs/tensor_formatter.h"
namespace phi {
template <typename T, typename Context>
-void FeedWithPlaceKernel(const Context& ctx,
+void DataKernel(const Context& ctx,
                int64_t index,
                phi::DataType data_type,
                DenseTensor* out) {}
@@ -35,8 +35,7 @@ void ShadowOutputKernel(const Context& ctx,
} // namespace phi
-PD_REGISTER_KERNEL(
-    feed_with_place, CPU, ALL_LAYOUT, phi::FeedWithPlaceKernel, float) {}
+PD_REGISTER_KERNEL(data, CPU, ALL_LAYOUT, phi::DataKernel, float) {}
PD_REGISTER_KERNEL(shadow_feed,
CPU,
......
@@ -19,7 +19,7 @@
namespace phi {
template <typename T, typename Context>
-void FeedWithPlaceKernel(const Context& ctx,
+void DataKernel(const Context& ctx,
                int64_t index,
                phi::DataType data_type,
                // std::string name,
......
@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/feed_with_place_kernel.h"
#include "paddle/phi/kernels/data_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/feed_with_place_impl.h"
#include "paddle/phi/kernels/impl/data_impl.h"
PD_REGISTER_KERNEL(shadow_feed,
GPU,
......
@@ -18,12 +18,12 @@ import paddle
 from paddle.fluid.layer_helper import LayerHelper
 
-def feed_with_place():
-    helper = LayerHelper('feed_with_place', **locals())
+def data():
+    helper = LayerHelper('data', **locals())
     out = helper.create_variable_for_type_inference('float32')
 
     helper.append_op(
-        type='feed_with_place',
+        type='data',
         inputs={},
         outputs={'out': out},
         attrs={
@@ -46,7 +46,7 @@ class TestNewIr(unittest.TestCase):
         new_scope = paddle.static.Scope()
         with paddle.static.scope_guard(new_scope):
             with paddle.static.program_guard(main_program):
-                out = feed_with_place()
+                out = data()
if __name__ == "__main__":
......