Unverified commit 85dbcefd, authored by wanghuancoder and committed by GitHub

[NewIR] add_n and combine support selected rows (#56754)

* add_n and combine support selected rows
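
A minimal usage sketch (not part of this diff) of the scenario the change targets: add_n sums a list of tensors, and a sparse embedding lookup is a common way to obtain SelectedRows gradients that flow through the same add_n / builtin.combine path the new IR previously handled only for DenseTensor. paddle.add_n and paddle.nn.Embedding are existing public APIs; the sparse-gradient framing is an assumption noted in the comments.

import paddle

# Dense case: add_n sums a list of tensors element-wise.
x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([4.0, 5.0, 6.0])
print(paddle.add_n([x, y]))  # Tensor with values [5., 7., 9.]

# Sparse case (assumption, for illustration only): with sparse=True the
# embedding gradient is a SelectedRows variable; accumulating such gradients
# exercises the add_n / combine path this commit extends to SelectedRows.
emb = paddle.nn.Embedding(10, 4, sparse=True)
out = emb(paddle.to_tensor([0, 2, 2]))
out.sum().backward()
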
Parent 2a8839b0
@@ -172,7 +172,7 @@ scalar_type_maps = {
    'bool': 'ir::BoolAttribute',
}
-_NO_NEED_GEN_OPS = {'add_n', 'split_grad'}
+_NO_NEED_GEN_OPS = {'add_n', 'add_n_', 'add_n_with_kernel', 'split_grad'}
def to_phi_and_fluid_op_name(op_item):
......
@@ -48,7 +48,10 @@ void PaddleDialect::initialize() {
#define GET_OP_LIST
#include "paddle/fluid/ir/dialect/paddle_dialect/ir/pd_op.h"  // NOLINT
      >();
-  RegisterOps<paddle::dialect::AddNOp, paddle::dialect::SplitGradOp>();
+  RegisterOps<paddle::dialect::AddNOp,
+              paddle::dialect::AddN_Op,
+              paddle::dialect::AddNWithKernelOp,
+              paddle::dialect::SplitGradOp>();
  RegisterInterfaces<ParameterConvertInterface>();
}
......
@@ -57,13 +57,18 @@ void AddNOp::Verify() {
          "The size %d of inputs must be equal to 1.", input_size));
  if (auto vec_type = (*this)->operand(0).type().dyn_cast<ir::VectorType>()) {
    for (size_t i = 0; i < vec_type.size(); ++i) {
-      PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>(),
+      PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
+                         vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
                     phi::errors::PreconditionNotMet(
                         "Type validation failed for the 0th input."));
    }
  } else {
    PADDLE_ENFORCE(
-        (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>(),
+        (*this)->operand(0).type().isa<paddle::dialect::DenseTensorType>() ||
+            (*this)
+                ->operand(0)
+                .type()
+                .isa<paddle::dialect::SelectedRowsType>(),
        phi::errors::PreconditionNotMet(
            "Type validation failed for the 0th input."));
  }
@@ -81,7 +86,8 @@ void AddNOp::Verify() {
      phi::errors::PreconditionNotMet(
          "The size %d of outputs must be equal to 1.", output_size));
  PADDLE_ENFORCE(
-      (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>(),
+      (*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
+          (*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
      phi::errors::PreconditionNotMet(
          "Type validation failed for the 0th output."));
}
@@ -146,6 +152,262 @@ void AddNOp::InferMeta(phi::InferMetaContext *infer_meta) {
  fn(infer_meta);
}
OpInfoTuple AddN_Op::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
paddle::dialect::OpInputInfo(
"inputs",
"ir::VectorType<paddle::dialect::DenseTensorType>",
false,
false,
false)};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
std::vector<paddle::dialect::OpOutputInfo> outputs = {
paddle::dialect::OpOutputInfo(
"out", "paddle::dialect::DenseTensorType", false, false)};
paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
"AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
return std::make_tuple(inputs, attributes, outputs, run_time_info, "add_n_");
}
void AddN_Op::Build(ir::Builder &builder,
ir::OperationArgument &argument,
ir::OpResult inputs_) {
VLOG(4) << "Builder construction inputs";
std::vector<ir::OpResult> argument_inputs = {inputs_};
argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
VLOG(4) << "Builder construction attributes";
VLOG(4) << "Builder construction outputs";
ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
(void)inputs;
std::vector<phi::DenseTensor> vec_dense_inputs;
for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
vec_dense_inputs.push_back(phi::DenseTensor(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
phi::DenseTensorMeta(
paddle::dialect::TransToPhiDataType(
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
inputs[i]
.dyn_cast<paddle::dialect::DenseTensorType>()
.data_layout(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
}
std::vector<phi::MetaTensor> vec_meta_inputs;
for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
}
std::vector<const phi::MetaTensor *> meta_inputs;
for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
meta_inputs.push_back(&vec_meta_inputs[i]);
}
phi::DenseTensor dense_out;
phi::MetaTensor meta_out(&dense_out);
phi::AddNInferMeta(meta_inputs, &meta_out);
std::vector<ir::Type> argument_outputs;
ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
ir::IrContext::Instance(),
paddle::dialect::TransToIrDataType(dense_out.dtype()),
dense_out.dims(),
dense_out.layout(),
dense_out.lod(),
dense_out.offset());
argument_outputs.push_back(out_dense_tensor_type);
argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
}
void AddN_Op::Verify() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: AddN_Op.";
VLOG(4) << "Verifying inputs:";
{
auto input_size = num_operands();
PADDLE_ENFORCE_EQ(
input_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of inputs must be equal to 1.", input_size));
if (auto vec_type =
(*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
for (size_t i = 0; i < vec_type.size(); ++i) {
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
} else {
PADDLE_ENFORCE((*this)->operand_source(0)
.type()
.isa<paddle::dialect::DenseTensorType>() ||
(*this)
->operand_source(0)
.type()
.isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
}
VLOG(4) << "Verifying attributes:";
{
// Attributes num is 0, not need to check attributes type.
}
VLOG(4) << "Verifying outputs:";
{
auto output_size = num_results();
PADDLE_ENFORCE_EQ(
output_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of outputs must be equal to 1.", output_size));
PADDLE_ENFORCE(
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th output."));
}
VLOG(4) << "End Verifying for: AddN_Op.";
}
void AddN_Op::InferMeta(phi::InferMetaContext *infer_meta) {
auto fn = PD_INFER_META(phi::AddNInferMeta);
fn(infer_meta);
}
OpInfoTuple AddNWithKernelOp::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
paddle::dialect::OpInputInfo(
"inputs",
"ir::VectorType<paddle::dialect::DenseTensorType>",
false,
false,
false)};
std::vector<paddle::dialect::OpAttributeInfo> attributes = {};
std::vector<paddle::dialect::OpOutputInfo> outputs = {
paddle::dialect::OpOutputInfo(
"out", "paddle::dialect::DenseTensorType", false, false)};
paddle::dialect::OpRunTimeInfo run_time_info = paddle::dialect::OpRunTimeInfo(
"AddNInferMeta", {"inputs"}, {"add_n"}, {"inputs"}, {}, {}, {}, {});
return std::make_tuple(
inputs, attributes, outputs, run_time_info, "add_n_with_kernel");
}
void AddNWithKernelOp::Build(ir::Builder &builder,
ir::OperationArgument &argument,
ir::OpResult inputs_) {
VLOG(4) << "Builder construction inputs";
std::vector<ir::OpResult> argument_inputs = {inputs_};
argument.AddOperands(argument_inputs.begin(), argument_inputs.end());
VLOG(4) << "Builder construction attributes";
VLOG(4) << "Builder construction outputs";
ir::VectorType inputs = inputs_.type().dyn_cast<ir::VectorType>();
(void)inputs;
std::vector<phi::DenseTensor> vec_dense_inputs;
for (size_t i = 0; i < static_cast<size_t>(inputs.size()); i++) {
vec_dense_inputs.push_back(phi::DenseTensor(
std::make_unique<paddle::experimental::DefaultAllocator>(
paddle::platform::CPUPlace())
.get(),
phi::DenseTensorMeta(
paddle::dialect::TransToPhiDataType(
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dtype()),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().dims(),
inputs[i]
.dyn_cast<paddle::dialect::DenseTensorType>()
.data_layout(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().lod(),
inputs[i].dyn_cast<paddle::dialect::DenseTensorType>().offset())));
}
std::vector<phi::MetaTensor> vec_meta_inputs;
for (size_t i = 0; i < vec_dense_inputs.size(); i++) {
vec_meta_inputs.push_back(phi::MetaTensor(&vec_dense_inputs[i]));
}
std::vector<const phi::MetaTensor *> meta_inputs;
for (size_t i = 0; i < static_cast<size_t>(vec_meta_inputs.size()); i++) {
meta_inputs.push_back(&vec_meta_inputs[i]);
}
phi::DenseTensor dense_out;
phi::MetaTensor meta_out(&dense_out);
phi::AddNInferMeta(meta_inputs, &meta_out);
std::vector<ir::Type> argument_outputs;
ir::Type out_dense_tensor_type = paddle::dialect::DenseTensorType::get(
ir::IrContext::Instance(),
paddle::dialect::TransToIrDataType(dense_out.dtype()),
dense_out.dims(),
dense_out.layout(),
dense_out.lod(),
dense_out.offset());
argument_outputs.push_back(out_dense_tensor_type);
argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
}
void AddNWithKernelOp::Verify() {
VLOG(4) << "Start Verifying inputs, outputs and attributes for: "
"AddNWithKernelOp.";
VLOG(4) << "Verifying inputs:";
{
auto input_size = num_operands();
PADDLE_ENFORCE_EQ(
input_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of inputs must be equal to 1.", input_size));
if (auto vec_type =
(*this)->operand_source(0).type().dyn_cast<ir::VectorType>()) {
for (size_t i = 0; i < vec_type.size(); ++i) {
PADDLE_ENFORCE(vec_type[i].isa<paddle::dialect::DenseTensorType>() ||
vec_type[i].isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
} else {
PADDLE_ENFORCE((*this)->operand_source(0)
.type()
.isa<paddle::dialect::DenseTensorType>() ||
(*this)
->operand_source(0)
.type()
.isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th input."));
}
}
VLOG(4) << "Verifying attributes:";
{
// Attributes num is 0, not need to check attributes type.
}
VLOG(4) << "Verifying outputs:";
{
auto output_size = num_results();
PADDLE_ENFORCE_EQ(
output_size,
1u,
phi::errors::PreconditionNotMet(
"The size %d of outputs must be equal to 1.", output_size));
PADDLE_ENFORCE(
(*this)->result(0).type().isa<paddle::dialect::DenseTensorType>() ||
(*this)->result(0).type().isa<paddle::dialect::SelectedRowsType>(),
phi::errors::PreconditionNotMet(
"Type validation failed for the 0th output."));
}
VLOG(4) << "End Verifying for: AddNWithKernelOp.";
}
void AddNWithKernelOp::InferMeta(phi::InferMetaContext *infer_meta) {
auto fn = PD_INFER_META(phi::AddNInferMeta);
fn(infer_meta);
}
const char *SplitGradOp::attributes_name[1] = {"axis"};
OpInfoTuple SplitGradOp::GetOpInfo() {
@@ -364,3 +626,5 @@ void SplitGradOp::InferMeta(phi::InferMetaContext *infer_meta) {
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)
@@ -24,6 +24,7 @@ paddle::dialect::AddNOp, paddle::dialect::SplitGradOp
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/interface/infermeta.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/interface/op_yaml_info.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/trait/inplace.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h" #include "paddle/fluid/ir/dialect/paddle_dialect/utils/op_yaml_info_util.h"
#include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h" #include "paddle/fluid/ir/dialect/paddle_dialect/utils/utils.h"
#include "paddle/ir/core/builder.h" #include "paddle/ir/core/builder.h"
@@ -51,6 +52,47 @@ class AddNOp : public ir::Op<AddNOp, OpYamlInfoInterface, InferMetaInterface> {
  static void InferMeta(phi::InferMetaContext *infer_meta);
};
class AddN_Op : public ir::Op<AddN_Op,
paddle::dialect::OpYamlInfoInterface,
paddle::dialect::InferMetaInterface,
paddle::dialect::InplaceTrait> {
public:
using Op::Op;
static const char *name() { return "pd.add_n_"; }
static constexpr const char **attributes_name = nullptr;
static constexpr uint32_t attributes_num = 0;
static OpInfoTuple GetOpInfo();
static void Build(ir::Builder &builder, // NOLINT
ir::OperationArgument &argument, // NOLINT
ir::OpResult inputs_);
void Verify();
ir::Value inputs() { return operand_source(0); }
ir::OpResult out() { return result(0); }
static void InferMeta(phi::InferMetaContext *infer_meta);
};
class AddNWithKernelOp : public ir::Op<AddNWithKernelOp,
paddle::dialect::OpYamlInfoInterface,
paddle::dialect::InferMetaInterface> {
public:
using Op::Op;
static const char *name() { return "pd.add_n_with_kernel"; }
static constexpr const char **attributes_name = nullptr;
static constexpr uint32_t attributes_num = 0;
static OpInfoTuple GetOpInfo();
static void Build(ir::Builder &builder, // NOLINT
ir::OperationArgument &argument, // NOLINT
ir::OpResult inputs_);
void Verify();
ir::Value inputs() { return operand_source(0); }
ir::OpResult out() { return result(0); }
static void InferMeta(phi::InferMetaContext *infer_meta);
};
class SplitGradOp : public ir::Op<SplitGradOp, OpYamlInfoInterface> {
 public:
  using Op::Op;
@@ -79,5 +121,7 @@ class SplitGradOp : public ir::Op<SplitGradOp, OpYamlInfoInterface> {
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SplitGradOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddN_Op)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AddNWithKernelOp)
#endif
@@ -118,8 +118,18 @@ void BuildPhiContext(ir::Operation* op,
      InListType inputs;
      auto& variable_array = var->Get<paddle::framework::VariableRefArray>();
      for (size_t i = 0; i < variable_array.size(); ++i) {
if (variable_array[i]->IsType<phi::DenseTensor>()) {
          inputs.emplace_back(InType(const_cast<phi::DenseTensor*>(
              &(variable_array[i]->Get<phi::DenseTensor>()))));
} else if (variable_array[i]->IsType<phi::SelectedRows>()) {
inputs.emplace_back(InType(const_cast<phi::SelectedRows*>(
&(variable_array[i]->Get<phi::SelectedRows>()))));
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Only support Vector<DenseTensor> and vector<SelectedRows> now, "
"not support vector<%d>.",
variable_array[i]->Type()));
}
      }
      ctx->EmplaceBackInputs(inputs);
    } else {
@@ -315,8 +325,18 @@ void BuildPhiContext(ir::Operation* op,
      auto& variable_array = inner_scope->FindVar(name_map.at(out_ptr))
                                 ->Get<paddle::framework::VariableRefArray>();
      for (size_t i = 0; i < variable_array.size(); ++i) {
if (variable_array[i]->IsType<phi::DenseTensor>()) {
          outputs.emplace_back(OutType(const_cast<phi::DenseTensor*>(
              &(variable_array[i]->Get<phi::DenseTensor>()))));
} else if (variable_array[i]->IsType<phi::SelectedRows>()) {
outputs.emplace_back(OutType(const_cast<phi::SelectedRows*>(
&(variable_array[i]->Get<phi::SelectedRows>()))));
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"Only support Vector<DenseTensor> and vector<SelectedRows> now, "
"not support vector<%d>.",
variable_array[i]->Type()));
}
      }
      ctx->EmplaceBackOutputs(outputs);
    } else {
......
@@ -149,21 +149,27 @@ bool SkipFeedOp(ir::Operation* op, const std::set<std::string>& feed_names) {
      op->attributes().at("name").dyn_cast<ir::StrAttribute>().AsString());
}
-std::vector<phi::DenseTensor> GetFakeTensorList(ir::Value new_input_tmp) {
-  std::vector<phi::DenseTensor> vec_res;
+std::vector<std::shared_ptr<phi::TensorBase>> GetFakeTensorList(
+    ir::Value new_input_tmp) {
+  std::vector<std::shared_ptr<phi::TensorBase>> vec_res;
  auto input_type = new_input_tmp.type();
-  std::vector<dialect::AllocatedDenseTensorType> types;
-  if (input_type.isa<dialect::AllocatedDenseTensorType>()) {
-    types.push_back(input_type.dyn_cast<dialect::AllocatedDenseTensorType>());
-  } else if (input_type.isa<ir::VectorType>()) {
-    auto vec_inner_types = input_type.dyn_cast<ir::VectorType>().data();
-    for (size_t i = 0; i < vec_inner_types.size(); ++i) {
-      types.push_back(
-          vec_inner_types[0].dyn_cast<dialect::AllocatedDenseTensorType>());
-    }
-  }
-  for (auto& type : types) {
+  auto build_fake_dense_tensor =
+      [](const dialect::AllocatedDenseTensorType& type) {
+        auto ptr = new phi::Allocation(nullptr, 0, type.place());
+        std::shared_ptr<phi::Allocation> holder(ptr);
+        auto dtype = TransToPhiDataType(type.dtype());
+        phi::DenseTensorMeta meta(
+            dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
+        return std::make_shared<phi::DenseTensor>(holder, meta);
+      };
+  auto build_fake_selected_rows =
+      [](const dialect::AllocatedSelectedRowsType& type) {
        auto ptr = new phi::Allocation(nullptr, 0, type.place());
        std::shared_ptr<phi::Allocation> holder(ptr);
@@ -173,10 +179,37 @@ std::vector<phi::DenseTensor> GetFakeTensorList(ir::Value new_input_tmp) {
        phi::DenseTensorMeta meta(
            dtype, type.dims(), type.data_layout(), type.lod(), type.offset());
-    phi::DenseTensor fake_tensor(holder, meta);
+        std::vector<int64_t> rows;
+        int64_t height = 0;
+        rows.clear();
+        auto sr = std::make_shared<phi::SelectedRows>(rows, height);
+        phi::DenseTensor dense_tensor(holder, meta);
+        *(sr->mutable_value()) = dense_tensor;
-    vec_res.push_back(fake_tensor);
+        return sr;
+      };
+  if (input_type.isa<dialect::AllocatedDenseTensorType>()) {
+    vec_res.push_back(build_fake_dense_tensor(
+        input_type.dyn_cast<dialect::AllocatedDenseTensorType>()));
+  } else if (input_type.isa<dialect::AllocatedSelectedRowsType>()) {
+    vec_res.push_back(build_fake_selected_rows(
+        input_type.dyn_cast<dialect::AllocatedSelectedRowsType>()));
+  } else if (input_type.isa<ir::VectorType>()) {
+    auto vec_inner_types = input_type.dyn_cast<ir::VectorType>().data();
+    for (size_t i = 0; i < vec_inner_types.size(); ++i) {
+      if (vec_inner_types[0].isa<dialect::AllocatedDenseTensorType>()) {
+        vec_res.push_back(build_fake_dense_tensor(
+            vec_inner_types[0].dyn_cast<dialect::AllocatedDenseTensorType>()));
+      } else if (vec_inner_types[0].isa<dialect::AllocatedSelectedRowsType>()) {
+        vec_res.push_back(build_fake_selected_rows(
+            vec_inner_types[0].dyn_cast<dialect::AllocatedSelectedRowsType>()));
+      }
-  }
+    }
+  }
  return vec_res;
}
@@ -514,7 +547,7 @@ phi::KernelKey GetKernelKey(
      auto fake_tensors = GetFakeTensorList(new_input_tmp);
      for (auto& fake_tensor : fake_tensors) {
-        kernel_key_parser.AssignKernelKeySet(fake_tensor);
+        kernel_key_parser.AssignKernelKeySet(*fake_tensor);
      }
      // Because we can't make sure the place when build data op
@@ -617,6 +650,12 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
            new_in.type()
                .dyn_cast<paddle::dialect::AllocatedDenseTensorType>()
                .place());
} else if (new_in.type()
.isa<paddle::dialect::AllocatedSelectedRowsType>()) {
out_places.push_back(
new_in.type()
.dyn_cast<paddle::dialect::AllocatedSelectedRowsType>()
.place());
        } else {
          PADDLE_THROW(phi::errors::Unimplemented(
              "only support dense tensor type for now"));
@@ -759,6 +798,14 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
    if (op_info_parser != nullptr) {
      kernel_fn_str = op_info_parser->OpRuntimeInfo().kernel_func[0];
    }
if (op_item->name() == "pd.add_n_" ||
op_item->name() == "pd.add_n_with_kernel") {
if (op_item->result(0).type().isa<dialect::SelectedRowsType>()) {
kernel_fn_str = "add_n_sr";
}
}
    auto kernel_key =
        GetKernelKey(op_item, place, map_value_pair, op_info_parser.get());
    VLOG(6) << "kernel type " << kernel_key;
@@ -929,9 +976,22 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
        for (size_t j = 0; j < pre_define_op->num_operands(); ++j) {
          auto in_i = map_value_pair.at(pre_define_op->operand_source(j));
          auto in_i_type = in_i.type();
-          auto place =
+          phi::Place place;
if (in_i_type.isa<dialect::AllocatedDenseTensorType>()) {
place =
                in_i_type.dyn_cast<dialect::AllocatedDenseTensorType>()
                    .place();
} else if (in_i_type
.isa<dialect::AllocatedSelectedRowsType>()) {
place =
in_i_type.dyn_cast<dialect::AllocatedSelectedRowsType>()
.place();
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"builtin.combine Input type only support "
"VectorType<DenseTensorType> and "
"VectorType<SelectedRowsType>"));
}
          // get input args def type
          auto args_def = kernel.args_def();
@@ -949,12 +1009,30 @@ std::unique_ptr<ir::Program> PdOpLowerToKernelPass(ir::Program* prog,
            // build memcopy op
            auto out_place =
                phi::TransToPhiPlace(kernel.InputAt(i).backend);
-            auto out_type = dialect::AllocatedDenseTensorType::get(
+            ir::Type out_type;
+            if (in_i_type.isa<dialect::AllocatedDenseTensorType>()) {
+              out_type = dialect::AllocatedDenseTensorType::get(
                  ctx,
                  out_place,
                  pre_define_op->operand_source(j)
                      .type()
                      .dyn_cast<dialect::DenseTensorType>());
} else if (in_i_type
.isa<dialect::AllocatedSelectedRowsType>()) {
out_type = dialect::AllocatedSelectedRowsType::get(
ctx,
out_place,
pre_define_op->operand_source(j)
.type()
.dyn_cast<dialect::SelectedRowsType>());
} else {
PADDLE_THROW(phi::errors::Unimplemented(
"builtin.combine Input type only support "
"VectorType<DenseTensorType> and "
"VectorType<SelectedRowsType>"));
}
            in_i = AddPlaceTransferOp(in_i,
                                      out_type,
                                      place,
......
@@ -1112,8 +1112,8 @@ struct AddNOpTranscriber : public OpTranscriber {
  }
  const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name);
  if (!op_info) {
-    IR_THROW(
-        "Op assign_value should have corresponding OpInfo pd.assign_value_");
+    IR_THROW("Op assign_value should have corresponding OpInfo %s",
+             target_op_name);
  }
  return op_info;
......
@@ -17,6 +17,7 @@ import random
import unittest
import numpy as np
from dygraph_to_static_util import test_and_compare_with_new_ir
from simnet_dygraph_model import BOW, HingeLoss
import paddle
@@ -176,6 +177,7 @@ def train(conf_dict, to_static):
class TestSimnet(unittest.TestCase):
    @test_and_compare_with_new_ir(True)
    def test_dygraph_static_same_loss(self):
        if fluid.is_compiled_with_cuda():
            fluid.set_flags({"FLAGS_cudnn_deterministic": True})
......
@@ -17,6 +17,7 @@ import random
import unittest
import numpy as np
from dygraph_to_static_util import test_and_compare_with_new_ir
from simnet_dygraph_model_v2 import BOW, HingeLoss
import paddle
@@ -176,6 +177,7 @@ def train(conf_dict, to_static):
class TestSimnet(unittest.TestCase):
    @test_and_compare_with_new_ir(True)
    def test_dygraph_static_same_loss(self):
        if paddle.is_compiled_with_cuda():
            paddle.fluid.set_flags({"FLAGS_cudnn_deterministic": True})
......