Commit 1f8d8493 authored by zhupengyang, committed by tensor-tang

[NPU] fix bug: npu permute's input w needs a unique name (#1832)

* fix bug: npu permute's input w needs a unique name

test=develop

* use unique names for bn, eltwise, pool, shuffle_channel, softmax, and transpose nodes

test=develop
Parent 642e72b7
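The common thread across the hunks below is that every converter now derives its HiAI node name from UniqueName(op_type) instead of a hard-coded literal such as "batch_norm" or "transpose_w", so a model containing several ops of the same type no longer produces graph nodes or scope variables with clashing names. The snippet below is only an illustration of that naming idea, assuming a simple counter-based helper; it is not the actual Paddle-Lite UniqueName implementation.

#include <iostream>
#include <string>
#include <unordered_map>

// Illustrative sketch, not the real Paddle-Lite helper: hand out a fresh
// suffix per op type so repeated ops of one type cannot collide.
std::string UniqueNameSketch(const std::string& op_type) {
  static std::unordered_map<std::string, int> counters;
  return op_type + "_" + std::to_string(counters[op_type]++);
}

int main() {
  // Two transpose ops in one model now get distinct dummy-weight variables,
  // e.g. "transpose_0/w" and "transpose_1/w", instead of a shared "transpose_w".
  std::cout << UniqueNameSketch("transpose") + "/w" << std::endl;
  std::cout << UniqueNameSketch("transpose") + "/w" << std::endl;
  return 0;
}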
@@ -30,12 +30,14 @@ namespace bridge {
 node_map_type BatchNormConverter(
     const std::shared_ptr<lite::OpLite> batch_norm_op,
     const node_map_type& inputs_map) {
-  LOG(INFO) << "converting batchnorm...";
-  lite::Scope* scope = batch_norm_op->scope();
-  const lite::OpInfo* op_info = batch_norm_op->op_info();
+  auto scope = batch_norm_op->scope();
+  auto op_info = batch_norm_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
+  LOG(INFO) << "Converting " + op_type + "...";
-  std::shared_ptr<ge::op::BatchNorm> output_node =
-      std::make_shared<ge::op::BatchNorm>(UniqueName("batch_norm"));
+  std::shared_ptr<ge::op::BatchNorm> batch_norm_node =
+      std::make_shared<ge::op::BatchNorm>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
   auto scale_var_name = op_info->Input("Scale").front();
@@ -68,21 +70,21 @@ node_map_type BatchNormConverter(
   int npu_mode = 1;  // bnScale, bnBias tensor dims are 1xCx1x1
   bool npu_use_global_stats = op_info->GetAttr<bool>("use_global_stats");
-  output_node->set_input_x(*inputs_map.at(x_var_name));
-  output_node->set_input_scale(*npu_scale);
-  output_node->set_input_b(*npu_bias);
-  output_node->set_input_mean(*npu_mean);
-  output_node->set_input_variance(*npu_variance);
-  output_node->set_attr_momentum(npu_momentum);
-  output_node->set_attr_epsilon(npu_epsilon);
-  output_node->set_attr_mode(npu_mode);
-  output_node->set_attr_use_global_stats(npu_use_global_stats);
+  batch_norm_node->set_input_x(*inputs_map.at(x_var_name));
+  batch_norm_node->set_input_scale(*npu_scale);
+  batch_norm_node->set_input_b(*npu_bias);
+  batch_norm_node->set_input_mean(*npu_mean);
+  batch_norm_node->set_input_variance(*npu_variance);
+  batch_norm_node->set_attr_momentum(npu_momentum);
+  batch_norm_node->set_attr_epsilon(npu_epsilon);
+  batch_norm_node->set_attr_mode(npu_mode);
+  batch_norm_node->set_attr_use_global_stats(npu_use_global_stats);
   OpList::Global().add(inputs_map.at(x_var_name));
-  OpList::Global().add(output_node);
+  OpList::Global().add(batch_norm_node);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Y").front()] = output_node;
+  outputs_map[op_info->Output("Y").front()] = batch_norm_node;
   return outputs_map;
 }
......
@@ -30,11 +30,14 @@ namespace bridge {
 node_map_type ElementwiseConverter(
     const std::shared_ptr<lite::OpLite> elementwise_op,
     const node_map_type& inputs_map) {
+  auto scope = elementwise_op->scope();
+  auto op_info = elementwise_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
   LOG(INFO) << "converting elementwise...";
-  lite::Scope* scope = elementwise_op->scope();
-  const lite::OpInfo* op_info = elementwise_op->op_info();
-  std::shared_ptr<ge::op::Eltwise> output_node =
-      std::make_shared<ge::op::Eltwise>(UniqueName("elementwise"));
+  std::shared_ptr<ge::op::Eltwise> elementwise_node =
+      std::make_shared<ge::op::Eltwise>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
   auto y_var_name = op_info->Input("Y").front();
@@ -43,27 +46,27 @@ node_map_type ElementwiseConverter(
       << "npu elementwise only support inputs with same size";
   CHECK(inputs_map.find(x_var_name) != inputs_map.end());
-  output_node->set_input_x1(*inputs_map.at(x_var_name));
+  elementwise_node->set_input_x1(*inputs_map.at(x_var_name));
   OpList::Global().add(inputs_map.at(x_var_name));
   if (inputs_map.find(y_var_name) != inputs_map.end()) {
-    output_node->set_input_x2(*inputs_map.at(y_var_name));
+    elementwise_node->set_input_x2(*inputs_map.at(y_var_name));
     OpList::Global().add(inputs_map.at(y_var_name));
   } else {
     auto consty = std::make_shared<ge::op::Const>(y_var_name);
     auto* y = scope->FindVar(y_var_name)->GetMutable<Tensor>();
     consty->set_attr_value(CvtFromLiteTensor(y));
-    output_node->set_input_x2(*consty);
+    elementwise_node->set_input_x2(*consty);
     OpList::Global().add(consty);
   }
-  OpList::Global().add(output_node);
+  OpList::Global().add(elementwise_node);
   // paddlelite has sum only
-  output_node->set_attr_mode(1);
+  elementwise_node->set_attr_mode(1);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Out").front()] = output_node;
+  outputs_map[op_info->Output("Out").front()] = elementwise_node;
   return outputs_map;
 }
......
@@ -29,12 +29,14 @@ namespace bridge {
 node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
                             const node_map_type& inputs_map) {
-  LOG(INFO) << "converting pool...";
-  lite::Scope* scope = pool_op->scope();
-  const lite::OpInfo* op_info = pool_op->op_info();
+  auto scope = pool_op->scope();
+  auto op_info = pool_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
+  LOG(INFO) << "Converting " + op_type + "...";
-  std::shared_ptr<ge::op::Pooling> output_node =
-      std::make_shared<ge::op::Pooling>(UniqueName("pool"));
+  std::shared_ptr<ge::op::Pooling> pool_node =
+      std::make_shared<ge::op::Pooling>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
   auto pooling_type = op_info->GetAttr<std::string>("pooling_type");
   int npu_mode = 0;
@@ -61,21 +63,21 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
     npu_ceil_mode = op_info->GetAttr<bool>("ceil_mode") ? 1 : 0;
   }
-  output_node->set_input_x(*inputs_map.at(x_var_name));
-  output_node->set_attr_mode(npu_mode);
-  output_node->set_attr_pad_mode(0);
-  output_node->set_attr_global_pooling(npu_global_pooling);
-  output_node->set_attr_window(npu_window);
-  output_node->set_attr_pad(npu_pad);
-  output_node->set_attr_stride(npu_stride);
-  output_node->set_attr_ceil_mode(npu_ceil_mode);
+  pool_node->set_input_x(*inputs_map.at(x_var_name));
+  pool_node->set_attr_mode(npu_mode);
+  pool_node->set_attr_pad_mode(0);
+  pool_node->set_attr_global_pooling(npu_global_pooling);
+  pool_node->set_attr_window(npu_window);
+  pool_node->set_attr_pad(npu_pad);
+  pool_node->set_attr_stride(npu_stride);
+  pool_node->set_attr_ceil_mode(npu_ceil_mode);
   // output_node->set_attr_data_mode(npu_data_mode);
   OpList::Global().add(inputs_map.at(x_var_name));
-  OpList::Global().add(output_node);
+  OpList::Global().add(pool_node);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Out").front()] = output_node;
+  outputs_map[op_info->Output("Out").front()] = pool_node;
   return outputs_map;
 }
......
@@ -30,22 +30,24 @@ namespace bridge {
 node_map_type ShuffleChannelConverter(
     const std::shared_ptr<lite::OpLite> shuffle_channel_op,
     const node_map_type& inputs_map) {
-  LOG(INFO) << "converting shuffle_channel...";
-  lite::Scope* scope = shuffle_channel_op->scope();
-  const lite::OpInfo* op_info = shuffle_channel_op->op_info();
-  std::shared_ptr<ge::op::ShuffleChannel> output_node =
-      std::make_shared<ge::op::ShuffleChannel>(UniqueName("shuffle_channel"));
+  auto scope = shuffle_channel_op->scope();
+  auto op_info = shuffle_channel_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
+  LOG(INFO) << "Converting " + op_type + "...";
+  std::shared_ptr<ge::op::ShuffleChannel> shuffle_channel_node =
+      std::make_shared<ge::op::ShuffleChannel>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
-  output_node->set_input_x(*inputs_map.at(x_var_name));
-  output_node->set_attr_group(op_info->GetAttr<int>("group"));
+  shuffle_channel_node->set_input_x(*inputs_map.at(x_var_name));
+  shuffle_channel_node->set_attr_group(op_info->GetAttr<int>("group"));
   OpList::Global().add(inputs_map.at(x_var_name));
-  OpList::Global().add(output_node);
+  OpList::Global().add(shuffle_channel_node);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Out").front()] = output_node;
+  outputs_map[op_info->Output("Out").front()] = shuffle_channel_node;
   return outputs_map;
 }
......
@@ -29,12 +29,14 @@ namespace bridge {
 node_map_type SoftmaxConverter(const std::shared_ptr<lite::OpLite> softmax_op,
                                const node_map_type& inputs_map) {
-  LOG(INFO) << "converting softmax...";
-  lite::Scope* scope = softmax_op->scope();
-  const lite::OpInfo* op_info = softmax_op->op_info();
+  auto scope = softmax_op->scope();
+  auto op_info = softmax_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
+  LOG(INFO) << "Converting " + op_type + "...";
-  std::shared_ptr<ge::op::Softmax> output_node =
-      std::make_shared<ge::op::Softmax>(UniqueName("softmax"));
+  std::shared_ptr<ge::op::Softmax> softmax_node =
+      std::make_shared<ge::op::Softmax>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
   auto x_dims = scope->FindVar(x_var_name)->GetMutable<Tensor>()->dims();
@@ -46,14 +48,14 @@ node_map_type SoftmaxConverter(const std::shared_ptr<lite::OpLite> softmax_op,
   }
   CHECK(inputs_map.count(x_var_name));
-  output_node->set_input_x(*inputs_map.at(x_var_name));
-  output_node->set_attr_axis(axis);
+  softmax_node->set_input_x(*inputs_map.at(x_var_name));
+  softmax_node->set_attr_axis(axis);
   OpList::Global().add(inputs_map.at(x_var_name));
-  OpList::Global().add(output_node);
+  OpList::Global().add(softmax_node);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Out").front()] = output_node;
+  outputs_map[op_info->Output("Out").front()] = softmax_node;
   return outputs_map;
 }
......
@@ -30,19 +30,21 @@ namespace bridge {
 node_map_type TransposeConverter(
     const std::shared_ptr<lite::OpLite> transpose_op,
     const node_map_type& inputs_map) {
-  LOG(INFO) << "converting transpose...";
-  lite::Scope* scope = transpose_op->scope();
-  const lite::OpInfo* op_info = transpose_op->op_info();
+  auto scope = transpose_op->scope();
+  auto op_info = transpose_op->op_info();
+  auto op_type = op_info->Type();
+  auto unique_op_type = UniqueName(op_type);
+  LOG(INFO) << "Converting " + op_type + "...";
-  std::shared_ptr<ge::op::Permute> output_node =
-      std::make_shared<ge::op::Permute>(UniqueName("transpose"));
+  std::shared_ptr<ge::op::Permute> transpose_node =
+      std::make_shared<ge::op::Permute>(unique_op_type);
   auto x_var_name = op_info->Input("X").front();
   // paddlelite doesn't have this input
   // w must be set, but it does nothing
-  auto w_var_name = "transpose_w";
+  auto w_var_name = unique_op_type + "/w";
   auto* w = scope->Var(w_var_name)->GetMutable<Tensor>();
-  w->Resize(scope->FindVar(x_var_name)->GetMutable<Tensor>()->dims());
+  w->Resize({1});
   auto* w_data = w->mutable_data<float>();
   for (int i = 0; i < w->numel(); i++) {
     w_data[i] = 1.f;
@@ -55,15 +57,15 @@ node_map_type TransposeConverter(
   auto npu_axis = ge::AttrValue::LIST_INT(axis.begin(), axis.end());
   CHECK(inputs_map.count(x_var_name));
-  output_node->set_input_x(*inputs_map.at(x_var_name));
-  output_node->set_input_w(*npu_w);
-  output_node->set_attr_order(npu_axis);
+  transpose_node->set_input_x(*inputs_map.at(x_var_name));
+  transpose_node->set_input_w(*npu_w);
+  transpose_node->set_attr_order(npu_axis);
   OpList::Global().add(inputs_map.at(x_var_name));
-  OpList::Global().add(output_node);
+  OpList::Global().add(transpose_node);
   node_map_type outputs_map;
-  outputs_map[op_info->Output("Out").front()] = output_node;
+  outputs_map[op_info->Output("Out").front()] = transpose_node;
   return outputs_map;
 }
......
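The w tensor created above is wrapped into a ge::op::Const (in lines this hunk does not show) before being passed to set_input_w. As a consolidated view of the per-instance dummy-weight pattern, a sketch reusing the names introduced in the new code (unique_op_type, scope, transpose_node) and the helpers seen in the other hunks (CvtFromLiteTensor, OpList) might look like the fragment below; it is an illustration, not the elided original code.

  // Sketch only: each converted transpose gets its own ignored "w" input,
  // keyed by the unique op name so instances never share one scope variable.
  auto w_var_name = unique_op_type + "/w";  // e.g. "transpose_0/w"
  auto* w = scope->Var(w_var_name)->GetMutable<Tensor>();
  w->Resize({1});                           // HiAI requires w but ignores its content
  w->mutable_data<float>()[0] = 1.f;
  auto npu_w = std::make_shared<ge::op::Const>(w_var_name);
  npu_w->set_attr_value(CvtFromLiteTensor(w));
  OpList::Global().add(npu_w);
  transpose_node->set_input_w(*npu_w);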