Unverified commit d095bc36 authored by zhupengyang, committed by GitHub

[NPU] support fill_constant and fill_constant_batch_size_like op bridge and uts (#3055)

* [NPU] support fill_constant and fill_constant_batch_size_like op bridge and uts
Parent commit: 5dac279a
......@@ -44,6 +44,8 @@ lite_cc_library(subgraph_bridge_argmax_op_npu SRCS argmax_op.cc DEPS ${npu_subgr
lite_cc_library(subgraph_bridge_instance_norm_op_npu SRCS instance_norm_op.cc DEPS ${npu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_dropout_op_npu SRCS dropout_op.cc DEPS ${npu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_layer_norm_op_npu SRCS layer_norm_op.cc DEPS ${npu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_fill_constant_op_npu SRCS fill_constant_op.cc DEPS ${npu_subgraph_bridge_deps})
lite_cc_library(subgraph_bridge_fill_constant_batch_size_like_op_npu SRCS fill_constant_batch_size_like_op.cc DEPS ${npu_subgraph_bridge_deps})
set(npu_subgraph_bridges
subgraph_bridge_registry
......@@ -75,6 +77,8 @@ set(npu_subgraph_bridges
subgraph_bridge_instance_norm_op_npu
subgraph_bridge_dropout_op_npu
subgraph_bridge_layer_norm_op_npu
subgraph_bridge_fill_constant_op_npu
subgraph_bridge_fill_constant_batch_size_like_op_npu
CACHE INTERNAL "npu_subgraph_bridges")
message(STATUS "+++++ npu_subgraph_bridges: ${npu_subgraph_bridges}")
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/npu/bridges/graph.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utility.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace npu {
// Converts the fill_constant_batch_size_like op into an HiAI ge::op::Fill
// node: a const "dims" input carrying the target shape and a const "value"
// input carrying the scalar fill value.
//
// ctx    - opaque pointer to the subgraph Graph being built (must be non-null)
// op     - the lite op being converted (must be non-null)
// kernel - unused by this converter, kept for the bridge signature
// Returns REBUILD_WHEN_SHAPE_CHANGED because the dims are baked in as
// constants and the graph must be rebuilt if input shapes change.
int FillConstantBatchSizeLikeConverter(void* ctx,
                                       OpLite* op,
                                       KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto scope = op->scope();
  VLOG(3) << "[NPU] Converting " + op_type + "...";

  // Get output and op attributes. The output shape has already been derived
  // from the "Input" tensor by the op's shape inference, so the "Input"
  // tensor itself does not need to be fetched here (the previous unused
  // lookups of "Input" and its dims were removed).
  auto out_name = op_info->Output("Out").front();
  auto out = scope->FindTensor(out_name);
  auto out_shape = out->dims().Vectorize();
  auto value = op_info->GetAttr<float>("value");

  // Const nodes feeding ge::op::Fill: target dims and scalar fill value.
  std::vector<int> target_shape{out_shape.begin(), out_shape.end()};
  auto dims_node = graph->Add(out_name + "/dims", target_shape);
  auto value_node = graph->Add(out_name + "/value", std::vector<float>{value});

  // Fill node
  auto fill_node = graph->Add<ge::op::Fill>(out_name);
  auto fill_op = fill_node->data<ge::op::Fill>();
  fill_op->set_input_dims(*dims_node->data());
  fill_op->set_input_value(*value_node->data());
  return REBUILD_WHEN_SHAPE_CHANGED;
}
} // namespace npu
} // namespace subgraph
} // namespace lite
} // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(
fill_constant_batch_size_like,
kNPU,
paddle::lite::subgraph::npu::FillConstantBatchSizeLikeConverter);
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/npu/bridges/graph.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utility.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace npu {
// Converts the fill_constant op into an HiAI ge::op::Fill node.
// The target shape comes either from the optional runtime "ShapeTensor"
// input (when present) or from the statically inferred output dims.
//
// ctx    - opaque pointer to the subgraph Graph being built (must be non-null)
// op     - the lite op being converted (must be non-null)
// kernel - unused by this converter, kept for the bridge signature
// Returns REBUILD_WHEN_SHAPE_CHANGED because constant dims require a graph
// rebuild whenever shapes change.
int FillConstantConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto scope = op->scope();
  VLOG(3) << "[NPU] Converting " + op_type + "...";

  // Get output and op attributes
  auto out_name = op_info->Output("Out").front();
  auto out = scope->FindTensor(out_name);
  auto out_shape = out->dims().Vectorize();
  auto value = op_info->GetAttr<float>("value");

  // dims node: prefer the runtime "ShapeTensor" input when it exists.
  // Check HasInput first — "ShapeTensor" is an optional argument of
  // fill_constant, and querying Input() on an absent key can fail instead
  // of falling through to the static-shape branch.
  std::shared_ptr<Node> dims_node = nullptr;
  if (op_info->HasInput("ShapeTensor") &&
      !op_info->Input("ShapeTensor").empty()) {
    auto dims_name = op_info->Input("ShapeTensor").front();
    dims_node = graph->Get(dims_name);
  } else {
    // Fall back to the statically inferred output shape.
    std::vector<int> target_shape{out_shape.begin(), out_shape.end()};
    dims_node = graph->Add(out_name + "/dims", target_shape);
  }
  auto value_node = graph->Add(out_name + "/value", std::vector<float>{value});

  // Fill node
  auto fill_node = graph->Add<ge::op::Fill>(out_name);
  auto fill_op = fill_node->data<ge::op::Fill>();
  fill_op->set_input_dims(*dims_node->data());
  fill_op->set_input_value(*value_node->data());
  return REBUILD_WHEN_SHAPE_CHANGED;
}
} // namespace npu
} // namespace subgraph
} // namespace lite
} // namespace paddle
REGISTER_SUBGRAPH_BRIDGE(fill_constant,
kNPU,
paddle::lite::subgraph::npu::FillConstantConverter);
......@@ -37,6 +37,8 @@ USE_SUBGRAPH_BRIDGE(fusion_elementwise_add_activation, kNPU);
USE_SUBGRAPH_BRIDGE(fusion_elementwise_sub_activation, kNPU);
USE_SUBGRAPH_BRIDGE(fusion_elementwise_mul_activation, kNPU);
USE_SUBGRAPH_BRIDGE(fusion_elementwise_div_activation, kNPU);
USE_SUBGRAPH_BRIDGE(fill_constant, kNPU);
USE_SUBGRAPH_BRIDGE(fill_constant_batch_size_like, kNPU);
USE_SUBGRAPH_BRIDGE(fc, kNPU);
USE_SUBGRAPH_BRIDGE(bilinear_interp, kNPU);
......
......@@ -134,7 +134,10 @@ TEST(fill_constant_batch_size_like, precision) {
LOG(INFO) << "test fill_constant_batch_size_like op";
Place place;
float abs_error = 1e-5;
#ifdef LITE_WITH_ARM
#if defined(LITE_WITH_NPU)
place = TARGET(kNPU);
abs_error = 1e-2; // use fp16 in npu
#elif defined(LITE_WITH_ARM)
place = TARGET(kARM);
#else
return;
......
......@@ -171,7 +171,10 @@ TEST(fill_constant, precision) {
LOG(INFO) << "test fill_constant op";
Place place;
float abs_error = 1e-5;
#ifdef LITE_WITH_ARM
#if defined(LITE_WITH_NPU)
place = TARGET(kNPU);
abs_error = 1e-2; // use fp16 in npu
#elif defined(LITE_WITH_ARM)
place = TARGET(kARM);
#else
return;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册