Commit c10a8b16 authored by zhupengyang and committed by GitHub

fix npu path (#2210)

* move lite/backends/npu/bridges --> lite/kernels/npu/

test=develop

* fix namespace for npu

test=develop

* mv npu runtime file to lite/backends/npu

test=develop
Parent c5cd78ab
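The diff below applies one mechanical pattern across every bridge file: includes move from `lite/backends/npu/bridge/` to `lite/kernels/npu/bridges/`, and the enclosing namespace gains a `kernels` level while `bridge` becomes `bridges`. A compact sketch of the pattern, using the relu registration taken from the activation bridge in this diff:

```cpp
// Before: lite/backends/npu/bridge/act_op.cc
#include "lite/backends/npu/bridge/registry.h"
namespace paddle { namespace lite { namespace npu { namespace bridge {
// ... ActConverter ...
}}}}  // namespace paddle::lite::npu::bridge
REGISTER_NPU_BRIDGE(relu, paddle::lite::npu::bridge::ActConverter);

// After: lite/kernels/npu/bridges/act_op.cc
#include "lite/kernels/npu/bridges/registry.h"
namespace paddle { namespace lite { namespace kernels {
namespace npu { namespace bridges {
// ... ActConverter ...
}}}}}  // namespace paddle::lite::kernels::npu::bridges
REGISTER_NPU_BRIDGE(relu, paddle::lite::kernels::npu::bridges::ActConverter);
```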
......@@ -3,7 +3,3 @@ if(NOT LITE_WITH_NPU)
endif()
lite_cc_library(npu_runtime SRCS runtime.cc DEPS npu_ddk_hiai)
if(NOT LITE_ON_TINY_PUBLISH)
add_subdirectory(bridge)
endif()
......@@ -27,9 +27,9 @@
#include "ai_ddk_lib/include/graph/model.h"
#include "ai_ddk_lib/include/graph/op/all_ops.h" // for ge::op::Data
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/paddle_use_npu_bridges.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/paddle_use_npu_bridges.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
......@@ -51,7 +51,7 @@ std::shared_ptr<ge::Operator> GenerateNPUProgramPass::CvtVarNode(
auto wgt = std::make_shared<ge::op::Const>(arg.name);
LOG(INFO) << "in convert const:" << arg.name;
VLOG(4) << dims;
wgt->set_attr_value(lite::npu::bridge::CvtFromLiteTensor(tensor));
wgt->set_attr_value(lite::kernels::npu::bridges::CvtFromLiteTensor(tensor));
return wgt;
} else {
CHECK_EQ(dims.size(), 4);
......@@ -74,13 +74,13 @@ std::shared_ptr<ge::Operator> GenerateNPUProgramPass::CvtVarNode(
void GenerateNPUProgramPass::CvtAllOpNodes(
const std::vector<Node*>& nodes2cvt,
lite::npu::bridge::node_map_type* converted_vars) {
const auto& bridges = lite::npu::bridge::Factory::Instance();
lite::kernels::npu::bridges::node_map_type* converted_vars) {
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& cvtfunc_map = bridges.AllFunctions();
// Record all converted vars; an op node's inputs must be found in
// converted_vars
for (auto& node : nodes2cvt) {
lite::npu::bridge::node_map_type node_inputs;
lite::kernels::npu::bridges::node_map_type node_inputs;
auto& stmt = node->AsStmt();
for (auto& var_node : node->inlinks) {
auto& arg = var_node->AsArg();
......@@ -106,7 +106,7 @@ std::string GenerateNPUProgramPass::BuildNPUGraph(
const std::unordered_set<Node*>& out_data_vars,
int sub_id) {
auto ordered_nodes = GetTopologicalOrder(op_nodes);
lite::npu::bridge::node_map_type converted_vars;
lite::kernels::npu::bridges::node_map_type converted_vars;
CvtAllOpNodes(ordered_nodes, &converted_vars);
std::vector<std::string> in_var_names;
......@@ -132,7 +132,7 @@ std::string GenerateNPUProgramPass::BuildNPUGraph(
// Compile the IR graph to an NPU model and store the model data into the
// weight tensor with persistable=true, so that the model parser can
// recognize it and save it to the param files
if (!lite::npu::bridge::BuildModel(inputs, outputs, weight)) {
if (!lite::kernels::npu::bridges::BuildModel(inputs, outputs, weight)) {
LOG(WARNING) << "Build NPU failed subgraph " << sub_id;
throw std::runtime_error("Build NPU failed subgraph.");
}
......@@ -172,7 +172,7 @@ void GenerateNPUProgramPass::GenNPUSubgraph(
void GenerateNPUProgramPass::Apply(const std::unique_ptr<SSAGraph>& graph) {
LOG(INFO) << "Before NPU Pass \n" << Visualize(graph.get());
const auto& bridges = lite::npu::bridge::Factory::Instance();
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& op_map = bridges.AllFunctions();
std::vector<std::string> supported_op_types;
for (auto& i : op_map) {
......
......@@ -20,10 +20,10 @@
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/core/mir/pass.h"
#include "lite/core/mir/subgraph/subgraph_program_pass.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
......@@ -41,7 +41,7 @@ class GenerateNPUProgramPass : public SubgraphProgramPass {
// nodes2cvt: op nodes to convert
// return cvted_vars: converted var nodes
void CvtAllOpNodes(const std::vector<Node*>& nodes2cvt,
lite::npu::bridge::node_map_type* cvted_vars);
lite::kernels::npu::bridges::node_map_type* cvted_vars);
std::shared_ptr<ge::Operator> CvtVarNode(lite::mir::Node* var_node,
const Scope* scope);
......
......@@ -7,3 +7,7 @@ message(STATUS "compile with lite NPU kernels")
add_kernel(graph_compute_npu NPU basic SRCS graph_compute.cc DEPS ${lite_kernel_deps} npu_runtime)
# lite_cc_test(test_graph_compute_npu SRCS graph_compute_test.cc DEPS graph_compute_npu)
if(NOT LITE_ON_TINY_PUBLISH)
add_subdirectory(bridges)
endif()
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ActConverter(const std::shared_ptr<lite::OpLite> act_op,
const node_map_type& inputs_map) {
......@@ -72,16 +73,20 @@ node_map_type ActConverter(const std::shared_ptr<lite::OpLite> act_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(sigmod, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(relu, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(tanh, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(elu, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(abs, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(softsign, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(softplus, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(hardsigmoid, paddle::lite::npu::bridge::ActConverter);
REGISTER_NPU_BRIDGE(sigmod, paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(relu, paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(tanh, paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(elu, paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(abs, paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(softsign,
paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(softplus,
paddle::lite::kernels::npu::bridges::ActConverter);
REGISTER_NPU_BRIDGE(hardsigmoid,
paddle::lite::kernels::npu::bridges::ActConverter);
......@@ -14,15 +14,16 @@
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
#include "lite/operators/relu_op.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void relu_ref(const std::shared_ptr<operators::ReluOp> op) {
Scope* scope = op->scope();
......@@ -91,8 +92,9 @@ TEST(NPUBridges, relu) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type BatchNormConverter(
const std::shared_ptr<lite::OpLite> batch_norm_op,
......@@ -87,9 +88,11 @@ node_map_type BatchNormConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(batch_norm, paddle::lite::npu::bridge::BatchNormConverter);
REGISTER_NPU_BRIDGE(batch_norm,
paddle::lite::kernels::npu::bridges::BatchNormConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/batch_norm_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename dtype>
void batch_norm_ref(const std::shared_ptr<operators::BatchNormOp> op) {
......@@ -157,8 +158,9 @@ TEST(NPUBridges, batch_norm) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ConcatConverter(const std::shared_ptr<lite::OpLite> concat_op,
const node_map_type& inputs_map) {
......@@ -64,9 +65,11 @@ node_map_type ConcatConverter(const std::shared_ptr<lite::OpLite> concat_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(concat, paddle::lite::npu::bridge::ConcatConverter);
REGISTER_NPU_BRIDGE(concat,
paddle::lite::kernels::npu::bridges::ConcatConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/concat_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
std::vector<size_t> stride_numel(const DDim& ddim) {
std::vector<size_t> strides(ddim.size());
......@@ -119,8 +120,9 @@ TEST(NPUBridges, concat) {
test_concat({{3, 3, 5, 2}, {3, 3, 5, 6}}, 3);
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ConvConverter(const std::shared_ptr<lite::OpLite> conv_op,
const node_map_type& inputs_map) {
......@@ -206,10 +207,12 @@ node_map_type ConvConverter(const std::shared_ptr<lite::OpLite> conv_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(conv2d, paddle::lite::npu::bridge::ConvConverter);
REGISTER_NPU_BRIDGE(depthwise_conv2d, paddle::lite::npu::bridge::ConvConverter);
REGISTER_NPU_BRIDGE(conv2d, paddle::lite::kernels::npu::bridges::ConvConverter);
REGISTER_NPU_BRIDGE(depthwise_conv2d,
paddle::lite::kernels::npu::bridges::ConvConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/conv_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void conv_ref(const std::shared_ptr<operators::ConvOpLite> op) {
Scope* scope = op->scope();
......@@ -268,8 +269,9 @@ TEST(NPUBridges, conv) {
#endif
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ConvTransposeConverter(
const std::shared_ptr<lite::OpLite> conv_transpose_op,
......@@ -136,10 +137,12 @@ node_map_type ConvTransposeConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(conv2d_transpose,
paddle::lite::npu::bridge::ConvTransposeConverter);
REGISTER_NPU_BRIDGE(
conv2d_transpose,
paddle::lite::kernels::npu::bridges::ConvTransposeConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/conv_transpose_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename DType>
void add_bias_with_relu(DType* data,
......@@ -360,8 +361,9 @@ TEST(NPUBridges, conv_transpose) {
#endif
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ElementwiseConverter(
const std::shared_ptr<lite::OpLite> elementwise_op,
......@@ -69,10 +70,11 @@ node_map_type ElementwiseConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(elementwise_add,
paddle::lite::npu::bridge::ElementwiseConverter);
paddle::lite::kernels::npu::bridges::ElementwiseConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/elementwise_ops.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename dtype>
void elementwise_add_ref(const std::shared_ptr<operators::ElementwiseOp> op) {
......@@ -173,8 +174,9 @@ TEST(NPUBridges, elementwise_add) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type FCConverter(const std::shared_ptr<lite::OpLite> fc_op,
const node_map_type& inputs_map) {
......@@ -110,9 +111,10 @@ node_map_type FCConverter(const std::shared_ptr<lite::OpLite> fc_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(fc, paddle::lite::npu::bridge::FCConverter);
REGISTER_NPU_BRIDGE(fc, paddle::lite::kernels::npu::bridges::FCConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/fc_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void fc_ref(const std::shared_ptr<operators::FcOpLite> op) {
Scope* scope = op->scope();
......@@ -73,7 +74,7 @@ void test_fc(const std::vector<int64_t>& input_shape,
bool has_bias) {
CHECK_EQ(w_shape.size(), 2UL);
const auto& bridges = lite::npu::bridge::Factory::Instance();
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& supported_lists = bridges.AllFunctions();
CHECK(bridges.HasType("fc"));
......@@ -128,8 +129,9 @@ TEST(NPUBridges, fc) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type InterpolateConverter(
const std::shared_ptr<lite::OpLite> interpolate_op,
......@@ -132,12 +133,13 @@ node_map_type InterpolateConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(bilinear_interp,
paddle::lite::npu::bridge::InterpolateConverter);
paddle::lite::kernels::npu::bridges::InterpolateConverter);
REGISTER_NPU_BRIDGE(nearest_interp,
paddle::lite::npu::bridge::InterpolateConverter);
paddle::lite::kernels::npu::bridges::InterpolateConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/interpolate_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename DType>
void bilinear_interp_ref(const std::shared_ptr<operators::InterpolateOp> op) {
......@@ -393,8 +394,9 @@ TEST(NPUBridges, bilinear_interp) {
#endif
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
// Note: the var_name in inputs_map contains only the data; the weight
// should be handled in this converter
......@@ -112,9 +113,10 @@ node_map_type MulConverter(const std::shared_ptr<lite::OpLite> mul_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(mul, paddle::lite::npu::bridge::MulConverter);
REGISTER_NPU_BRIDGE(mul, paddle::lite::kernels::npu::bridges::MulConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/mul_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void mul_ref(const std::shared_ptr<operators::MulOpLite> op) {
Scope* scope = op->scope();
......@@ -55,7 +56,7 @@ void test_mul(const std::vector<int64_t>& x_shape,
const std::vector<int64_t>& y_shape,
int x_num_col_dims,
int y_num_col_dims) {
const auto& bridges = lite::npu::bridge::Factory::Instance();
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& supported_lists = bridges.AllFunctions();
CHECK(bridges.HasType("mul"));
......@@ -103,8 +104,9 @@ TEST(NPUBridges, mul) {
test_mul({1, 4, 1, 1}, {4, 8}, 1, 1);
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type Pad2dConverter(const std::shared_ptr<lite::OpLite> pad2d_op,
const node_map_type& inputs_map) {
......@@ -78,9 +79,10 @@ node_map_type Pad2dConverter(const std::shared_ptr<lite::OpLite> pad2d_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(pad2d, paddle::lite::npu::bridge::Pad2dConverter);
REGISTER_NPU_BRIDGE(pad2d, paddle::lite::kernels::npu::bridges::Pad2dConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/pad2d_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename dtype>
void pad2d_ref(const std::shared_ptr<operators::Pad2dOpLite> op) {
......@@ -180,8 +181,9 @@ TEST(NPUBridges, pad2d) {
#endif
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -14,7 +14,7 @@
#pragma once
#include "lite/backends/npu/bridge/registry.h"
#include "lite/kernels/npu/bridges/registry.h"
USE_NPU_BRIDGE(mul);
USE_NPU_BRIDGE(fc);
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
const node_map_type& inputs_map) {
......@@ -80,9 +81,10 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(pool2d, paddle::lite::npu::bridge::PoolConverter);
REGISTER_NPU_BRIDGE(pool2d, paddle::lite::kernels::npu::bridges::PoolConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/pool_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
Scope* scope = op->scope();
......@@ -240,8 +241,9 @@ TEST(NPUBridges, pool) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/npu/bridge/registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include <utility>
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
Factory& Factory::Instance() {
static Factory g_npu_bridge;
......@@ -33,7 +34,8 @@ void Factory::Insert(const std::string& op_type, const func_type& func_name) {
map_.insert(std::make_pair(op_type, func_name));
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......@@ -25,8 +25,9 @@
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
// var_name -> npu node pointer
using node_map_type =
......@@ -49,8 +50,9 @@ class Factory {
DISALLOW_COPY_AND_ASSIGN(Factory);
};
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......@@ -73,8 +75,8 @@ class Factory {
__reg_npu_bridge_##op_type##__, \
"REGISTER_NPU_BRIDGE must be called in global namespace only once!"); \
int __reg_npu_bridge_##op_type##_Insert() { \
paddle::lite::npu::bridge::Factory::Instance().Insert(#op_type, \
cvt_func_name); \
paddle::lite::kernels::npu::bridges::Factory::Instance().Insert( \
#op_type, cvt_func_name); \
return 0; \
}
......
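For context, a minimal hedged sketch of how a registered bridge is looked up and invoked after the rename. The calls mirror those in GenerateNPUProgramPass and the unit tests in this diff (`Factory::Instance()`, `HasType()`, `AllFunctions()`); the `demo` namespace and `ConvertOneOp` helper are illustrative, not part of the codebase:

```cpp
#include "lite/kernels/npu/bridges/registry.h"

namespace demo {

namespace bridges = paddle::lite::kernels::npu::bridges;

// Hypothetical helper: run one op through its registered bridge converter.
bridges::node_map_type ConvertOneOp(
    const std::shared_ptr<paddle::lite::OpLite>& op,
    const bridges::node_map_type& inputs_map) {
  const auto& factory = bridges::Factory::Instance();
  CHECK(factory.HasType(op->op_info()->Type()));
  // Each converter maps the op's input nodes to its output nodes;
  // node_map_type maps a var_name to an NPU (ge::Operator) node.
  const auto& cvtfunc_map = factory.AllFunctions();
  return cvtfunc_map.at(op->op_info()->Type())(op, inputs_map);
}

}  // namespace demo
```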
......@@ -19,13 +19,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ReshapeConverter(const std::shared_ptr<lite::OpLite> reshape_op,
const node_map_type& inputs_map) {
......@@ -112,10 +113,13 @@ node_map_type ReshapeConverter(const std::shared_ptr<lite::OpLite> reshape_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(reshape, paddle::lite::npu::bridge::ReshapeConverter);
REGISTER_NPU_BRIDGE(reshape2, paddle::lite::npu::bridge::ReshapeConverter);
REGISTER_NPU_BRIDGE(reshape,
paddle::lite::kernels::npu::bridges::ReshapeConverter);
REGISTER_NPU_BRIDGE(reshape2,
paddle::lite::kernels::npu::bridges::ReshapeConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/reshape_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void reshape_ref(const std::shared_ptr<lite::OpLite> op) {
auto scope = op->scope();
......@@ -190,8 +191,9 @@ TEST(NPUBridges, reshape) {
#endif
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
const node_map_type& inputs_map) {
......@@ -80,9 +81,10 @@ node_map_type ScaleConverter(const std::shared_ptr<lite::OpLite> scale_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(scale, paddle::lite::npu::bridge::ScaleConverter);
REGISTER_NPU_BRIDGE(scale, paddle::lite::kernels::npu::bridges::ScaleConverter);
......@@ -15,14 +15,15 @@
#include "lite/operators/scale_op.h"
#include <gtest/gtest.h>
#include <random>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void scale_ref(const std::shared_ptr<operators::ScaleOp> op) {
Scope* scope = op->scope();
......@@ -114,8 +115,9 @@ TEST(NPUBridges, scale) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type ShuffleChannelConverter(
const std::shared_ptr<lite::OpLite> shuffle_channel_op,
......@@ -50,10 +51,12 @@ node_map_type ShuffleChannelConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(shuffle_channel,
paddle::lite::npu::bridge::ShuffleChannelConverter);
REGISTER_NPU_BRIDGE(
shuffle_channel,
paddle::lite::kernels::npu::bridges::ShuffleChannelConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/shuffle_channel_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void shuffle_channel_ref(
const std::shared_ptr<operators::ShuffleChannelOpLite> op) {
......@@ -106,8 +107,9 @@ TEST(NPUBridges, softmax) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type SoftmaxConverter(const std::shared_ptr<lite::OpLite> softmax_op,
const node_map_type& inputs_map) {
......@@ -58,9 +59,11 @@ node_map_type SoftmaxConverter(const std::shared_ptr<lite::OpLite> softmax_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(softmax, paddle::lite::npu::bridge::SoftmaxConverter);
REGISTER_NPU_BRIDGE(softmax,
paddle::lite::kernels::npu::bridges::SoftmaxConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/softmax_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename dtype>
void softmax_ref(const std::shared_ptr<operators::SoftmaxOp> op) {
......@@ -125,8 +126,9 @@ TEST(NPUBridges, softmax) {
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -18,13 +18,15 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type SplitConverter(const std::shared_ptr<lite::OpLite> split_op,
const node_map_type& inputs_map) {
lite::Scope* scope = split_op->scope();
......@@ -76,9 +78,10 @@ node_map_type SplitConverter(const std::shared_ptr<lite::OpLite> split_op,
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(split, paddle::lite::npu::bridge::SplitConverter);
REGISTER_NPU_BRIDGE(split, paddle::lite::kernels::npu::bridges::SplitConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/split_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename dtype>
void split_ref(const std::shared_ptr<operators::SplitOp> op) {
......@@ -99,7 +100,7 @@ void test_split(int bs,
int axis,
int num,
std::vector<int> sections) {
const auto& bridges = lite::npu::bridge::Factory::Instance();
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& supported_lists = bridges.AllFunctions();
CHECK(bridges.HasType("split"));
// prepare input&output variables
......@@ -161,8 +162,9 @@ TEST(NPUBridges, split) {
test_split(4, 2, 3, 6, 3, 0, {5, 1});
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -12,18 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/kernels/npu/bridges/test_helper.h"
#include <utility>
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
#include "lite/operators/graph_op.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
void LauchOp(const std::shared_ptr<lite::OpLite> op,
const std::vector<std::string>& input_var_names,
......@@ -32,7 +33,7 @@ void LauchOp(const std::shared_ptr<lite::OpLite> op,
auto op_type = op->op_info()->Type();
// convert op to IR graph
const auto& bridges = lite::npu::bridge::Factory::Instance();
const auto& bridges = lite::kernels::npu::bridges::Factory::Instance();
const auto& supported_lists = bridges.AllFunctions();
CHECK(bridges.HasType(op_type));
......@@ -96,8 +97,9 @@ void LauchOp(const std::shared_ptr<lite::OpLite> op,
OpList::Global().clear();
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -22,8 +22,9 @@
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
template <typename T>
std::shared_ptr<T> CreateOp(const cpp::OpDesc& opdesc, lite::Scope* scope) {
......@@ -58,7 +59,8 @@ void LauchOp(const std::shared_ptr<lite::OpLite> op,
const std::vector<std::string>& input_var_names,
const std::vector<std::string>& output_var_names);
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
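Tying test_helper.h together, a hedged sketch of the pattern the bridge unit tests above follow. Tensor names and shapes are illustrative, it assumes the `cpp::OpDesc` setters used elsewhere in these tests, and the real tests also fill the input and compare against a `*_ref` reference implementation:

```cpp
#include "lite/kernels/npu/bridges/test_helper.h"
#include "lite/operators/relu_op.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridges {

void test_relu_sketch() {
  // Prepare input/output variables in a fresh scope.
  Scope scope;
  auto* x = scope.Var("x")->GetMutable<Tensor>();
  scope.Var("out")->GetMutable<Tensor>();
  x->Resize({1, 2, 3, 4});
  // ... fill x->mutable_data<float>() with test data ...

  // Describe the op and instantiate it via the CreateOp helper
  // declared in test_helper.h.
  cpp::OpDesc opdesc;
  opdesc.SetType("relu");
  opdesc.SetInput("X", {"x"});
  opdesc.SetOutput("Out", {"out"});
  auto op = CreateOp<operators::ReluOp>(opdesc, &scope);

  // LauchOp (sic) converts the op through its bridge, builds and runs
  // the NPU model, and writes the result into "out".
  LauchOp(op, {"x"}, {"out"});
  // ... compare "out" against a CPU reference such as relu_ref(op) ...
}

}  // namespace bridges
}  // namespace npu
}  // namespace kernels
}  // namespace lite
}  // namespace paddle
```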
......@@ -18,13 +18,14 @@
#include "ai_ddk_lib/include/graph/op/all_ops.h"
#include "ai_ddk_lib/include/graph/operator.h"
#include "ai_ddk_lib/include/graph/operator_reg.h"
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utils.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
node_map_type TransposeConverter(
const std::shared_ptr<lite::OpLite> transpose_op,
......@@ -68,10 +69,13 @@ node_map_type TransposeConverter(
return outputs_map;
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
REGISTER_NPU_BRIDGE(transpose, paddle::lite::npu::bridge::TransposeConverter);
REGISTER_NPU_BRIDGE(transpose2, paddle::lite::npu::bridge::TransposeConverter);
REGISTER_NPU_BRIDGE(transpose,
paddle::lite::kernels::npu::bridges::TransposeConverter);
REGISTER_NPU_BRIDGE(transpose2,
paddle::lite::kernels::npu::bridges::TransposeConverter);
......@@ -14,14 +14,15 @@
#include "lite/operators/transpose_op.h"
#include <gtest/gtest.h>
#include "lite/backends/npu/bridge/registry.h"
#include "lite/backends/npu/bridge/test_helper.h"
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
int data_index(std::vector<int> pos, DDimLite dims) {
int d1 = dims[1];
......@@ -139,8 +140,9 @@ TEST(NPUBridges, transpose) {
// test_transpose(1, 1, 1, 2, std::vector<int>{0,1,2,3});
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......
......@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/npu/bridge/utils.h"
#include "lite/kernels/npu/bridges/utils.h"
#include <mutex> // NOLINT
#include <utility>
#include "ai_ddk_lib/include/graph/buffer.h"
......@@ -24,8 +24,9 @@
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
// Build the HIAI IR graph into an om model, and store the om model data
// into a lite tensor
bool BuildModel(std::vector<ge::Operator>& inputs, // NOLINT
......@@ -164,7 +165,8 @@ bool HasInputArg(const OpInfo* op_info,
}
}
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle
......@@ -25,8 +25,9 @@
namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridge {
namespace bridges {
class OpList {
public:
......@@ -105,7 +106,8 @@ bool HasInputArg(const OpInfo* op_info,
const Scope* scope,
const std::string& argname);
} // namespace bridge
} // namespace bridges
} // namespace npu
} // namespace kernels
} // namespace lite
} // namespace paddle