Commit 1b834603 authored by yanghongtian

add graph

Parent c40f2427
@@ -179,7 +179,7 @@ if(LITE_WITH_XPU)
endif()
if (LITE_WITH_HW_ASCEND_NPU)
  include(device/ascend)
  include(device/hw_ascend_npu)
endif()
include(external/mklml) # download mklml package
......
@@ -64,5 +64,44 @@ else()
  add_library(runtime_lib SHARED IMPORTED GLOBAL)
  set_property(TARGET runtime_lib PROPERTY IMPORTED_LOCATION ${RT_LIB_FILE})
endif()
set(ascend_runtime_libs acl_lib register_lib runtime_lib CACHE INTERNAL "ascend runtime libs")
set(ascend_builder_libs acl_lib register_lib runtime_lib CACHE INTERNAL "ascend builder libs")
\ No newline at end of file
set(hw_ascend_npu_runtime_libs acl_lib register_lib runtime_lib CACHE INTERNAL "hw_ascend_npu runtime libs")
# find atc include folder and library
find_path(ATC_INC NAMES ge/ge_ir_build.h
          PATHS ${ASCEND_HOME}/atc/include)
if (NOT ATC_INC)
  message(FATAL_ERROR "Cannot find ge/ge_ir_build.h in ${ASCEND_HOME}/atc/include")
endif()
include_directories("${ATC_INC}")
find_library(GRAPH_LIB_FILE graph PATHS ${ASCEND_HOME}/atc/lib64)
if (NOT GRAPH_LIB_FILE)
  message(FATAL_ERROR "Cannot find libgraph.so in ${ASCEND_HOME}/atc/lib64")
else()
  message(STATUS "Found Graph Library: ${GRAPH_LIB_FILE}")
  add_library(graph_lib SHARED IMPORTED GLOBAL)
  set_property(TARGET graph_lib PROPERTY IMPORTED_LOCATION ${GRAPH_LIB_FILE})
endif()
# find opp include folder and library
find_path(OPP_INC NAMES all_ops.h
          PATHS ${ASCEND_HOME}/opp/op_proto/built-in/inc)
if (NOT OPP_INC)
  message(FATAL_ERROR "Cannot find all_ops.h in ${ASCEND_HOME}/opp/op_proto/built-in/inc")
endif()
include_directories("${OPP_INC}")
find_library(OPP_LIB_FILE opsproto PATHS ${ASCEND_HOME}/opp/op_proto/built-in)
if (NOT OPP_LIB_FILE)
  message(FATAL_ERROR "Cannot find libopsproto.so in ${ASCEND_HOME}/opp/op_proto/built-in")
else()
  message(STATUS "Found OPP Library: ${OPP_LIB_FILE}")
  add_library(opp_lib SHARED IMPORTED GLOBAL)
  set_property(TARGET opp_lib PROPERTY IMPORTED_LOCATION ${OPP_LIB_FILE})
endif()
set(hw_ascend_npu_builder_libs graph_lib opp_lib CACHE INTERNAL "hw_ascend_npu builder libs")
@@ -10,6 +10,7 @@ message(STATUS "LITE_WITH_NPU:\t${LITE_WITH_NPU}")
message(STATUS "LITE_WITH_XPU:\t${LITE_WITH_XPU}")
message(STATUS "LITE_WITH_FPGA:\t${LITE_WITH_FPGA}")
message(STATUS "LITE_WITH_BM:\t${LITE_WITH_BM}")
message(STATUS "LITE_WITH_HW_ASCEND_NPU:\t${LITE_WITH_HW_ASCEND_NPU}")
message(STATUS "LITE_WITH_PROFILE:\t${LITE_WITH_PROFILE}")
message(STATUS "LITE_WITH_CV:\t${LITE_WITH_CV}")
message(STATUS "LITE_WITH_ARM_LANG:\t${LITE_WITH_ARM_LANG}")
......
@@ -11,4 +11,4 @@ add_subdirectory(fpga)
add_subdirectory(npu)
add_subdirectory(xpu)
add_subdirectory(bm)
# add_subdirectory(hw_ascennd_npu)
add_subdirectory(hw_ascend_npu)
if (NOT LITE_WITH_HW_ASCEND_NPU)
  return()
endif()
message(STATUS "Compiling hw_ascend_npu bridges, deps: ${hw_ascend_npu_builder_libs}")
lite_cc_library(subgraph_bridge_graph_hw_ascend_npu
    SRCS graph.cc
    DEPS ${hw_ascend_npu_builder_libs})
set(hw_ascend_npu_subgraph_bridges
    subgraph_bridge_graph_hw_ascend_npu
    CACHE INTERNAL "hw_ascend_npu_subgraph_bridges")
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/kernels/hw_ascend_npu/bridges/graph.h"
#include <all_ops.h>  // operator definitions from the opp package
#include <utility>
// NOTE: CvtTensor, CvtPrecisionType and CvtDataLayoutType used below are
// expected from a bridges utility header that is not part of this commit.
namespace paddle {
namespace lite {
namespace subgraph {
namespace hw_ascend_npu {
int Graph::Add(const std::string& name, std::shared_ptr<Node> node) {
  auto it = nodes_.find(name);
  if (it != nodes_.end()) {
    // Only variable nodes may share the same name
    if (!node->is_var() || !it->second.back()->is_var()) {
      LOG(FATAL) << "[NPU] Const or data node " << name << " is redefined.";
      return -1;
    }
  } else {
    auto ret = nodes_.insert(
        std::make_pair(name, std::vector<std::shared_ptr<Node>>()));
    CHECK(ret.second);
    it = ret.first;
  }
  it->second.push_back(node);
  return it->second.size();
}
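// Note on the return value (illustrative): the index is 1-based and doubles
// as a version number. Adding a variable node named "y" twice yields indices
// 1 and 2, so the generated IR operators are named "y__1" and "y__2" (see
// Graph::Add<T> in graph.h); redefining a const or data node is fatal.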
// Const or data node
std::shared_ptr<Node> Graph::Add(const std::string& name,
                                 const Tensor& tensor,
                                 std::vector<int64_t> shape,
                                 DataLayoutType layout) {
  std::shared_ptr<Node> node = nullptr;
  PrecisionType precision = tensor.precision();
  if (tensor.persistable()) {
    // Const node; ge::op::Const matches the typeid check in Graph::Add<T>
    node = Add<ge::op::Const>(name, precision, layout);
    node->data<ge::op::Const>()->set_attr_value(
        CvtTensor(tensor, shape, layout));
  } else {
    // Data node
    node = Add(name, shape, precision, layout);
  }
  return node;
}
// Data node
std::shared_ptr<Node> Graph::Add(const std::string& name,
                                 std::vector<int64_t> shape,
                                 PrecisionType precision,
                                 DataLayoutType layout) {
  auto node = Add<ge::op::Data>(name, precision, layout);
  ge::TensorDesc desc(
      ge::Shape(shape), CvtDataLayoutType(layout), CvtPrecisionType(precision));
  node->data<ge::op::Data>()->update_input_desc_x(desc);
  return node;
}
} // namespace hw_ascend_npu
} // namespace subgraph
} // namespace lite
} // namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <cstring>  // for std::memcpy used below
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
// graph IR headers from the atc package
#include "graph/operators.h"
#include "lite/core/op_lite.h"
#include "lite/core/tensor.h"
namespace paddle {
namespace lite {
namespace subgraph {
namespace hw_ascend_npu {
// Graph and Node are defined to collect all of the converted Ascend IR nodes
class Node {
 public:
  enum class Role {
    kVar = 0,
    kConst,
    kData,
  };

  Node(std::shared_ptr<ge::Operator> data,
       PrecisionType precision,
       DataLayoutType layout,
       Role role)
      : data_(data), precision_(precision), layout_(layout), role_(role) {}
  Node(PrecisionType precision, DataLayoutType layout, Role role)
      : precision_(precision), layout_(layout), role_(role) {}

  void set_data(std::shared_ptr<ge::Operator> data) { data_ = data; }
  void set_precision(PrecisionType precision) { precision_ = precision; }
  void set_layout(DataLayoutType layout) { layout_ = layout; }
  void set_role(Role role) { role_ = role; }

  template <typename T>
  std::shared_ptr<T> data() {
    return std::static_pointer_cast<T>(data_);
  }
  std::shared_ptr<ge::Operator> data() { return data_; }
  PrecisionType precision() const { return precision_; }
  DataLayoutType layout() const { return layout_; }
  bool is_var() const { return role_ == Role::kVar; }
  bool is_const() const { return role_ == Role::kConst; }
  bool is_data() const { return role_ == Role::kData; }

 private:
  std::shared_ptr<ge::Operator> data_{nullptr};
  PrecisionType precision_{PRECISION(kFloat)};
  DataLayoutType layout_{DATALAYOUT(kNCHW)};
  Role role_{Role::kVar};
};
class Graph {
 public:
  int Add(const std::string& name, std::shared_ptr<Node> node);

  // Variable, const or data node
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    Node::Role role = Node::Role::kVar;
    if (typeid(T) == typeid(ge::op::Const)) {
      role = Node::Role::kConst;
    } else if (typeid(T) == typeid(ge::op::Data)) {
      role = Node::Role::kData;
    }
    auto node = std::make_shared<Node>(precision, layout, role);
    auto idx = Add(name, node);
    CHECK_GE(idx, 1);
    // Generate a unique name for the created Ascend IR operator
    node->set_data(
        std::make_shared<T>(name + "__" + paddle::lite::to_string(idx)));
    return node;
  }
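  // Example (illustrative): Add<ge::op::Const>("w") tags the node kConst and
  // Add<ge::op::Data>("x") tags it kData; any other operator type from
  // all_ops.h falls through to kVar.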
  // Const or data node
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            std::vector<int64_t> shape,
                            DataLayoutType layout = DATALAYOUT(kNCHW));
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, tensor.dims().Vectorize(), layout);
  }
  std::shared_ptr<Node> Add(const std::string& name,
                            const Tensor& tensor,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, tensor, dims.Vectorize(), layout);
  }
  // Const node
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            std::vector<int64_t> shape = {},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    if (shape.empty()) {
      shape = {static_cast<int64_t>(data.size())};
    } else {
      int size = 1;
      for (auto i : shape) {
        size *= i;
      }
      CHECK_EQ(data.size(), static_cast<size_t>(size));
    }
    Tensor tensor;
    tensor.Resize(shape);
    tensor.set_persistable(true);
    std::memcpy(reinterpret_cast<uint8_t*>(tensor.mutable_data<T>()),
                reinterpret_cast<const uint8_t*>(data.data()),
                data.size() * sizeof(T));
    return Add(name, tensor, layout);
  }
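  // Example (illustrative): Add("bias", std::vector<float>{0.f, 1.f}) creates
  // a persistable const tensor of shape {2}; when an explicit shape is given,
  // its element count must equal data.size().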
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            const std::vector<T>& data,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, data, dims.Vectorize(), layout);
  }
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            std::vector<int64_t> shape = {1},
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    int64_t size = 1;
    for (auto i : shape) {
      size *= i;
    }
    std::vector<T> data(size, value);
    return Add(name, data, shape, layout);
  }
  template <typename T>
  std::shared_ptr<Node> Add(const std::string& name,
                            T value,
                            DDim dims,
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, value, dims.Vectorize(), layout);
  }
  // Data node
  std::shared_ptr<Node> Add(const std::string& name,
                            std::vector<int64_t> shape,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW));
  std::shared_ptr<Node> Add(const std::string& name,
                            DDim dims,
                            PrecisionType precision = PRECISION(kFloat),
                            DataLayoutType layout = DATALAYOUT(kNCHW)) {
    return Add(name, dims.Vectorize(), precision, layout);
  }

  std::shared_ptr<Node> Get(const std::string& name) {
    CHECK(Has(name)) << "[NPU] Node " << name << " not found.";
    return nodes_.at(name).back();
  }
  bool Has(const std::string& name) {
    return nodes_.find(name) != nodes_.end();
  }

 private:
  std::unordered_map<std::string, std::vector<std::shared_ptr<Node>>> nodes_;
};
} // namespace hw_ascend_npu
} // namespace subgraph
} // namespace lite
} // namespace paddle
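A minimal usage sketch of the Graph API above (illustrative only: BuildExample, the node names, and the shapes are invented and not part of this commit):

// sketch.cc - exercises the Add/Get/Has API from graph.h
#include "lite/kernels/hw_ascend_npu/bridges/graph.h"

void BuildExample(paddle::lite::subgraph::hw_ascend_npu::Graph* graph) {
  // Data node "x": float, NCHW, shape {1, 3, 224, 224}; resolves to the
  // Add(name, shape, precision, layout) overload and creates a ge::op::Data.
  graph->Add("x", {1, 3, 224, 224});
  // Const node "scale": the scalar overload builds a persistable one-element
  // float tensor holding 0.5f, which Graph::Add turns into a ge::op::Const.
  graph->Add("scale", 0.5f);
  // Nodes are looked up by name; Get returns the latest version of a name.
  CHECK(graph->Has("x"));
  CHECK(graph->Get("scale")->is_const());
}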