提交 19425363 编写于 作者: H hangq

free meta_graph after compile graph

上级 9d8fb786
...@@ -61,5 +61,22 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) { ...@@ -61,5 +61,22 @@ std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph) {
} }
return ret; return ret;
} }
// Returns the indices of all nodes in `graph` that consume the tensor at
// `tensor_idx` as one of their inputs.
std::vector<size_t> GetLinkedPostNodeIdx(const schema::MetaGraph &graph, const size_t &tensor_idx) {
  std::vector<size_t> post_node_idxes;
  for (size_t i = 0; i < graph.nodes()->size(); i++) {
    auto node = graph.nodes()->GetAs<schema::CNode>(i);
    if (node == nullptr) {
      continue;
    }
    auto node_input_idxes = node->inputIndex();
    // A node may have no inputIndex vector at all; skip it instead of
    // dereferencing a null flatbuffer vector.
    if (node_input_idxes == nullptr) {
      continue;
    }
    auto is_contain = std::any_of(node_input_idxes->begin(), node_input_idxes->end(),
                                  [&](const uint32_t &node_input_idx) { return node_input_idx == tensor_idx; });
    if (is_contain) {
      post_node_idxes.emplace_back(i);
    }
  }
  return post_node_idxes;
}
} // namespace lite } // namespace lite
} // namespace mindspore } // namespace mindspore
...@@ -34,215 +34,8 @@ std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph); ...@@ -34,215 +34,8 @@ std::vector<size_t> GetGraphInputNodes(const schema::MetaGraph *meta_graph);
std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph); std::vector<size_t> GetGraphOutputNodes(const schema::MetaGraph *meta_graph);
class OpNode { std::vector<size_t> GetLinkedPostNodeIdx(const schema::MetaGraph &graph, const size_t &tensor_idx);
public:
explicit OpNode(const NODE_ID &nodeId) : id(nodeId) {}
NODE_ID ID() { return id; };
void AddInEdge(NODE_ID nodeId) { inEdges.insert(nodeId); }
void AddOutEdge(NODE_ID nodeId) { outEdges.insert(nodeId); }
std::unordered_set<NODE_ID> GetAllInEdges() { return inEdges; }
std::unordered_set<NODE_ID> GetAllOutEdges() { return outEdges; }
protected:
NODE_ID id;
std::unordered_set<NODE_ID> inEdges;
std::unordered_set<NODE_ID> outEdges;
};
// Directed graph of ops built from a schema::MetaGraph: an edge src -> dst
// exists when one of src's output tensors is an input tensor of dst.
// NODE_T must provide ID(), AddInEdge(), AddOutEdge(), GetAllInEdges() and
// GetAllOutEdges() (see OpNode). The graph owns its NODE_T instances and
// deletes them in the destructor.
template <typename NODE_T>
class OpGraph {
 public:
  OpGraph() {}
  ~OpGraph();
  // Populates nodes and edges from the flatbuffer graph definition.
  // Returns RET_OK on success, RET_ERROR on null input or edge failure.
  int Build(const schema::MetaGraph *subGraphDef);
  // Returns the node with the given id, or nullptr if absent.
  NODE_T *GetNode(NODE_ID nodeId);
  // Returns the existing node for nodeId, or creates one; nullptr on
  // allocation failure.
  NODE_T *AddNode(NODE_ID nodeId);
  // Nodes with no incoming edges (graph inputs).
  std::unordered_set<NODE_T *> GetInputNode();
  // Nodes with no outgoing edges (graph outputs).
  std::unordered_set<NODE_T *> GetOutputNode();
  // Registers externally-created nodes, keyed by their ID.
  void AddNodes(std::vector<NODE_T *> addNodes);
  // Removes the given nodes from the map (does not delete them).
  void DeleteNodes(std::vector<NODE_T *> deleteNodes);
  // NOTE(review): declared but no definition is visible in this file.
  void AddEdge(NODE_ID nodeId);
  // Links srcId -> dstId, creating either endpoint on demand.
  int AddEdge(NODE_ID srcId, NODE_ID dstId);
  // Adds edges from srcNodeDef to every node in opDefs that consumes one of
  // its output tensors.
  int AddEdge(const schema::CNode *srcNodeDef, const flatbuffers::Vector<flatbuffers::Offset<schema::CNode>> *opDefs);
  // Map of node -> the set of nodes it depends on (its in-edges).
  std::unordered_map<NODE_T *, std::unordered_set<NODE_T *>> GetDepends();

 protected:
  std::unordered_map<NODE_ID, NODE_T *> nodes;
};
// Builds the op graph from a flatbuffer MetaGraph: one node per CNode, plus
// the tensor-producer/consumer edges between them.
// Returns RET_OK on success, RET_ERROR on null/invalid input.
template <typename NODE_T>
int OpGraph<NODE_T>::Build(const schema::MetaGraph *subGraphDef) {
  if (subGraphDef == nullptr) {
    // MS_LOGE("subGraphDef is nullptr");
    return RET_ERROR;
  }
  auto opDefs = subGraphDef->nodes();
  if (opDefs == nullptr) {
    // A MetaGraph without a node vector cannot be built.
    // MS_LOGE("subGraphDef has no nodes");
    return RET_ERROR;
  }
  uint32_t opCount = opDefs->size();
  for (uint32_t i = 0; i < opCount; i++) {
    auto opDef = opDefs->GetAs<schema::CNode>(i);
    if (opDef == nullptr) {
      // GetAs may yield null for a malformed buffer; fail instead of
      // dereferencing it below.
      // MS_LOGE("opDef is nullptr");
      return RET_ERROR;
    }
    auto node = AddNode(std::string(opDef->name()->c_str()));
    if (node == nullptr) {
      // MS_LOGE("add srcNode failed,name %s", opDef->name()->c_str());
      return RET_ERROR;
    }
    auto ret = AddEdge(opDef, opDefs);
    if (ret != RET_OK) {
      // MS_LOGE("%s add edge failed. ret:%d", opDef->name()->c_str(), ret);
      return RET_ERROR;
    }
  }
  return RET_OK;
}
// Adds an edge from srcNodeDef to every node in nodeDefs that reads one of
// srcNodeDef's output tensors. Also registers srcNodeDef itself so that a
// single, unconnected op still appears in the graph.
template <typename NODE_T>
int OpGraph<NODE_T>::AddEdge(const schema::CNode *srcNodeDef,
                             const flatbuffers::Vector<flatbuffers::Offset<schema::CNode>> *nodeDefs) {
  MS_ASSERT(srcNodeDef != nullptr);
  MS_ASSERT(nodeDefs != nullptr);
  NODE_ID srcId = std::string(srcNodeDef->name()->c_str());
  uint32_t opCount = nodeDefs->size();
  // for single op condition
  AddNode(srcId);
  for (auto index : *(srcNodeDef->outputIndex())) {
    for (uint32_t i = 0; i < opCount; i++) {
      auto dstNodeDef = nodeDefs->GetAs<schema::CNode>(i);
      if (dstNodeDef == nullptr) {
        // Skip malformed entries instead of dereferencing null below.
        continue;
      }
      auto inputIndex = dstNodeDef->inputIndex();
      // Compare as uint32_t: inputIndex holds uint32_t tensor indices, so an
      // int parameter would mix signedness.
      bool linked = std::any_of(inputIndex->begin(), inputIndex->end(),
                                [&index](const uint32_t in) { return in == index; });
      if (!linked) {
        continue;
      }
      NODE_ID dstId = std::string(dstNodeDef->name()->c_str());
      auto ret = AddEdge(srcId, dstId);
      if (ret != RET_OK) {
        return ret;
      }
    }
  }
  return RET_OK;
}
// Links srcId -> dstId, creating either node on demand.
// Returns RET_ERROR if either node cannot be created.
template <typename NODE_T>
int OpGraph<NODE_T>::AddEdge(NODE_ID srcId, NODE_ID dstId) {
  auto srcNode = AddNode(srcId);
  if (srcNode == nullptr) {
    // MS_LOGE("add srcNode failed");
    return RET_ERROR;
  }
  auto dstNode = AddNode(dstId);
  if (dstNode == nullptr) {
    // MS_LOGE("add dstNode failed");
    return RET_ERROR;
  }
  // NOTE(review): node *pointers* are passed here, while OpNode's
  // AddOutEdge/AddInEdge take a NODE_ID — this only compiles if the concrete
  // NODE_T accepts a node pointer (or NODE_ID converts). Confirm against the
  // NODE_T actually instantiated.
  srcNode->AddOutEdge(dstNode);
  dstNode->AddInEdge(srcNode);
  return RET_OK;
}
// Looks up a node by id; returns nullptr when the id is unknown.
template <typename NODE_T>
NODE_T *OpGraph<NODE_T>::GetNode(NODE_ID nodeId) {
  auto iter = nodes.find(nodeId);
  return (iter == nodes.end()) ? nullptr : iter->second;
}
// Returns the existing node for nodeId, or allocates and registers a new one.
// Returns nullptr only when allocation fails.
template <typename NODE_T>
NODE_T *OpGraph<NODE_T>::AddNode(NODE_ID nodeId) {
  auto *existing = GetNode(nodeId);
  if (existing != nullptr) {
    return existing;
  }
  auto *created = new (std::nothrow) NODE_T(nodeId);
  if (created == nullptr) {
    // MS_LOGE("new node failed");
    return nullptr;
  }
  nodes[nodeId] = created;
  return created;
}
// Registers externally-created nodes in the graph, keyed by their ID.
// A null entry is skipped instead of aborting, so the nodes after it are
// still added (matches DeleteNodes' handling of null entries).
template <typename NODE_T>
void OpGraph<NODE_T>::AddNodes(std::vector<NODE_T *> addNodes) {
  for (auto node : addNodes) {
    if (node == nullptr) {
      continue;  // was `return`, which silently dropped all remaining nodes
    }
    nodes[node->ID()] = node;
  }
}
// Removes the given nodes from the graph's map. Entries that are null or not
// present are ignored. The nodes themselves are NOT deleted here.
template <typename NODE_T>
void OpGraph<NODE_T>::DeleteNodes(std::vector<NODE_T *> deleteNodes) {
  for (auto *candidate : deleteNodes) {
    if (candidate == nullptr) {
      continue;
    }
    if (GetNode(candidate->ID()) == nullptr) {
      continue;
    }
    nodes.erase(candidate->ID());
  }
}
// Returns every node that has no incoming edges (the graph's entry points).
template <typename NODE_T>
std::unordered_set<NODE_T *> OpGraph<NODE_T>::GetInputNode() {
  std::unordered_set<NODE_T *> entry_nodes;
  for (const auto &pair : nodes) {
    auto *candidate = pair.second;
    if (candidate->GetAllInEdges().empty()) {
      entry_nodes.insert(candidate);
    }
  }
  return entry_nodes;
}
// Returns every node that has no outgoing edges (the graph's exit points).
template <typename NODE_T>
std::unordered_set<NODE_T *> OpGraph<NODE_T>::GetOutputNode() {
  std::unordered_set<NODE_T *> exit_nodes;
  for (const auto &pair : nodes) {
    auto *candidate = pair.second;
    if (candidate->GetAllOutEdges().empty()) {
      exit_nodes.insert(candidate);
    }
  }
  return exit_nodes;
}
// Builds a dependency table: each node mapped to the set of nodes it depends
// on (i.e. its in-edges).
template <typename NODE_T>
std::unordered_map<NODE_T *, std::unordered_set<NODE_T *>> OpGraph<NODE_T>::GetDepends() {
  std::unordered_map<NODE_T *, std::unordered_set<NODE_T *>> dependency_table;
  for (const auto &pair : nodes) {
    dependency_table[pair.second] = pair.second->GetAllInEdges();
  }
  return dependency_table;
}
// The graph owns its nodes: release every one, then drop the map entries.
template <typename NODE_T>
OpGraph<NODE_T>::~OpGraph() {
  for (const auto &pair : nodes) {
    delete pair.second;
  }
  nodes.clear();
}
} // namespace lite } // namespace lite
} // namespace mindspore } // namespace mindspore
#endif // MINDSPORE_LITE_COMMON_GRAPH_UTIL_H_ #endif // MINDSPORE_LITE_COMMON_GRAPH_UTIL_H_
...@@ -32,10 +32,29 @@ ...@@ -32,10 +32,29 @@
namespace mindspore { namespace mindspore {
namespace lite { namespace lite {
// Ops whose weight tensors are NOT copied in ConvertTensors (see
// WeightTensorNeedCopy): a weight feeding any of these keeps pointing into
// the meta-graph buffer. NOTE(review): presumably their kernels re-pack the
// weights themselves before the buffer is freed — confirm against the
// corresponding kernel Init code.
static std::vector<schema::PrimitiveType> packed_op = {
  schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D,
  schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D,
  schema::PrimitiveType_MatMul};
// this method will not check whether tensor_idx is a weight tensor index, caller should ensure this.
// Returns true when the tensor's data must be copied out of the meta graph,
// i.e. when none of its consumers is a packed op.
static bool WeightTensorNeedCopy(const lite::Model *model, const uint32_t tensor_idx) {
  MS_ASSERT(nullptr != model);
  auto meta_graph = model->GetMetaGraph();
  MS_ASSERT(nullptr != meta_graph);
  for (auto post_node_idx : GetLinkedPostNodeIdx(*meta_graph, tensor_idx)) {
    auto consumer = meta_graph->nodes()->GetAs<schema::CNode>(post_node_idx);
    MS_ASSERT(consumer != nullptr);
    // Any packed-op consumer means the weight stays in the meta-graph buffer.
    if (IsContain(packed_op, consumer->primitive()->value_type())) {
      return false;
    }
  }
  return true;
}
int LiteSession::ConvertTensors(const lite::Model *model) { int LiteSession::ConvertTensors(const lite::Model *model) {
MS_ASSERT(nullptr != model); MS_ASSERT(nullptr != model);
auto meta_graph = model->GetMetaGraph(); auto meta_graph = model->GetMetaGraph();
MS_ASSERT(nullptr != meta_graph); MS_ASSERT(nullptr != meta_graph);
copyed_tensor_idxes_.clear();
uint32_t tensorCount = meta_graph->allTensors()->size(); uint32_t tensorCount = meta_graph->allTensors()->size();
for (uint32_t i = 0; i < tensorCount; i++) { for (uint32_t i = 0; i < tensorCount; i++) {
auto *srcTensor = meta_graph->allTensors()->GetAs<schema::Tensor>(i); auto *srcTensor = meta_graph->allTensors()->GetAs<schema::Tensor>(i);
...@@ -54,16 +73,30 @@ int LiteSession::ConvertTensors(const lite::Model *model) { ...@@ -54,16 +73,30 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
} }
} }
int dataType = srcTensor->dataType(); int dataType = srcTensor->dataType();
auto *dstTensor = new tensor::Tensor(TypeId(dataType), shape, srcTensor->format(), srcTensor->nodeType()); auto *dstTensor =
new (std::nothrow) tensor::Tensor(TypeId(dataType), shape, srcTensor->format(), srcTensor->nodeType());
if (dstTensor == nullptr) {
MS_LOG(ERROR) << "new " << i << "th tensor failed";
return RET_NULL_PTR;
}
if (srcTensor->nodeType() == schema::NodeType_ValueNode && srcTensor->data() != nullptr && if (srcTensor->nodeType() == schema::NodeType_ValueNode && srcTensor->data() != nullptr &&
srcTensor->data()->size() > 0) { srcTensor->data()->size() > 0) {
if (shape.empty()) { if (shape.empty()) {
shape.push_back(1); shape.push_back(1);
dstTensor->set_shape(shape);
} }
MS_ASSERT(dstTensor != nullptr);
MS_ASSERT(dstTensor->Size() == srcTensor->data()->size()); MS_ASSERT(dstTensor->Size() == srcTensor->data()->size());
// no copy data, do copy when call LiteKernel::Init if (WeightTensorNeedCopy(model, i)) {
dstTensor->SetData(const_cast<unsigned char *>(srcTensor->data()->data())); auto ret = dstTensor->MallocData();
if (ret != RET_OK) {
MS_LOG(ERROR) << "Malloc data for " << i << "th tensor failed";
return RET_ERROR;
}
memcpy(dstTensor->Data(), srcTensor->data()->data(), dstTensor->Size());
copyed_tensor_idxes_.emplace_back(i);
} else {
dstTensor->SetData(const_cast<unsigned char *>(srcTensor->data()->data()));
}
} }
auto quant_params = srcTensor->quantParams(); auto quant_params = srcTensor->quantParams();
if (quant_params != nullptr) { if (quant_params != nullptr) {
...@@ -74,7 +107,6 @@ int LiteSession::ConvertTensors(const lite::Model *model) { ...@@ -74,7 +107,6 @@ int LiteSession::ConvertTensors(const lite::Model *model) {
dstTensor->AddQuantParam(quant_arg); dstTensor->AddQuantParam(quant_arg);
} }
} }
this->tensors_.emplace_back(dstTensor); this->tensors_.emplace_back(dstTensor);
} }
...@@ -240,6 +272,7 @@ int LiteSession::CompileGraph(Model *model) { ...@@ -240,6 +272,7 @@ int LiteSession::CompileGraph(Model *model) {
} }
executor->Prepare(this->kernels_); executor->Prepare(this->kernels_);
model->FreeMetaGraph();
return RET_OK; return RET_OK;
} }
...@@ -277,7 +310,10 @@ int LiteSession::Init(Context *context) { ...@@ -277,7 +310,10 @@ int LiteSession::Init(Context *context) {
} }
#endif #endif
executor = new Executor(); executor = new Executor();
MS_ASSERT(nullptr != executor); if (nullptr == executor) {
MS_LOG(ERROR) << "new Executor failed";
return RET_ERROR;
}
return RET_OK; return RET_OK;
} }
...@@ -288,9 +324,12 @@ void LiteSession::BindThread(bool if_bind) { ...@@ -288,9 +324,12 @@ void LiteSession::BindThread(bool if_bind) {
} }
LiteSession::~LiteSession() { LiteSession::~LiteSession() {
for (auto *tensor : tensors_) { for (size_t i = 0; i < tensors_.size(); i++) {
// weight data can not be to free, we will free weight data when freeing meta_graph auto *tensor = tensors_.at(i);
if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs_, tensor)) { MS_ASSERT(tensor != nullptr);
// data of weight tensor of node in packed_op can not be to free, we will free weight data when freeing meta_graph
if (tensor->TensorType() == schema::NodeType_ValueNode && !IsContain(this->inputs_, tensor) &&
!IsContain(copyed_tensor_idxes_, i)) {
tensor->SetData(nullptr); tensor->SetData(nullptr);
} }
delete tensor; delete tensor;
......
...@@ -87,6 +87,7 @@ class LiteSession : public session::LiteSession { ...@@ -87,6 +87,7 @@ class LiteSession : public session::LiteSession {
Context *context_ = nullptr; Context *context_ = nullptr;
std::vector<kernel::LiteKernel *> kernels_; std::vector<kernel::LiteKernel *> kernels_;
std::vector<tensor::Tensor *> tensors_; std::vector<tensor::Tensor *> tensors_;
std::vector<size_t> copyed_tensor_idxes_;
// graph input tensors // graph input tensors
std::vector<tensor::Tensor *> inputs_; std::vector<tensor::Tensor *> inputs_;
// graph output tensors // graph output tensors
......
...@@ -135,7 +135,7 @@ mindspore::lite::PrimitiveC *Model::GetOp(const std::string &name) const { ...@@ -135,7 +135,7 @@ mindspore::lite::PrimitiveC *Model::GetOp(const std::string &name) const {
void Model::FreeMetaGraph() { void Model::FreeMetaGraph() {
MS_ASSERT(nullptr != model_impl_); MS_ASSERT(nullptr != model_impl_);
return model_impl_->FreeMetaGraph(); model_impl_->FreeMetaGraph();
} }
const schema::MetaGraph *Model::GetMetaGraph() const { const schema::MetaGraph *Model::GetMetaGraph() const {
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/abs.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
// Re-serializes an Abs primitive into `fbb`. Abs carries no attributes, so an
// empty table is emitted and wrapped in a Primitive union.
int Abs::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto abs_offset = schema::CreateAbs(*fbb);
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_Abs, abs_offset.o));
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -32,27 +32,9 @@ class Abs : public ArithmeticSelf { ...@@ -32,27 +32,9 @@ class Abs : public ArithmeticSelf {
Abs() = default; Abs() = default;
explicit Abs(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} explicit Abs(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else #else
explicit Abs(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} Abs() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateAbs(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Abs, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
...@@ -55,7 +55,19 @@ int Activation::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> ...@@ -55,7 +55,19 @@ int Activation::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr>
return RET_OK; return RET_OK;
} }
#else #else
// Re-serializes an Activation primitive (type + alpha) into `fbb`.
// Returns RET_ERROR when the source primitive holds no Activation attributes.
int Activation::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_Activation();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Activation return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateActivation(*fbb, src_attr->type(), src_attr->alpha());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_Activation, table_offset.o));
  return RET_OK;
}
int Activation::GetType() const { return this->primitive_->value_as_Activation()->type(); } int Activation::GetType() const { return this->primitive_->value_as_Activation()->type(); }
float Activation::GetAlpha() const { return this->primitive_->value_as_Activation()->alpha(); } float Activation::GetAlpha() const { return this->primitive_->value_as_Activation()->alpha(); }
#endif #endif
......
...@@ -30,34 +30,13 @@ class Activation : public PrimitiveC { ...@@ -30,34 +30,13 @@ class Activation : public PrimitiveC {
MS_DECLARE_PARENT(Activation, PrimitiveC); MS_DECLARE_PARENT(Activation, PrimitiveC);
Activation() = default; Activation() = default;
explicit Activation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Activation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetType(int type); void SetType(int type);
void SetAlpha(float alpha); void SetAlpha(float alpha);
#else #else
explicit Activation(schema::Primitive *primitive) : PrimitiveC(primitive) {} Activation() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Activation();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateActivation(fbb, attr->type(), attr->alpha());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Activation, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetType() const; int GetType() const;
float GetAlpha() const; float GetAlpha() const;
......
...@@ -26,7 +26,19 @@ void ActivationGrad::SetType(int type) { ...@@ -26,7 +26,19 @@ void ActivationGrad::SetType(int type) {
} }
#else #else
// Re-serializes an ActivationGrad primitive (type) into `fbb`.
// Returns RET_ERROR when the source primitive holds no ActivationGrad attributes.
int ActivationGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_ActivationGrad();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_ActivationGrad return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateActivationGrad(*fbb, src_attr->type());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_ActivationGrad, table_offset.o));
  return RET_OK;
}
int ActivationGrad::GetType() const { return this->primitive_->value_as_ActivationGrad()->type(); } int ActivationGrad::GetType() const { return this->primitive_->value_as_ActivationGrad()->type(); }
#endif #endif
......
...@@ -33,30 +33,9 @@ class ActivationGrad : public PrimitiveC { ...@@ -33,30 +33,9 @@ class ActivationGrad : public PrimitiveC {
explicit ActivationGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit ActivationGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
void SetType(int type); void SetType(int type);
#else #else
explicit ActivationGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {} ActivationGrad() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_ActivationGrad();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateActivationGrad(fbb, attr->type());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_ActivationGrad, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetType() const; int GetType() const;
}; };
......
...@@ -50,7 +50,19 @@ int Add::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs ...@@ -50,7 +50,19 @@ int Add::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs
} }
#else #else
// Re-serializes an Add primitive (activationType) into `fbb`.
// Returns RET_ERROR when the source primitive holds no Add attributes.
int Add::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_Add();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Add return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateAdd(*fbb, src_attr->activationType());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_Add, table_offset.o));
  return RET_OK;
}
int Add::GetActivationType() const { return this->primitive_->value_as_Add()->activationType(); } int Add::GetActivationType() const { return this->primitive_->value_as_Add()->activationType(); }
#endif #endif
......
...@@ -31,33 +31,12 @@ class Add : public Arithmetic { ...@@ -31,33 +31,12 @@ class Add : public Arithmetic {
MS_DECLARE_PARENT(Add, Arithmetic); MS_DECLARE_PARENT(Add, Arithmetic);
Add() = default; Add() = default;
explicit Add(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} explicit Add(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit Add(schema::Primitive *primitive) : Arithmetic(primitive) {} Add() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Add();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateAdd(fbb, attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Add, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetActivationType() const; int GetActivationType() const;
}; };
......
...@@ -24,7 +24,19 @@ int AddN::GetN() const { return this->primitive_->value.AsAddN()->N; } ...@@ -24,7 +24,19 @@ int AddN::GetN() const { return this->primitive_->value.AsAddN()->N; }
void AddN::SetN(int n) { this->primitive_->value.AsAddN()->N = n; } void AddN::SetN(int n) { this->primitive_->value.AsAddN()->N = n; }
#else #else
// Re-serializes an AddN primitive (N) into `fbb`.
// Returns RET_ERROR when the source primitive holds no AddN attributes.
int AddN::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_AddN();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_AddN return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateAddN(*fbb, src_attr->N());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_AddN, table_offset.o));
  return RET_OK;
}
int AddN::GetN() const { return this->primitive_->value_as_AddN()->N(); } int AddN::GetN() const { return this->primitive_->value_as_AddN()->N(); }
#endif #endif
......
...@@ -33,30 +33,9 @@ class AddN : public PrimitiveC { ...@@ -33,30 +33,9 @@ class AddN : public PrimitiveC {
explicit AddN(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit AddN(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
void SetN(int n); void SetN(int n);
#else #else
explicit AddN(schema::Primitive *primitive) : PrimitiveC(primitive) {} AddN() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_AddN();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateAddN(fbb, attr->N());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_AddN, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetN() const; int GetN() const;
......
...@@ -32,7 +32,20 @@ void ArgMax::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMax()->k ...@@ -32,7 +32,20 @@ void ArgMax::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMax()->k
void ArgMax::SetAxisType(int axis_type) { this->primitive_->value.AsArgMax()->axisType = axis_type; } void ArgMax::SetAxisType(int axis_type) { this->primitive_->value.AsArgMax()->axisType = axis_type; }
#else #else
// Re-serializes an ArgMax primitive (axis, outMaxValue, topK, keepDims,
// axisType) into `fbb`. Returns RET_ERROR when the source primitive holds no
// ArgMax attributes.
int ArgMax::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_ArgMax();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_ArgMax return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateArgMax(*fbb, src_attr->axis(), src_attr->outMaxValue(), src_attr->topK(),
                                           src_attr->keepDims(), src_attr->axisType());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_ArgMax, table_offset.o));
  return RET_OK;
}
int ArgMax::GetAxis() const { return this->primitive_->value_as_ArgMax()->axis(); } int ArgMax::GetAxis() const { return this->primitive_->value_as_ArgMax()->axis(); }
bool ArgMax::GetOutMaxValue() const { return this->primitive_->value_as_ArgMax()->outMaxValue(); } bool ArgMax::GetOutMaxValue() const { return this->primitive_->value_as_ArgMax()->outMaxValue(); }
int ArgMax::GetTopK() const { return this->primitive_->value_as_ArgMax()->topK(); } int ArgMax::GetTopK() const { return this->primitive_->value_as_ArgMax()->topK(); }
......
...@@ -37,31 +37,9 @@ class ArgMax : public PrimitiveC { ...@@ -37,31 +37,9 @@ class ArgMax : public PrimitiveC {
void SetKeepDims(bool keep_dims); void SetKeepDims(bool keep_dims);
void SetAxisType(int axis_type); void SetAxisType(int axis_type);
#else #else
explicit ArgMax(schema::Primitive *primitive) : PrimitiveC(primitive) {} ArgMax() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_ArgMax();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateArgMax(fbb, attr->axis(), attr->outMaxValue(),
attr->topK(), attr->keepDims(), attr->axisType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_ArgMax, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetAxis() const; int GetAxis() const;
......
...@@ -32,7 +32,20 @@ void ArgMin::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMin()->k ...@@ -32,7 +32,20 @@ void ArgMin::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMin()->k
void ArgMin::SetAxisType(int axis_type) { this->primitive_->value.AsArgMin()->axisType = axis_type; } void ArgMin::SetAxisType(int axis_type) { this->primitive_->value.AsArgMin()->axisType = axis_type; }
#else #else
// Re-serializes an ArgMin primitive (axis, outMaxValue, topK, keepDims,
// axisType) into `fbb`. Returns RET_ERROR when the source primitive holds no
// ArgMin attributes.
int ArgMin::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto src_attr = primitive->value_as_ArgMin();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_ArgMin return nullptr";
    return RET_ERROR;
  }
  auto table_offset = schema::CreateArgMin(*fbb, src_attr->axis(), src_attr->outMaxValue(), src_attr->topK(),
                                           src_attr->keepDims(), src_attr->axisType());
  fbb->Finish(schema::CreatePrimitive(*fbb, schema::PrimitiveType_ArgMin, table_offset.o));
  return RET_OK;
}
int ArgMin::GetAxis() const { return this->primitive_->value_as_ArgMin()->axis(); } int ArgMin::GetAxis() const { return this->primitive_->value_as_ArgMin()->axis(); }
bool ArgMin::GetOutMaxValue() const { return this->primitive_->value_as_ArgMin()->outMaxValue(); } bool ArgMin::GetOutMaxValue() const { return this->primitive_->value_as_ArgMin()->outMaxValue(); }
int ArgMin::GetTopK() const { return this->primitive_->value_as_ArgMin()->topK(); } int ArgMin::GetTopK() const { return this->primitive_->value_as_ArgMin()->topK(); }
......
...@@ -37,31 +37,9 @@ class ArgMin : public PrimitiveC { ...@@ -37,31 +37,9 @@ class ArgMin : public PrimitiveC {
void SetKeepDims(bool keep_dims); void SetKeepDims(bool keep_dims);
void SetAxisType(int axis_type); void SetAxisType(int axis_type);
#else #else
explicit ArgMin(schema::Primitive *primitive) : PrimitiveC(primitive) {} ArgMin() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_ArgMin();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateArgMin(fbb, attr->axis(), attr->outMaxValue(),
attr->topK(), attr->keepDims(), attr->axisType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_ArgMin, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetAxis() const; int GetAxis() const;
......
...@@ -32,7 +32,11 @@ class Arithmetic : public PrimitiveC { ...@@ -32,7 +32,11 @@ class Arithmetic : public PrimitiveC {
Arithmetic() = default; Arithmetic() = default;
explicit Arithmetic(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Arithmetic(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else #else
explicit Arithmetic(schema::Primitive *primitive) : PrimitiveC(primitive) {} // explicit Arithmetic(schema::Primitive *primitive) : PrimitiveC(primitive) {}
Arithmetic() = default;
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override {
return RET_ERROR;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
bool Broadcasting() { return this->broadcasting_; } bool Broadcasting() { return this->broadcasting_; }
......
...@@ -29,7 +29,11 @@ class ArithmeticSelf : public PrimitiveC { ...@@ -29,7 +29,11 @@ class ArithmeticSelf : public PrimitiveC {
ArithmeticSelf() = default; ArithmeticSelf() = default;
explicit ArithmeticSelf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit ArithmeticSelf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
#else #else
explicit ArithmeticSelf(schema::Primitive *primitive) : PrimitiveC(primitive) {} // explicit ArithmeticSelf(schema::Primitive *primitive) : PrimitiveC(primitive) {}
ArithmeticSelf() = default;
int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override {
return RET_ERROR;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
}; };
......
...@@ -49,7 +49,14 @@ int BatchNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> & ...@@ -49,7 +49,14 @@ int BatchNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &
} }
#else #else
int BatchNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto val_offset = schema::CreateBatchNorm(*fbb);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BatchNorm, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
float BatchNorm::GetEpsilon() const { return this->primitive_->value_as_BatchNorm()->epsilon(); } float BatchNorm::GetEpsilon() const { return this->primitive_->value_as_BatchNorm()->epsilon(); }
#endif #endif
......
...@@ -31,30 +31,12 @@ class BatchNorm : public PrimitiveC { ...@@ -31,30 +31,12 @@ class BatchNorm : public PrimitiveC {
MS_DECLARE_PARENT(BatchNorm, PrimitiveC); MS_DECLARE_PARENT(BatchNorm, PrimitiveC);
BatchNorm() = default; BatchNorm() = default;
explicit BatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit BatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetEpsilon(float epsilon); void SetEpsilon(float epsilon);
#else #else
explicit BatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {} BatchNorm() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateBatchNorm(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BatchNorm, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetEpsilon() const; float GetEpsilon() const;
}; };
......
...@@ -32,7 +32,31 @@ void BatchToSpace::SetBlockShape(const std::vector<int> &block_shape) { ...@@ -32,7 +32,31 @@ void BatchToSpace::SetBlockShape(const std::vector<int> &block_shape) {
void BatchToSpace::SetCrops(const std::vector<int> &crops) { this->primitive_->value.AsBatchToSpace()->crops = crops; } void BatchToSpace::SetCrops(const std::vector<int> &crops) { this->primitive_->value.AsBatchToSpace()->crops = crops; }
#else #else
int BatchToSpace::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_BatchToSpace();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_BatchToSpace return nullptr";
return RET_ERROR;
}
std::vector<int32_t> blockShape;
if (attr->blockShape() != nullptr) {
for (int i = 0; i < static_cast<int>(attr->blockShape()->size()); i++) {
blockShape.push_back(attr->blockShape()->data()[i]);
}
}
std::vector<int32_t> crops;
if (attr->crops() != nullptr) {
for (int i = 0; i < static_cast<int>(attr->crops()->size()); i++) {
crops.push_back(attr->crops()->data()[i]);
}
}
auto val_offset = schema::CreateBatchToSpaceDirect(*fbb, &blockShape, &crops);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BatchToSpace, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
std::vector<int> BatchToSpace::GetBlockShape() const { std::vector<int> BatchToSpace::GetBlockShape() const {
auto fb_vector = this->primitive_->value_as_BatchToSpace()->blockShape(); auto fb_vector = this->primitive_->value_as_BatchToSpace()->blockShape();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -35,39 +35,9 @@ class BatchToSpace : public PrimitiveC { ...@@ -35,39 +35,9 @@ class BatchToSpace : public PrimitiveC {
void SetBlockShape(const std::vector<int> &block_shape); void SetBlockShape(const std::vector<int> &block_shape);
void SetCrops(const std::vector<int> &crops); void SetCrops(const std::vector<int> &crops);
#else #else
explicit BatchToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {} BatchToSpace() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_BatchToSpace();
MS_ASSERT(attr != nullptr);
auto blockShape = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->blockShape()->size()); i++) {
blockShape->push_back(attr->blockShape()->data()[i]);
}
auto crops = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->crops()->size()); i++) {
crops->push_back(attr->crops()->data()[i]);
}
auto val_offset = schema::CreateBatchToSpaceDirect(fbb, blockShape.release(), crops.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BatchToSpace, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
std::vector<int> GetBlockShape() const; std::vector<int> GetBlockShape() const;
......
...@@ -54,7 +54,25 @@ int BiasAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in ...@@ -54,7 +54,25 @@ int BiasAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
} }
#else #else
int BiasAdd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_BiasAdd();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_BiasAdd return nullptr";
return RET_ERROR;
}
std::vector<int32_t> axis;
if (attr->axis() != nullptr) {
for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) {
axis.push_back(attr->axis()->data()[i]);
}
}
auto val_offset = schema::CreateBiasAddDirect(*fbb, &axis);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BiasAdd, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
std::vector<int> BiasAdd::GetAxis() const { std::vector<int> BiasAdd::GetAxis() const {
auto fb_vector = this->primitive_->value_as_BiasAdd()->axis(); auto fb_vector = this->primitive_->value_as_BiasAdd()->axis();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -32,38 +32,12 @@ class BiasAdd : public PrimitiveC { ...@@ -32,38 +32,12 @@ class BiasAdd : public PrimitiveC {
MS_DECLARE_PARENT(BiasAdd, PrimitiveC); MS_DECLARE_PARENT(BiasAdd, PrimitiveC);
BiasAdd() = default; BiasAdd() = default;
explicit BiasAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit BiasAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetAxis(const std::vector<int> &axis); void SetAxis(const std::vector<int> &axis);
#else #else
explicit BiasAdd(schema::Primitive *primitive) : PrimitiveC(primitive) {} BiasAdd() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_BiasAdd();
MS_ASSERT(attr != nullptr);
auto axis = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) {
axis->push_back(attr->axis()->data()[i]);
}
auto val_offset = schema::CreateBiasAddDirect(fbb, axis.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BiasAdd, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
std::vector<int> GetAxis() const; std::vector<int> GetAxis() const;
}; };
......
...@@ -24,7 +24,25 @@ std::vector<int> BiasGrad::GetAxis() const { return this->primitive_->value.AsBi ...@@ -24,7 +24,25 @@ std::vector<int> BiasGrad::GetAxis() const { return this->primitive_->value.AsBi
void BiasGrad::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsBiasGrad()->axis = axis; } void BiasGrad::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsBiasGrad()->axis = axis; }
#else #else
int BiasGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_BiasGrad();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_BiasGrad return nullptr";
return RET_ERROR;
}
std::vector<int32_t> axis;
if (attr->axis() != nullptr) {
for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) {
axis.push_back(attr->axis()->data()[i]);
}
}
auto val_offset = schema::CreateBiasGradDirect(*fbb, &axis);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BiasGrad, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
std::vector<int> BiasGrad::GetAxis() const { std::vector<int> BiasGrad::GetAxis() const {
auto fb_vector = this->primitive_->value_as_BiasGrad()->axis(); auto fb_vector = this->primitive_->value_as_BiasGrad()->axis();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -35,35 +35,9 @@ class BiasGrad : public PrimitiveC { ...@@ -35,35 +35,9 @@ class BiasGrad : public PrimitiveC {
void SetAxis(const std::vector<int> &axis); void SetAxis(const std::vector<int> &axis);
#else #else
explicit BiasGrad(schema::Primitive *primitive) : PrimitiveC(primitive) {} BiasGrad() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_BiasGrad();
MS_ASSERT(attr != nullptr);
auto axis = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) {
axis->push_back(attr->axis()->data()[i]);
}
auto val_offset = schema::CreateBiasGradDirect(fbb, axis.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BiasGrad, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
std::vector<int> GetAxis() const; std::vector<int> GetAxis() const;
}; };
......
...@@ -26,7 +26,19 @@ void BNGradInput::SetEps(float eps) { this->primitive_->value.AsBNGradInput()->e ...@@ -26,7 +26,19 @@ void BNGradInput::SetEps(float eps) { this->primitive_->value.AsBNGradInput()->e
void BNGradInput::SetChannels(int channels) { this->primitive_->value.AsBNGradInput()->channels = channels; } void BNGradInput::SetChannels(int channels) { this->primitive_->value.AsBNGradInput()->channels = channels; }
#else #else
int BNGradInput::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_BNGradInput();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_BNGradInput return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateBNGradInput(*fbb, attr->eps(), attr->channels());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BNGradInput, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
float BNGradInput::GetEps() const { return this->primitive_->value_as_BNGradInput()->eps(); } float BNGradInput::GetEps() const { return this->primitive_->value_as_BNGradInput()->eps(); }
int BNGradInput::GetChannels() const { return this->primitive_->value_as_BNGradInput()->channels(); } int BNGradInput::GetChannels() const { return this->primitive_->value_as_BNGradInput()->channels(); }
......
...@@ -34,30 +34,9 @@ class BNGradInput : public PrimitiveC { ...@@ -34,30 +34,9 @@ class BNGradInput : public PrimitiveC {
void SetEps(float eps); void SetEps(float eps);
void SetChannels(int channels); void SetChannels(int channels);
#else #else
explicit BNGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {} BNGradInput() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_BNGradInput();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateBNGradInput(fbb, attr->eps(), attr->channels());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BNGradInput, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetEps() const; float GetEps() const;
int GetChannels() const; int GetChannels() const;
......
...@@ -26,7 +26,25 @@ void BroadcastTo::SetDstShape(const std::vector<int> &dst_shape) { ...@@ -26,7 +26,25 @@ void BroadcastTo::SetDstShape(const std::vector<int> &dst_shape) {
} }
#else #else
int BroadcastTo::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_BroadcastTo();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_BroadcastTo return nullptr";
return RET_ERROR;
}
std::vector<int32_t> dst_shape;
if (attr->dst_shape() != nullptr) {
for (int i = 0; i < static_cast<int>(attr->dst_shape()->size()); i++) {
dst_shape.push_back(attr->dst_shape()->data()[i]);
}
}
auto val_offset = schema::CreateBroadcastToDirect(*fbb, &dst_shape);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BroadcastTo, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
std::vector<int> BroadcastTo::GetDstShape() const { std::vector<int> BroadcastTo::GetDstShape() const {
auto fb_vector = this->primitive_->value_as_BroadcastTo()->dst_shape(); auto fb_vector = this->primitive_->value_as_BroadcastTo()->dst_shape();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -35,35 +35,9 @@ class BroadcastTo : public PrimitiveC { ...@@ -35,35 +35,9 @@ class BroadcastTo : public PrimitiveC {
void SetDstShape(const std::vector<int> &dst_shape); void SetDstShape(const std::vector<int> &dst_shape);
#else #else
explicit BroadcastTo(schema::Primitive *primitive) : PrimitiveC(primitive) {} BroadcastTo() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_BroadcastTo();
MS_ASSERT(attr != nullptr);
auto dst_shape = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->dst_shape()->size()); i++) {
dst_shape->push_back(attr->dst_shape()->data()[i]);
}
auto val_offset = schema::CreateBroadcastToDirect(fbb, dst_shape.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_BroadcastTo, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
std::vector<int> GetDstShape() const; std::vector<int> GetDstShape() const;
......
...@@ -26,7 +26,19 @@ void Cast::SetSrcT(int src_t) { this->primitive_->value.AsCast()->srcT = src_t; ...@@ -26,7 +26,19 @@ void Cast::SetSrcT(int src_t) { this->primitive_->value.AsCast()->srcT = src_t;
void Cast::SetDstT(int dst_t) { this->primitive_->value.AsCast()->dstT = dst_t; } void Cast::SetDstT(int dst_t) { this->primitive_->value.AsCast()->dstT = dst_t; }
#else #else
int Cast::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_Cast();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_Cast return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateCast(*fbb, attr->srcT(), attr->dstT());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Cast, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
int Cast::GetSrcT() const { return this->primitive_->value_as_Cast()->srcT(); } int Cast::GetSrcT() const { return this->primitive_->value_as_Cast()->srcT(); }
int Cast::GetDstT() const { return this->primitive_->value_as_Cast()->dstT(); } int Cast::GetDstT() const { return this->primitive_->value_as_Cast()->dstT(); }
......
...@@ -34,30 +34,9 @@ class Cast : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Cast : public PrimitiveC {
void SetSrcT(int src_t); void SetSrcT(int src_t);
void SetDstT(int dst_t); void SetDstT(int dst_t);
#else #else
explicit Cast(schema::Primitive *primitive) : PrimitiveC(primitive) {} Cast() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Cast();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateCast(fbb, attr->srcT(), attr->dstT());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Cast, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetSrcT() const; int GetSrcT() const;
......
...@@ -32,26 +32,15 @@ class Ceil : public ArithmeticSelf { ...@@ -32,26 +32,15 @@ class Ceil : public ArithmeticSelf {
Ceil() = default; Ceil() = default;
explicit Ceil(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} explicit Ceil(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else #else
explicit Ceil(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} Ceil() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateCeil(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Ceil, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override {
fbb.Clear(); MS_ASSERT(nullptr != primitive);
return prim; MS_ASSERT(nullptr != fbb);
auto val_offset = schema::CreateCeil(*fbb);
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Ceil, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
} }
#endif #endif
}; };
......
...@@ -26,7 +26,19 @@ void Clip::SetMax(float max) { this->primitive_->value.AsClip()->max = max; } ...@@ -26,7 +26,19 @@ void Clip::SetMax(float max) { this->primitive_->value.AsClip()->max = max; }
void Clip::SetMin(float min) { this->primitive_->value.AsClip()->min = min; } void Clip::SetMin(float min) { this->primitive_->value.AsClip()->min = min; }
#else #else
int Clip::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_Clip();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_Clip return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateClip(*fbb, attr->max(), attr->min());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Clip, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
float Clip::GetMax() const { return this->primitive_->value_as_Clip()->max(); } float Clip::GetMax() const { return this->primitive_->value_as_Clip()->max(); }
float Clip::GetMin() const { return this->primitive_->value_as_Clip()->min(); } float Clip::GetMin() const { return this->primitive_->value_as_Clip()->min(); }
......
...@@ -34,30 +34,9 @@ class Clip : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Clip : public PrimitiveC {
void SetMax(float max); void SetMax(float max);
void SetMin(float min); void SetMin(float min);
#else #else
explicit Clip(schema::Primitive *primitive) : PrimitiveC(primitive) {} Clip() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Clip();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateClip(fbb, attr->max(), attr->min());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Clip, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetMax() const; float GetMax() const;
float GetMin() const; float GetMin() const;
......
...@@ -60,7 +60,19 @@ int Concat::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp ...@@ -60,7 +60,19 @@ int Concat::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
} }
#else #else
int Concat::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_Concat();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_Concat return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateConcat(*fbb, attr->axis(), attr->n());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Concat, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
int Concat::GetAxis() const { return this->primitive_->value_as_Concat()->axis(); } int Concat::GetAxis() const { return this->primitive_->value_as_Concat()->axis(); }
int Concat::GetN() const { return this->primitive_->value_as_Concat()->n(); } int Concat::GetN() const { return this->primitive_->value_as_Concat()->n(); }
......
...@@ -31,34 +31,13 @@ class Concat : public PrimitiveC { ...@@ -31,34 +31,13 @@ class Concat : public PrimitiveC {
MS_DECLARE_PARENT(Concat, PrimitiveC); MS_DECLARE_PARENT(Concat, PrimitiveC);
Concat() = default; Concat() = default;
explicit Concat(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Concat(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetAxis(int axis); void SetAxis(int axis);
void SetN(int n); void SetN(int n);
#else #else
explicit Concat(schema::Primitive *primitive) : PrimitiveC(primitive) {} Concat() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Concat();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateConcat(fbb, attr->axis(), attr->n());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Concat, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetAxis() const; int GetAxis() const;
......
...@@ -30,7 +30,19 @@ float ConstantOfShape::GetValue() const { return this->primitive_->value.AsConst ...@@ -30,7 +30,19 @@ float ConstantOfShape::GetValue() const { return this->primitive_->value.AsConst
void ConstantOfShape::SetValue(float value) { this->primitive_->value.AsConstantOfShape()->value = value; } void ConstantOfShape::SetValue(float value) { this->primitive_->value.AsConstantOfShape()->value = value; }
#else #else
int ConstantOfShape::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_ConstantOfShape();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_ConstantOfShape return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateConstantOfShape(*fbb, attr->value());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ConstantOfShape, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
float ConstantOfShape::GetValue() const { return this->primitive_->value_as_ConstantOfShape()->value(); } float ConstantOfShape::GetValue() const { return this->primitive_->value_as_ConstantOfShape()->value(); }
#endif #endif
......
...@@ -33,30 +33,9 @@ class ConstantOfShape : public PrimitiveC { ...@@ -33,30 +33,9 @@ class ConstantOfShape : public PrimitiveC {
explicit ConstantOfShape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit ConstantOfShape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
void SetValue(float value); void SetValue(float value);
#else #else
explicit ConstantOfShape(schema::Primitive *primitive) : PrimitiveC(primitive) {} ConstantOfShape() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_ConstantOfShape();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateConstantOfShape(fbb, attr->value());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_ConstantOfShape, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
float GetValue() const; float GetValue() const;
......
...@@ -338,7 +338,23 @@ int Conv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp ...@@ -338,7 +338,23 @@ int Conv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inp
} }
#else #else
int Conv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
MS_ASSERT(nullptr != primitive);
MS_ASSERT(nullptr != fbb);
auto attr = primitive->value_as_Conv2D();
if (attr == nullptr) {
MS_LOG(ERROR) << "value_as_Conv2D return nullptr";
return RET_ERROR;
}
auto val_offset = schema::CreateConv2D(
*fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), attr->kernelH(),
attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2D, val_offset.o);
fbb->Finish(prim_offset);
return RET_OK;
}
int Conv2D::GetFormat() const { return this->primitive_->value_as_Conv2D()->format(); } int Conv2D::GetFormat() const { return this->primitive_->value_as_Conv2D()->format(); }
int Conv2D::GetGroup() const { return this->primitive_->value_as_Conv2D()->group(); } int Conv2D::GetGroup() const { return this->primitive_->value_as_Conv2D()->group(); }
int Conv2D::GetChannelIn() const { return this->primitive_->value_as_Conv2D()->channelIn(); } int Conv2D::GetChannelIn() const { return this->primitive_->value_as_Conv2D()->channelIn(); }
......
...@@ -34,7 +34,7 @@ class Conv2D : public PrimitiveC { ...@@ -34,7 +34,7 @@ class Conv2D : public PrimitiveC {
Conv2D() = default; Conv2D() = default;
explicit Conv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Conv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetFormat(int format); void SetFormat(int format);
void SetGroup(int group); void SetGroup(int group);
void SetChannelIn(int channel_in); void SetChannelIn(int channel_in);
...@@ -63,34 +63,9 @@ class Conv2D : public PrimitiveC { ...@@ -63,34 +63,9 @@ class Conv2D : public PrimitiveC {
#else #else
public: public:
explicit Conv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} Conv2D() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Conv2D();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateConv2D(fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Conv2D, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
public: public:
......
...@@ -68,7 +68,22 @@ void Conv2DGradFilter::SetActivationType(int activation_type) { ...@@ -68,7 +68,22 @@ void Conv2DGradFilter::SetActivationType(int activation_type) {
} }
#else #else
int Conv2DGradFilter::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the Conv2DGradFilter attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_Conv2DGradFilter();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Conv2DGradFilter return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateConv2DGradFilter(
    *fbb, src_attr->format(), src_attr->group(), src_attr->channelIn(), src_attr->channelOut(), src_attr->kernelW(),
    src_attr->kernelH(), src_attr->strideW(), src_attr->strideH(), src_attr->padMode(), src_attr->padUp(),
    src_attr->padDown(), src_attr->padLeft(), src_attr->padRight(), src_attr->dilateW(), src_attr->dilateH(),
    src_attr->hasBias(), src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2DGradFilter, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int Conv2DGradFilter::GetFormat() const { return this->primitive_->value_as_Conv2DGradFilter()->format(); } int Conv2DGradFilter::GetFormat() const { return this->primitive_->value_as_Conv2DGradFilter()->format(); }
int Conv2DGradFilter::GetGroup() const { return this->primitive_->value_as_Conv2DGradFilter()->group(); } int Conv2DGradFilter::GetGroup() const { return this->primitive_->value_as_Conv2DGradFilter()->group(); }
int Conv2DGradFilter::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradFilter()->channelIn(); } int Conv2DGradFilter::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradFilter()->channelIn(); }
......
...@@ -49,35 +49,9 @@ class Conv2DGradFilter : public PrimitiveC { ...@@ -49,35 +49,9 @@ class Conv2DGradFilter : public PrimitiveC {
void SetHasBias(bool has_bias); void SetHasBias(bool has_bias);
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit Conv2DGradFilter(schema::Primitive *primitive) : PrimitiveC(primitive) {} Conv2DGradFilter() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Conv2DGradFilter();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateConv2DGradFilter(fbb, attr->format(), attr->group(),
attr->channelIn(), attr->channelOut(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Conv2DGradFilter, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetFormat() const; int GetFormat() const;
int GetGroup() const; int GetGroup() const;
......
...@@ -66,7 +66,22 @@ void Conv2DGradInput::SetActivationType(int activation_type) { ...@@ -66,7 +66,22 @@ void Conv2DGradInput::SetActivationType(int activation_type) {
} }
#else #else
int Conv2DGradInput::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the Conv2DGradInput attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_Conv2DGradInput();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Conv2DGradInput return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateConv2DGradInput(
    *fbb, src_attr->format(), src_attr->group(), src_attr->channelIn(), src_attr->channelOut(), src_attr->kernelW(),
    src_attr->kernelH(), src_attr->strideW(), src_attr->strideH(), src_attr->padMode(), src_attr->padUp(),
    src_attr->padDown(), src_attr->padLeft(), src_attr->padRight(), src_attr->dilateW(), src_attr->dilateH(),
    src_attr->hasBias(), src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2DGradInput, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int Conv2DGradInput::GetFormat() const { return this->primitive_->value_as_Conv2DGradInput()->format(); } int Conv2DGradInput::GetFormat() const { return this->primitive_->value_as_Conv2DGradInput()->format(); }
int Conv2DGradInput::GetGroup() const { return this->primitive_->value_as_Conv2DGradInput()->group(); } int Conv2DGradInput::GetGroup() const { return this->primitive_->value_as_Conv2DGradInput()->group(); }
int Conv2DGradInput::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradInput()->channelIn(); } int Conv2DGradInput::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradInput()->channelIn(); }
......
...@@ -49,35 +49,9 @@ class Conv2DGradInput : public PrimitiveC { ...@@ -49,35 +49,9 @@ class Conv2DGradInput : public PrimitiveC {
void SetHasBias(bool has_bias); void SetHasBias(bool has_bias);
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit Conv2DGradInput(schema::Primitive *primitive) : PrimitiveC(primitive) {} Conv2DGradInput() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Conv2DGradInput();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateConv2DGradInput(fbb, attr->format(), attr->group(),
attr->channelIn(), attr->channelOut(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Conv2DGradInput, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetFormat() const; int GetFormat() const;
int GetGroup() const; int GetGroup() const;
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/cos.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int Cos::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Cos carries no attributes: emit an empty Cos table wrapped in a Primitive.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto value = schema::CreateCos(*fbb);
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Cos, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <set> #include <set>
#include <cmath> #include <cmath>
#include "ir/dtype/type_id.h" #include "ir/dtype/type_id.h"
#include "src/ops/primitive_c.h" #include "src/ops/arithmetic_self.h"
namespace mindspore { namespace mindspore {
namespace lite { namespace lite {
...@@ -31,27 +31,9 @@ class Cos : public ArithmeticSelf { ...@@ -31,27 +31,9 @@ class Cos : public ArithmeticSelf {
Cos() = default; Cos() = default;
explicit Cos(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} explicit Cos(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else #else
explicit Cos(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} Cos() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateCos(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Cos, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
...@@ -26,7 +26,25 @@ void Crop::SetAxis(int64_t axis) { this->primitive_->value.AsCrop()->axis = axis ...@@ -26,7 +26,25 @@ void Crop::SetAxis(int64_t axis) { this->primitive_->value.AsCrop()->axis = axis
void Crop::SetOffsets(const std::vector<int64_t> &offsets) { this->primitive_->value.AsCrop()->offsets = offsets; } void Crop::SetOffsets(const std::vector<int64_t> &offsets) { this->primitive_->value.AsCrop()->offsets = offsets; }
#else #else
int Crop::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the Crop attributes (axis + offsets vector) from `primitive`
  // into the caller-provided builder `fbb`.
  // Returns RET_OK on success, RET_ERROR if the primitive's union value is not a Crop.
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto attr = primitive->value_as_Crop();
  if (attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Crop return nullptr";
    return RET_ERROR;
  }
  std::vector<int64_t> offsets;
  if (attr->offsets() != nullptr) {
    // Copy the flatbuffers vector via its iterator range instead of a manual
    // index loop; behavior is identical (empty source yields an empty vector).
    offsets.assign(attr->offsets()->begin(), attr->offsets()->end());
  }
  auto val_offset = schema::CreateCropDirect(*fbb, attr->axis(), &offsets);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Crop, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}
int64_t Crop::GetAxis() const { return this->primitive_->value_as_Crop()->axis(); } int64_t Crop::GetAxis() const { return this->primitive_->value_as_Crop()->axis(); }
std::vector<int64_t> Crop::GetOffsets() const { std::vector<int64_t> Crop::GetOffsets() const {
auto fb_vector = this->primitive_->value_as_Crop()->offsets(); auto fb_vector = this->primitive_->value_as_Crop()->offsets();
......
...@@ -35,35 +35,9 @@ class Crop : public PrimitiveC { ...@@ -35,35 +35,9 @@ class Crop : public PrimitiveC {
void SetAxis(int64_t axis); void SetAxis(int64_t axis);
void SetOffsets(const std::vector<int64_t> &offsets); void SetOffsets(const std::vector<int64_t> &offsets);
#else #else
explicit Crop(schema::Primitive *primitive) : PrimitiveC(primitive) {} Crop() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Crop();
MS_ASSERT(attr != nullptr);
auto offsets = std::make_unique<std::vector<int64_t>>();
for (int i = 0; i < static_cast<int>(attr->offsets()->size()); i++) {
offsets->push_back(attr->offsets()->data()[i]);
}
auto val_offset = schema::CreateCropDirect(fbb, attr->axis(), offsets.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Crop, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int64_t GetAxis() const; int64_t GetAxis() const;
......
...@@ -58,7 +58,22 @@ void DeConv2D::SetActivationType(int activation_type) { ...@@ -58,7 +58,22 @@ void DeConv2D::SetActivationType(int activation_type) {
} }
#else #else
int DeConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the DeConv2D attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_DeConv2D();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_DeConv2D return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDeConv2D(
    *fbb, src_attr->format(), src_attr->group(), src_attr->channelIn(), src_attr->channelOut(), src_attr->kernelW(),
    src_attr->kernelH(), src_attr->strideW(), src_attr->strideH(), src_attr->padMode(), src_attr->padUp(),
    src_attr->padDown(), src_attr->padLeft(), src_attr->padRight(), src_attr->dilateW(), src_attr->dilateH(),
    src_attr->hasBias(), src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DeConv2D, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int DeConv2D::GetFormat() const { return this->primitive_->value_as_DeConv2D()->format(); } int DeConv2D::GetFormat() const { return this->primitive_->value_as_DeConv2D()->format(); }
int DeConv2D::GetGroup() const { return this->primitive_->value_as_DeConv2D()->group(); } int DeConv2D::GetGroup() const { return this->primitive_->value_as_DeConv2D()->group(); }
int DeConv2D::GetChannelIn() const { return this->primitive_->value_as_DeConv2D()->channelIn(); } int DeConv2D::GetChannelIn() const { return this->primitive_->value_as_DeConv2D()->channelIn(); }
......
...@@ -49,34 +49,9 @@ class DeConv2D : public PrimitiveC { ...@@ -49,34 +49,9 @@ class DeConv2D : public PrimitiveC {
void SetHasBias(bool has_bias); void SetHasBias(bool has_bias);
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit DeConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} DeConv2D() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_DeConv2D();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDeConv2D(fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_DeConv2D, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetFormat() const; int GetFormat() const;
......
...@@ -70,7 +70,24 @@ void DeDepthwiseConv2D::SetActivationType(int activation_type) { ...@@ -70,7 +70,24 @@ void DeDepthwiseConv2D::SetActivationType(int activation_type) {
} }
#else #else
int DeDepthwiseConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the DeDepthwiseConv2D attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_DeDepthwiseConv2D();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_DeDepthwiseConv2D return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDeDepthwiseConv2D(
    *fbb, src_attr->format(), src_attr->channelIn(), src_attr->channelMultiplier(), src_attr->kernelW(),
    src_attr->kernelH(), src_attr->strideW(), src_attr->strideH(), src_attr->padMode(), src_attr->padUp(),
    src_attr->padDown(), src_attr->padLeft(), src_attr->padRight(), src_attr->dilateW(), src_attr->dilateH(),
    src_attr->hasBias(), src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DeDepthwiseConv2D, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int DeDepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DeDepthwiseConv2D()->format(); } int DeDepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DeDepthwiseConv2D()->format(); }
int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DeDepthwiseConv2D()->channelIn(); } int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DeDepthwiseConv2D()->channelIn(); }
int DeDepthwiseConv2D::GetChannelMultiplier() const { int DeDepthwiseConv2D::GetChannelMultiplier() const {
......
...@@ -48,34 +48,9 @@ class DeDepthwiseConv2D : public PrimitiveC { ...@@ -48,34 +48,9 @@ class DeDepthwiseConv2D : public PrimitiveC {
void SetHasBias(bool has_bias); void SetHasBias(bool has_bias);
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit DeDepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} DeDepthwiseConv2D() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_DeDepthwiseConv2D();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDeDepthwiseConv2D(fbb, attr->format(), attr->channelIn(), attr->channelMultiplier(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_DeDepthwiseConv2D, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetFormat() const; int GetFormat() const;
......
...@@ -26,7 +26,19 @@ void DepthToSpace::SetBlockSize(int block_size) { this->primitive_->value.AsDept ...@@ -26,7 +26,19 @@ void DepthToSpace::SetBlockSize(int block_size) { this->primitive_->value.AsDept
void DepthToSpace::SetFormat(int format) { this->primitive_->value.AsDepthToSpace()->format = (schema::Format)format; } void DepthToSpace::SetFormat(int format) { this->primitive_->value.AsDepthToSpace()->format = (schema::Format)format; }
#else #else
int DepthToSpace::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the DepthToSpace attributes (blockSize, format) into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_DepthToSpace();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_DepthToSpace return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDepthToSpace(*fbb, src_attr->blockSize(), src_attr->format());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DepthToSpace, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int DepthToSpace::GetBlockSize() const { return this->primitive_->value_as_DepthToSpace()->blockSize(); } int DepthToSpace::GetBlockSize() const { return this->primitive_->value_as_DepthToSpace()->blockSize(); }
int DepthToSpace::GetFormat() const { return this->primitive_->value_as_DepthToSpace()->format(); } int DepthToSpace::GetFormat() const { return this->primitive_->value_as_DepthToSpace()->format(); }
......
...@@ -34,30 +34,9 @@ class DepthToSpace : public PrimitiveC { ...@@ -34,30 +34,9 @@ class DepthToSpace : public PrimitiveC {
void SetBlockSize(int block_size); void SetBlockSize(int block_size);
void SetFormat(int format); void SetFormat(int format);
#else #else
explicit DepthToSpace(schema::Primitive *primitive) : PrimitiveC(primitive) {} DepthToSpace() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_DepthToSpace();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDepthToSpace(fbb, attr->blockSize(), attr->format());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_DepthToSpace, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetBlockSize() const; int GetBlockSize() const;
......
...@@ -232,7 +232,22 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode ...@@ -232,7 +232,22 @@ int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNode
} }
#else #else
int DepthwiseConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the DepthwiseConv2D attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_DepthwiseConv2D();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_DepthwiseConv2D return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDepthwiseConv2D(
    *fbb, src_attr->format(), src_attr->channelIn(), src_attr->channelMultiplier(), src_attr->kernelW(),
    src_attr->kernelH(), src_attr->strideW(), src_attr->strideH(), src_attr->padMode(), src_attr->padUp(),
    src_attr->padDown(), src_attr->padLeft(), src_attr->padRight(), src_attr->dilateW(), src_attr->dilateH(),
    src_attr->hasBias(), src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DepthwiseConv2D, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int DepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DepthwiseConv2D()->format(); } int DepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DepthwiseConv2D()->format(); }
int DepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DepthwiseConv2D()->channelIn(); } int DepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DepthwiseConv2D()->channelIn(); }
int DepthwiseConv2D::GetChannelMultiplier() const { int DepthwiseConv2D::GetChannelMultiplier() const {
......
...@@ -33,7 +33,7 @@ class DepthwiseConv2D : public PrimitiveC { ...@@ -33,7 +33,7 @@ class DepthwiseConv2D : public PrimitiveC {
DepthwiseConv2D() = default; DepthwiseConv2D() = default;
explicit DepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit DepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
void SetFormat(int format); void SetFormat(int format);
void SetChannelIn(int channel_in); void SetChannelIn(int channel_in);
void SetChannelMultiplier(int channel_multiplier); void SetChannelMultiplier(int channel_multiplier);
...@@ -58,35 +58,9 @@ class DepthwiseConv2D : public PrimitiveC { ...@@ -58,35 +58,9 @@ class DepthwiseConv2D : public PrimitiveC {
#else #else
public: public:
explicit DepthwiseConv2D(schema::Primitive *primitive) : PrimitiveC(primitive) {} DepthwiseConv2D() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_DepthwiseConv2D();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDepthwiseConv2D(fbb, attr->format(),
attr->channelIn(), attr->channelMultiplier(),
attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(),
attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
attr->padRight(), attr->dilateW(), attr->dilateH(),
attr->hasBias(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_DepthwiseConv2D, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
public: public:
......
...@@ -28,9 +28,9 @@ class Dequant : public PrimitiveC { ...@@ -28,9 +28,9 @@ class Dequant : public PrimitiveC {
MS_DECLARE_PARENT(Dequant, PrimitiveC); MS_DECLARE_PARENT(Dequant, PrimitiveC);
Dequant() = default; Dequant() = default;
explicit Dequant(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Dequant(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else #else
explicit Dequant(schema::Primitive *primitive) : PrimitiveC(primitive) {} Dequant() = default;
#endif #endif
}; };
} // namespace lite } // namespace lite
......
...@@ -88,7 +88,22 @@ void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) { ...@@ -88,7 +88,22 @@ void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) {
} }
#else #else
int DetectionPostProcess::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the DetectionPostProcess attributes from `primitive` into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_DetectionPostProcess();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_DetectionPostProcess return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDetectionPostProcess(
    *fbb, src_attr->format(), src_attr->inputSize(), src_attr->hScale(), src_attr->wScale(), src_attr->xScale(),
    src_attr->yScale(), src_attr->NmsIouThreshold(), src_attr->NmsScoreThreshold(), src_attr->MaxDetections(),
    src_attr->DetectionsPreClass(), src_attr->MaxClassesPreDetection(), src_attr->NumClasses(),
    src_attr->UseRegularNms());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DetectionPostProcess, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int DetectionPostProcess::GetFormat() const { return this->primitive_->value_as_DetectionPostProcess()->format(); } int DetectionPostProcess::GetFormat() const { return this->primitive_->value_as_DetectionPostProcess()->format(); }
int DetectionPostProcess::GetInputSize() const { int DetectionPostProcess::GetInputSize() const {
return this->primitive_->value_as_DetectionPostProcess()->inputSize(); return this->primitive_->value_as_DetectionPostProcess()->inputSize();
......
...@@ -45,36 +45,9 @@ class DetectionPostProcess : public PrimitiveC { ...@@ -45,36 +45,9 @@ class DetectionPostProcess : public PrimitiveC {
void SetNumClasses(int64_t num_classes); void SetNumClasses(int64_t num_classes);
void SetUseRegularNms(bool use_regular_nms); void SetUseRegularNms(bool use_regular_nms);
#else #else
explicit DetectionPostProcess(schema::Primitive *primitive) : PrimitiveC(primitive) {} DetectionPostProcess() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_DetectionPostProcess();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDetectionPostProcess(fbb, attr->format(), attr->inputSize(),
attr->hScale(), attr->wScale(),
attr->xScale(), attr->yScale(),
attr->NmsIouThreshold(), attr->NmsScoreThreshold(),
attr->MaxDetections(), attr->DetectionsPreClass(),
attr->MaxClassesPreDetection(), attr->NumClasses(),
attr->UseRegularNms());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_DetectionPostProcess, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetFormat() const; int GetFormat() const;
int GetInputSize() const; int GetInputSize() const;
......
...@@ -26,7 +26,19 @@ void Div::SetActivationType(int activation_type) { ...@@ -26,7 +26,19 @@ void Div::SetActivationType(int activation_type) {
} }
#else #else
int Div::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  // Re-serialize the Div attribute (activationType) into `fbb`.
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  const auto *src_attr = primitive->value_as_Div();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Div return nullptr";
    return RET_ERROR;
  }
  auto value = schema::CreateDiv(*fbb, src_attr->activationType());
  auto prim = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Div, value.o);
  fbb->Finish(prim);
  return RET_OK;
}
int Div::GetActivationType() const { return this->primitive_->value_as_Div()->activationType(); } int Div::GetActivationType() const { return this->primitive_->value_as_Div()->activationType(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class Div : public Arithmetic { ...@@ -34,30 +34,9 @@ class Div : public Arithmetic {
void SetActivationType(int activation_type); void SetActivationType(int activation_type);
#else #else
explicit Div(schema::Primitive *primitive) : Arithmetic(primitive) {} Div() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Div();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDiv(fbb, attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Div, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetActivationType() const; int GetActivationType() const;
}; };
......
...@@ -24,7 +24,19 @@ float Dropout::GetRatio() const { return this->primitive_->value.AsDropout()->ra ...@@ -24,7 +24,19 @@ float Dropout::GetRatio() const { return this->primitive_->value.AsDropout()->ra
void Dropout::SetRatio(float ratio) { this->primitive_->value.AsDropout()->ratio = ratio; } void Dropout::SetRatio(float ratio) { this->primitive_->value.AsDropout()->ratio = ratio; }
#else #else
int Dropout::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Copy the Dropout ratio attribute out of the source primitive and rebuild
  // an equivalent Primitive table inside fbb.
  auto src_attr = primitive->value_as_Dropout();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Dropout return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateDropout(*fbb, src_attr->ratio());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Dropout, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
float Dropout::GetRatio() const { return this->primitive_->value_as_Dropout()->ratio(); } float Dropout::GetRatio() const { return this->primitive_->value_as_Dropout()->ratio(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class Dropout : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Dropout : public PrimitiveC {
void SetRatio(float ratio); void SetRatio(float ratio);
#else #else
explicit Dropout(schema::Primitive *primitive) : PrimitiveC(primitive) {} Dropout() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Dropout();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateDropout(fbb, attr->ratio());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Dropout, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetRatio() const; float GetRatio() const;
}; };
......
...@@ -24,7 +24,19 @@ int Eltwise::GetMode() const { return this->primitive_->value.AsEltwise()->mode; ...@@ -24,7 +24,19 @@ int Eltwise::GetMode() const { return this->primitive_->value.AsEltwise()->mode;
void Eltwise::SetMode(int mode) { this->primitive_->value.AsEltwise()->mode = (schema::EltwiseMode)mode; } void Eltwise::SetMode(int mode) { this->primitive_->value.AsEltwise()->mode = (schema::EltwiseMode)mode; }
#else #else
int Eltwise::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Rebuild the Eltwise table (mode only) inside the destination builder.
  auto src_attr = primitive->value_as_Eltwise();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Eltwise return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateEltwise(*fbb, src_attr->mode());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Eltwise, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
int Eltwise::GetMode() const { return this->primitive_->value_as_Eltwise()->mode(); } int Eltwise::GetMode() const { return this->primitive_->value_as_Eltwise()->mode(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class Eltwise : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Eltwise : public PrimitiveC {
void SetMode(int mode); void SetMode(int mode);
#else #else
explicit Eltwise(schema::Primitive *primitive) : PrimitiveC(primitive) {} Eltwise() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Eltwise();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateEltwise(fbb, attr->mode());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Eltwise, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int GetMode() const; int GetMode() const;
}; };
......
...@@ -24,7 +24,19 @@ float Elu::GetAlpha() const { return this->primitive_->value.AsElu()->alpha; } ...@@ -24,7 +24,19 @@ float Elu::GetAlpha() const { return this->primitive_->value.AsElu()->alpha; }
void Elu::SetAlpha(float alpha) { this->primitive_->value.AsElu()->alpha = alpha; } void Elu::SetAlpha(float alpha) { this->primitive_->value.AsElu()->alpha = alpha; }
#else #else
int Elu::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Rebuild the Elu table (alpha only) inside the destination builder.
  auto src_attr = primitive->value_as_Elu();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Elu return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateElu(*fbb, src_attr->alpha());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Elu, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
float Elu::GetAlpha() const { return this->primitive_->value_as_Elu()->alpha(); } float Elu::GetAlpha() const { return this->primitive_->value_as_Elu()->alpha(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class Elu : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Elu : public PrimitiveC {
void SetAlpha(float alpha); void SetAlpha(float alpha);
#else #else
explicit Elu(schema::Primitive *primitive) : PrimitiveC(primitive) {} Elu() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Elu();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateElu(fbb, attr->alpha());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Elu, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetAlpha() const; float GetAlpha() const;
}; };
......
...@@ -24,7 +24,21 @@ float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value.AsEmb ...@@ -24,7 +24,21 @@ float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value.AsEmb
void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive_->value.AsEmbeddingLookup()->maxNorm = max_norm; } void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive_->value.AsEmbeddingLookup()->maxNorm = max_norm; }
#else #else
int EmbeddingLookup::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Rebuild the EmbeddingLookup table (maxNorm only) in the destination builder.
  auto src_attr = primitive->value_as_EmbeddingLookup();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_EmbeddingLookup return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateEmbeddingLookup(*fbb, src_attr->maxNorm());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_EmbeddingLookup, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value_as_EmbeddingLookup()->maxNorm(); } float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value_as_EmbeddingLookup()->maxNorm(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class EmbeddingLookup : public PrimitiveC { ...@@ -34,30 +34,9 @@ class EmbeddingLookup : public PrimitiveC {
void SetMaxNorm(float max_norm); void SetMaxNorm(float max_norm);
#else #else
explicit EmbeddingLookup(schema::Primitive *primitive) : PrimitiveC(primitive) {} EmbeddingLookup() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_EmbeddingLookup();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateEmbeddingLookup(fbb, attr->maxNorm());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_EmbeddingLookup, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
float GetMaxNorm() const; float GetMaxNorm() const;
......
...@@ -38,7 +38,32 @@ void EmbeddingLookupSparse::SetMaxNortm(float max_nortm) { ...@@ -38,7 +38,32 @@ void EmbeddingLookupSparse::SetMaxNortm(float max_nortm) {
} }
#else #else
int EmbeddingLookupSparse::UnPackToFlatBuilder(const schema::Primitive *primitive,
                                               flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto src_attr = primitive->value_as_EmbeddingLookupSparse();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_EmbeddingLookupSparse return nullptr";
    return RET_ERROR;
  }
  // Copy the flatbuffer vectors into std::vectors so the *Direct helper can
  // serialize them into the new builder; an absent vector becomes an empty one.
  std::vector<int32_t> sp_ids;
  auto ids_fb = src_attr->spIds();
  if (ids_fb != nullptr) {
    sp_ids.assign(ids_fb->begin(), ids_fb->end());
  }
  std::vector<float> sp_weights;
  auto weights_fb = src_attr->spWeights();
  if (weights_fb != nullptr) {
    sp_weights.assign(weights_fb->begin(), weights_fb->end());
  }
  auto attr_offset = schema::CreateEmbeddingLookupSparseDirect(*fbb, &sp_ids, &sp_weights);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_EmbeddingLookupSparse, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
std::vector<int> EmbeddingLookupSparse::GetSpIds() const { std::vector<int> EmbeddingLookupSparse::GetSpIds() const {
auto fb_vector = this->primitive_->value_as_EmbeddingLookupSparse()->spIds(); auto fb_vector = this->primitive_->value_as_EmbeddingLookupSparse()->spIds();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -36,39 +36,9 @@ class EmbeddingLookupSparse : public PrimitiveC { ...@@ -36,39 +36,9 @@ class EmbeddingLookupSparse : public PrimitiveC {
void SetSpWeights(const std::vector<float> &sp_weights); void SetSpWeights(const std::vector<float> &sp_weights);
void SetMaxNortm(float max_nortm); void SetMaxNortm(float max_nortm);
#else #else
explicit EmbeddingLookupSparse(schema::Primitive *primitive) : PrimitiveC(primitive) {} EmbeddingLookupSparse() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_EmbeddingLookupSparse();
MS_ASSERT(attr != nullptr);
auto spIds = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->spIds()->size()); i++) {
spIds->push_back(attr->spIds()->data()[i]);
}
auto spWeights = std::make_unique<std::vector<float>>();
for (int i = 0; i < static_cast<int>(attr->spWeights()->size()); i++) {
spWeights->push_back(attr->spWeights()->data()[i]);
}
auto val_offset = schema:: CreateEmbeddingLookupSparseDirect(fbb, spIds.release(), spWeights.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_EmbeddingLookupSparse, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
std::vector<int> GetSpIds() const; std::vector<int> GetSpIds() const;
std::vector<float> GetSpWeights() const; std::vector<float> GetSpWeights() const;
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/equal.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int Equal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Equal carries no attributes; serialize an empty Equal table.
  auto attr_offset = schema::CreateEqual(*fbb);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Equal, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -32,27 +32,9 @@ class Equal : public Arithmetic { ...@@ -32,27 +32,9 @@ class Equal : public Arithmetic {
Equal() = default; Equal() = default;
explicit Equal(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} explicit Equal(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else #else
explicit Equal(schema::Primitive *primitive) : Arithmetic(primitive) {} Equal() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateEqual(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Equal, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/exp.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int Exp::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Exp carries no attributes; serialize an empty Exp table.
  auto attr_offset = schema::CreateExp(*fbb);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Exp, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -32,27 +32,9 @@ class Exp : public ArithmeticSelf { ...@@ -32,27 +32,9 @@ class Exp : public ArithmeticSelf {
Exp() = default; Exp() = default;
explicit Exp(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} explicit Exp(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else #else
explicit Exp(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} Exp() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateExp(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Exp, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
...@@ -24,7 +24,20 @@ int ExpandDims::GetDim() const { return this->primitive_->value.AsExpandDims()-> ...@@ -24,7 +24,20 @@ int ExpandDims::GetDim() const { return this->primitive_->value.AsExpandDims()->
void ExpandDims::SetDim(int dim) { this->primitive_->value.AsExpandDims()->dim = dim; } void ExpandDims::SetDim(int dim) { this->primitive_->value.AsExpandDims()->dim = dim; }
#else #else
int ExpandDims::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Rebuild the ExpandDims table (dim only) inside the destination builder.
  auto src_attr = primitive->value_as_ExpandDims();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_ExpandDims return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateExpandDims(*fbb, src_attr->dim());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ExpandDims, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
int ExpandDims::GetDim() const { return this->primitive_->value_as_ExpandDims()->dim(); } int ExpandDims::GetDim() const { return this->primitive_->value_as_ExpandDims()->dim(); }
#endif #endif
......
...@@ -34,30 +34,9 @@ class ExpandDims : public PrimitiveC { ...@@ -34,30 +34,9 @@ class ExpandDims : public PrimitiveC {
void SetDim(int dim); void SetDim(int dim);
#else #else
explicit ExpandDims(schema::Primitive *primitive) : PrimitiveC(primitive) {} ExpandDims() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_ExpandDims();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateExpandDims(fbb, attr->dim());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_ExpandDims, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetDim() const; int GetDim() const;
......
...@@ -32,7 +32,21 @@ void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) { ...@@ -32,7 +32,21 @@ void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) {
} }
#else #else
int FakeQuantWithMinMaxVars::UnPackToFlatBuilder(const schema::Primitive *primitive,
                                                 flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Rebuild the FakeQuantWithMinMaxVars table (narrowRange, numBits) in the
  // destination builder.
  auto src_attr = primitive->value_as_FakeQuantWithMinMaxVars();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_FakeQuantWithMinMaxVars return nullptr";
    return RET_ERROR;
  }
  auto attr_offset = schema::CreateFakeQuantWithMinMaxVars(*fbb, src_attr->narrowRange(), src_attr->numBits());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FakeQuantWithMinMaxVars, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
bool FakeQuantWithMinMaxVars::GetNarrowRange() const { bool FakeQuantWithMinMaxVars::GetNarrowRange() const {
return this->primitive_->value_as_FakeQuantWithMinMaxVars()->narrowRange(); return this->primitive_->value_as_FakeQuantWithMinMaxVars()->narrowRange();
} }
......
...@@ -34,31 +34,9 @@ class FakeQuantWithMinMaxVars : public PrimitiveC { ...@@ -34,31 +34,9 @@ class FakeQuantWithMinMaxVars : public PrimitiveC {
void SetNarrowRange(bool narrow_range); void SetNarrowRange(bool narrow_range);
void SetNumBits(int num_bits); void SetNumBits(int num_bits);
#else #else
explicit FakeQuantWithMinMaxVars(schema::Primitive *primitive) : PrimitiveC(primitive) {} FakeQuantWithMinMaxVars() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_FakeQuantWithMinMaxVars();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateFakeQuantWithMinMaxVars(fbb, attr->narrowRange(), attr->numBits());
auto prim_offset = schema::CreatePrimitive(fbb,
schema::PrimitiveType_FakeQuantWithMinMaxVars, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
bool GetNarrowRange() const; bool GetNarrowRange() const;
int GetNumBits() const; int GetNumBits() const;
......
...@@ -24,7 +24,25 @@ std::vector<int> Fill::GetDims() const { return this->primitive_->value.AsFill() ...@@ -24,7 +24,25 @@ std::vector<int> Fill::GetDims() const { return this->primitive_->value.AsFill()
void Fill::SetDims(const std::vector<int> &dims) { this->primitive_->value.AsFill()->dims = dims; } void Fill::SetDims(const std::vector<int> &dims) { this->primitive_->value.AsFill()->dims = dims; }
#else #else
int Fill::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  auto src_attr = primitive->value_as_Fill();
  if (src_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Fill return nullptr";
    return RET_ERROR;
  }
  // Copy the dims vector out of the flatbuffer so CreateFillDirect can
  // serialize it into the new builder; an absent vector becomes empty.
  std::vector<int32_t> dims;
  auto dims_fb = src_attr->dims();
  if (dims_fb != nullptr) {
    dims.assign(dims_fb->begin(), dims_fb->end());
  }
  auto attr_offset = schema::CreateFillDirect(*fbb, &dims);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Fill, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
std::vector<int> Fill::GetDims() const { std::vector<int> Fill::GetDims() const {
auto fb_vector = this->primitive_->value_as_Fill()->dims(); auto fb_vector = this->primitive_->value_as_Fill()->dims();
return std::vector<int>(fb_vector->begin(), fb_vector->end()); return std::vector<int>(fb_vector->begin(), fb_vector->end());
......
...@@ -35,35 +35,9 @@ class Fill : public PrimitiveC { ...@@ -35,35 +35,9 @@ class Fill : public PrimitiveC {
void SetDims(const std::vector<int> &dims); void SetDims(const std::vector<int> &dims);
#else #else
explicit Fill(schema::Primitive *primitive) : PrimitiveC(primitive) {} Fill() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Fill();
MS_ASSERT(attr != nullptr);
auto dims = std::make_unique<std::vector<int32_t>>();
for (int i = 0; i < static_cast<int>(attr->dims()->size()); i++) {
dims->push_back(attr->dims()->data()[i]);
}
auto val_offset = schema::CreateFillDirect(fbb, dims.release());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Fill, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
std::vector<int> GetDims() const; std::vector<int> GetDims() const;
......
...@@ -77,6 +77,15 @@ int Flatten::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in ...@@ -77,6 +77,15 @@ int Flatten::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &in
} }
return RET_OK; return RET_OK;
} }
#else
int Flatten::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Flatten carries no attributes; serialize an empty Flatten table.
  auto attr_offset = schema::CreateFlatten(*fbb);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Flatten, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
#endif #endif
} // namespace lite } // namespace lite
} // namespace mindspore } // namespace mindspore
...@@ -31,32 +31,13 @@ class Flatten : public PrimitiveC { ...@@ -31,32 +31,13 @@ class Flatten : public PrimitiveC {
MS_DECLARE_PARENT(Flatten, PrimitiveC); MS_DECLARE_PARENT(Flatten, PrimitiveC);
Flatten() = default; Flatten() = default;
explicit Flatten(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} explicit Flatten(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
#else #else
explicit Flatten(schema::Primitive *primitive) : PrimitiveC(primitive) {} Flatten() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateFlatten(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Flatten, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs);
}; };
} // namespace lite } // namespace lite
} // namespace mindspore } // namespace mindspore
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/floor.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int Floor::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(primitive != nullptr);
  MS_ASSERT(fbb != nullptr);
  // Floor carries no attributes; serialize an empty Floor table.
  auto attr_offset = schema::CreateFloor(*fbb);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Floor, attr_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -21,7 +21,7 @@ ...@@ -21,7 +21,7 @@
#include <set> #include <set>
#include <cmath> #include <cmath>
#include "ir/dtype/type_id.h" #include "ir/dtype/type_id.h"
#include "src/ops/primitive_c.h" #include "src/ops/arithmetic_self.h"
namespace mindspore { namespace mindspore {
namespace lite { namespace lite {
...@@ -32,27 +32,9 @@ class Floor : public ArithmeticSelf { ...@@ -32,27 +32,9 @@ class Floor : public ArithmeticSelf {
Floor() = default; Floor() = default;
explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
#else #else
explicit Floor(schema::Primitive *primitive) : ArithmeticSelf(primitive) {} Floor() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateFloor(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Floor, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/floor_div.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
int FloorDiv::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  // FloorDiv carries no attributes; serialize an empty FloorDiv table.
  // Fix: this previously created a Floor table tagged PrimitiveType_Floor
  // (copy-paste from floor.cc), so a repacked FloorDiv primitive would be
  // deserialized as a Floor op.
  auto val_offset = schema::CreateFloorDiv(*fbb);
  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FloorDiv, val_offset.o);
  fbb->Finish(prim_offset);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -32,27 +32,9 @@ class FloorDiv : public Arithmetic { ...@@ -32,27 +32,9 @@ class FloorDiv : public Arithmetic {
FloorDiv() = default; FloorDiv() = default;
explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else #else
explicit FloorDiv(schema::Primitive *primitive) : Arithmetic(primitive) {} FloorDiv() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateFloorDiv(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_FloorDiv, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "src/ops/floor_mod.h"
namespace mindspore {
namespace lite {
#ifndef PRIMITIVE_WRITEABLE
// Re-serialize a FloorMod primitive into the caller-provided FlatBufferBuilder.
// Returns RET_OK on success.
int FloorMod::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  // FloorMod carries no attributes, so an empty value table suffices.
  auto value_offset = schema::CreateFloorMod(*fbb);
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FloorMod, value_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
#endif
} // namespace lite
} // namespace mindspore
...@@ -32,27 +32,9 @@ class FloorMod : public Arithmetic { ...@@ -32,27 +32,9 @@ class FloorMod : public Arithmetic {
FloorMod() = default; FloorMod() = default;
explicit FloorMod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} explicit FloorMod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
#else #else
explicit FloorMod(schema::Primitive *primitive) : Arithmetic(primitive) {} FloorMod() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto val_offset = schema::CreateFloorMod(fbb);
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_FloorMod, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
}; };
} // namespace lite } // namespace lite
......
...@@ -31,7 +31,21 @@ void FullConnection::SetActivationType(int activationType) { ...@@ -31,7 +31,21 @@ void FullConnection::SetActivationType(int activationType) {
this->primitive_->value.AsFullConnection()->activationType = (schema::ActivationType)activationType; this->primitive_->value.AsFullConnection()->activationType = (schema::ActivationType)activationType;
} }
#else #else
// Re-serialize a FullConnection primitive (hasBias/axis/useAxis/activationType)
// into the caller-provided FlatBufferBuilder. Returns RET_OK on success,
// RET_ERROR when the source primitive does not hold a FullConnection value.
int FullConnection::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto fc_attr = primitive->value_as_FullConnection();
  if (fc_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_FullConnection return nullptr";
    return RET_ERROR;
  }
  // Copy every attribute verbatim into the new buffer.
  auto value_offset = schema::CreateFullConnection(*fbb, fc_attr->hasBias(), fc_attr->axis(), fc_attr->useAxis(),
                                                   fc_attr->activationType());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FullConnection, value_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
bool FullConnection::GetHasBias() const { return this->primitive_->value_as_FullConnection()->hasBias(); } bool FullConnection::GetHasBias() const { return this->primitive_->value_as_FullConnection()->hasBias(); }
int FullConnection::GetAxis() const { return this->primitive_->value_as_FullConnection()->axis(); } int FullConnection::GetAxis() const { return this->primitive_->value_as_FullConnection()->axis(); }
bool FullConnection::GetUseAxis() const { return this->primitive_->value_as_FullConnection()->useAxis(); } bool FullConnection::GetUseAxis() const { return this->primitive_->value_as_FullConnection()->useAxis(); }
......
...@@ -36,31 +36,9 @@ class FullConnection : public PrimitiveC { ...@@ -36,31 +36,9 @@ class FullConnection : public PrimitiveC {
void SetUseAxis(bool use_axis); void SetUseAxis(bool use_axis);
void SetActivationType(int activationType); void SetActivationType(int activationType);
#else #else
explicit FullConnection(schema::Primitive *primitive) : PrimitiveC(primitive) {} FullConnection() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_FullConnection();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateFullConnection(fbb, attr->hasBias(), attr->axis(),
attr->useAxis(), attr->activationType());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_FullConnection, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
bool GetHasBias() const; bool GetHasBias() const;
......
...@@ -28,7 +28,20 @@ void FusedBatchNorm::SetMomentum(float momentum) { this->primitive_->value.AsFus ...@@ -28,7 +28,20 @@ void FusedBatchNorm::SetMomentum(float momentum) { this->primitive_->value.AsFus
void FusedBatchNorm::SetSpatial(int spatial) { this->primitive_->value.AsFusedBatchNorm()->spatial = spatial; } void FusedBatchNorm::SetSpatial(int spatial) { this->primitive_->value.AsFusedBatchNorm()->spatial = spatial; }
#else #else
// Re-serialize a FusedBatchNorm primitive (epsilon/momentum/spatial) into the
// caller-provided FlatBufferBuilder. Returns RET_OK on success, RET_ERROR when
// the source primitive does not hold a FusedBatchNorm value.
int FusedBatchNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto bn_attr = primitive->value_as_FusedBatchNorm();
  if (bn_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_FusedBatchNorm return nullptr";
    return RET_ERROR;
  }
  // Copy every attribute verbatim into the new buffer.
  auto value_offset = schema::CreateFusedBatchNorm(*fbb, bn_attr->epsilon(), bn_attr->momentum(), bn_attr->spatial());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FusedBatchNorm, value_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
float FusedBatchNorm::GetEpsilon() const { return this->primitive_->value_as_FusedBatchNorm()->epsilon(); } float FusedBatchNorm::GetEpsilon() const { return this->primitive_->value_as_FusedBatchNorm()->epsilon(); }
float FusedBatchNorm::GetMomentum() const { return this->primitive_->value_as_FusedBatchNorm()->momentum(); } float FusedBatchNorm::GetMomentum() const { return this->primitive_->value_as_FusedBatchNorm()->momentum(); }
int FusedBatchNorm::GetSpatial() const { return this->primitive_->value_as_FusedBatchNorm()->spatial(); } int FusedBatchNorm::GetSpatial() const { return this->primitive_->value_as_FusedBatchNorm()->spatial(); }
......
...@@ -35,30 +35,9 @@ class FusedBatchNorm : public PrimitiveC { ...@@ -35,30 +35,9 @@ class FusedBatchNorm : public PrimitiveC {
void SetMomentum(float momentum); void SetMomentum(float momentum);
void SetSpatial(int spatial); void SetSpatial(int spatial);
#else #else
explicit FusedBatchNorm(schema::Primitive *primitive) : PrimitiveC(primitive) {} FusedBatchNorm() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_FusedBatchNorm();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateFusedBatchNorm(fbb, attr->epsilon(), attr->momentum(), attr->spatial());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_FusedBatchNorm, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
float GetEpsilon() const; float GetEpsilon() const;
float GetMomentum() const; float GetMomentum() const;
......
...@@ -29,7 +29,20 @@ void Gather::SetAxis(int axis) { this->primitive_->value.AsGather()->axis = axis ...@@ -29,7 +29,20 @@ void Gather::SetAxis(int axis) { this->primitive_->value.AsGather()->axis = axis
void Gather::SetBatchDims(int batch_dims) { this->primitive_->value.AsGather()->batchDims = batch_dims; } void Gather::SetBatchDims(int batch_dims) { this->primitive_->value.AsGather()->batchDims = batch_dims; }
#else #else
// Re-serialize a Gather primitive (axis/batchDims) into the caller-provided
// FlatBufferBuilder. Returns RET_OK on success, RET_ERROR when the source
// primitive does not hold a Gather value.
int Gather::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto gather_attr = primitive->value_as_Gather();
  if (gather_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_Gather return nullptr";
    return RET_ERROR;
  }
  // Copy both attributes verbatim into the new buffer.
  auto value_offset = schema::CreateGather(*fbb, gather_attr->axis(), gather_attr->batchDims());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Gather, value_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
int Gather::GetAxis() const { return this->primitive_->value_as_Gather()->axis(); } int Gather::GetAxis() const { return this->primitive_->value_as_Gather()->axis(); }
int Gather::GetBatchDims() const { return this->primitive_->value_as_Gather()->batchDims(); } int Gather::GetBatchDims() const { return this->primitive_->value_as_Gather()->batchDims(); }
......
...@@ -34,30 +34,9 @@ class Gather : public PrimitiveC { ...@@ -34,30 +34,9 @@ class Gather : public PrimitiveC {
void SetAxis(int axis); void SetAxis(int axis);
void SetBatchDims(int batch_dims); void SetBatchDims(int batch_dims);
#else #else
explicit Gather(schema::Primitive *primitive) : PrimitiveC(primitive) {} Gather() = default;
schema::Primitive *Init(schema::Primitive *primitive) {
flatbuffers::FlatBufferBuilder fbb(1024);
auto attr = primitive->value_as_Gather();
MS_ASSERT(attr != nullptr);
auto val_offset = schema::CreateGather(fbb, attr->axis(), attr->batchDims());
auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Gather, val_offset.o);
fbb.Finish(prim_offset);
auto buf = fbb.GetBufferPointer();
MS_ASSERT(buf != nullptr);
auto buf_bak = new char[fbb.GetSize()];
memcpy(buf_bak, buf, fbb.GetSize());
auto root = flatbuffers::GetRoot<schema::Primitive>(buf_bak);
auto prim = const_cast<schema::Primitive *>(root);
delete[] buf_bak; int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
fbb.Clear();
return prim;
}
#endif #endif
int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override; int InferShape(std::vector<lite::tensor::Tensor *> inputs_, std::vector<lite::tensor::Tensor *> outputs_) override;
int GetAxis() const; int GetAxis() const;
......
...@@ -24,7 +24,20 @@ int GatherNd::GetBatchDims() const { return this->primitive_->value.AsGatherNd() ...@@ -24,7 +24,20 @@ int GatherNd::GetBatchDims() const { return this->primitive_->value.AsGatherNd()
void GatherNd::SetBatchDims(int batch_dims) { this->primitive_->value.AsGatherNd()->batchDims = batch_dims; } void GatherNd::SetBatchDims(int batch_dims) { this->primitive_->value.AsGatherNd()->batchDims = batch_dims; }
#else #else
// Re-serialize a GatherNd primitive (batchDims) into the caller-provided
// FlatBufferBuilder. Returns RET_OK on success, RET_ERROR when the source
// primitive does not hold a GatherNd value.
int GatherNd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
  MS_ASSERT(nullptr != primitive);
  MS_ASSERT(nullptr != fbb);
  auto gather_nd_attr = primitive->value_as_GatherNd();
  if (gather_nd_attr == nullptr) {
    MS_LOG(ERROR) << "value_as_GatherNd return nullptr";
    return RET_ERROR;
  }
  // Copy the single attribute verbatim into the new buffer.
  auto value_offset = schema::CreateGatherNd(*fbb, gather_nd_attr->batchDims());
  auto primitive_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_GatherNd, value_offset.o);
  fbb->Finish(primitive_offset);
  return RET_OK;
}
int GatherNd::GetBatchDims() const { return this->primitive_->value_as_GatherNd()->batchDims(); } int GatherNd::GetBatchDims() const { return this->primitive_->value_as_GatherNd()->batchDims(); }
#endif #endif
......
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册