提交 0fc6f7e1 编写于 作者: L lyvette

Supplement resize_NN and pad parser of TFLite.

fix bug.

format.

Fix bug.

Supplement four activationType parser of TFLite.

modify origin parser.

Supplement ceil/floor/rank/reverse/squeeze.

Modify origin parser and supplement LRN.

Supplement biasadd/div/fill/floordiv/floormod/gather/gathernd/pow

supplement unfinished argmin/expanddims/range

supplement missing batchnorm/fixedpoint/gatherv2/innerproduct

Modify
上级 b0b4fa08
......@@ -29,10 +29,10 @@ STATUS TfliteAddParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp
bool quantizedModel) {
MS_LOG(DEBUG) << "parse TfliteAddParser";
std::unique_ptr<schema::AddT> attr(new schema::AddT());
auto weight_index = tfliteOp->inputs[1];
const auto &weight_tensor = tfliteTensors[weight_index];
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
return RET_ERROR;
}
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h"
#include <memory>
#include <vector>
namespace mindspore {
namespace lite {
// Converts a TFLite ArgMin operator into a schema::ArgMin primitive.
// The axis is read from the op's second input tensor, which must be a
// constant scalar stored in the model buffer.
// Returns RET_OK on success, RET_NULL_PTR when builtin options are missing,
// RET_ERROR when the axis constant cannot be read.
STATUS TfliteArgminParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                 const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                 const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                 const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                 schema::CNodeT *op,
                                 TensorCache *tensor_cache,
                                 bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteArgminParser";
  std::unique_ptr<schema::ArgMinT> attr(new schema::ArgMinT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsArgMinOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Missing builtin options means a malformed model; do not continue parsing.
    return RET_NULL_PTR;
  }
  // get axis: second input is a constant scalar tensor holding the axis value
  auto axis_idx = tfliteOp->inputs[1];
  auto &buf_data = tfliteModelBuffer[tfliteTensors[axis_idx]->buffer];
  if (buf_data == nullptr || buf_data->data.empty()) {
    // Guard against a missing/empty constant buffer before dereferencing it.
    MS_LOG(ERROR) << "get axis data failed";
    return RET_ERROR;
  }
  auto data_ptr = buf_data->data.data();
  attr->axis = *(static_cast<int32_t *>(static_cast<void *>(data_ptr)));
  // the following use default values
  attr->outMaxValue = false;
  attr->topK = 1;
  attr->keepDims = false;
  attr->axisType = 0;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_ArgMin;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}

TfliteNodeRegister g_TfliteArgminParser("Argmin", new TfliteArgminParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_ARGMIN_PARSER_H
#define PREDICT_TFLITE_ARGMIN_PARSER_H
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite Argmin operator; registered with the node-parser
// registry under the name "Argmin".
class TfliteArgminParser : public TfliteNodeParser {
 public:
  TfliteArgminParser() : TfliteNodeParser("Argmin") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // `tfliteTensors`/`tfliteModelBuffer` give access to constant inputs (axis).
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_ARGMIN_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_ceil_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite Ceil operator into a schema::Ceil primitive.
// Ceil carries no attributes, so only the primitive type is populated.
STATUS TfliteCeilParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                               schema::CNodeT *op,
                               TensorCache *tensor_cache,
                               bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteCeilParser";
  auto attr = std::make_unique<schema::CeilT>();
  if (op == nullptr) {
    // Nothing to attach the primitive to; parsing itself still succeeds.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Ceil;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

TfliteNodeRegister g_TfliteCeilParser("Ceil", new TfliteCeilParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_CEIL_PARSER_H
#define PREDICT_TFLITE_CEIL_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite Ceil operator; registered with the node-parser
// registry under the name "Ceil".
class TfliteCeilParser : public TfliteNodeParser {
 public:
  TfliteCeilParser() : TfliteNodeParser("Ceil") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_CEIL_PARSER_H
......@@ -27,11 +27,11 @@ STATUS TfliteConcatParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflit
schema::CNodeT *op,
TensorCache *tensor_cache,
bool quantizedModel) {
// MS_LOGD("parse TfliteConcatParser");
MS_LOG(DEBUG) << "parse TfliteConcatParser";
std::unique_ptr<schema::ConcatT> attr(new schema::ConcatT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsConcatenationOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
......
......@@ -26,11 +26,11 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteConvParser");
MS_LOG(DEBUG) << "parse TfliteConvParser";
std::unique_ptr<schema::Conv2DT> attr(new schema::Conv2DT());
const auto &tfliteAttr = tflite_op->builtin_options.AsConv2DOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
attr->group = 1;
......@@ -47,7 +47,7 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
auto weight_shape = weight_tensor->shape;
......@@ -61,7 +61,7 @@ STATUS TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
const auto &bias_tensor = tflite_tensors[bias_index];
std::vector<tflite::TensorT *> bias_tensors{bias_tensor.get()};
if (RET_OK != ParseBias(bias_tensors, tfliteModelBuffer, tensor_cache)) {
// MS_LOGE("parse bias failed");
MS_LOG(ERROR) << "parse bias failed";
return RET_ERROR;
}
}
......
......@@ -75,11 +75,11 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::Operator
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteDepthwiseConv2DParser");
MS_LOG(DEBUG) << "parse TfliteDepthwiseConv2DParser";
std::unique_ptr<schema::DepthwiseConv2DT> attr(new schema::DepthwiseConv2DT());
const auto &tflite_attr = tflite_op->builtin_options.AsDepthwiseConv2DOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
attr->strideW = tflite_attr->stride_w;
......@@ -105,7 +105,7 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::Operator
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
......@@ -115,7 +115,7 @@ STATUS TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::Operator
const auto &bias_tensor = tflite_tensors[bias_index];
std::vector<tflite::TensorT *> bias_tensors{bias_tensor.get()};
if (RET_OK != ParseBias(bias_tensors, tfliteModelBuffer, tensor_cache)) {
// MS_LOGE("parse bias failed");
MS_LOG(ERROR) << "parse bias failed";
return RET_ERROR;
}
}
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_div_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite Div operator into a schema::Div primitive.
// The second input is treated as a constant weight tensor and cached.
STATUS TfliteDivParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op,
                              TensorCache *tensor_cache,
                              bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteDivParser";
  auto attr = std::make_unique<schema::DivT>();
  const auto &tfliteAttr = tfliteOp->builtin_options.AsDivOptions();
  if (tfliteAttr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    return RET_NULL_PTR;
  }
  // Cache the constant divisor (second input) as a weight tensor.
  const auto &divisor_tensor = tfliteTensors[tfliteOp->inputs[1]];
  std::vector<tflite::TensorT *> divisor_tensors{divisor_tensor.get()};
  if (ParseWeight(divisor_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC) != RET_OK) {
    MS_LOG(ERROR) << "parse weight failed";
    return RET_ERROR;
  }
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Div;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}

TfliteNodeRegister g_TfliteDivParser("Div", new TfliteDivParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_DIV_PARSER_H
#define PREDICT_TFLITE_DIV_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite Div operator; registered with the node-parser
// registry under the name "Div".
class TfliteDivParser : public TfliteNodeParser {
 public:
  TfliteDivParser() : TfliteNodeParser("Div") {}

  // Converts one TFLite operator into a schema primitive attached to `op`,
  // caching the constant divisor input via the tensor cache.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_DIV_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite ExpandDims operator into a schema::ExpandDims primitive.
// The axis to expand is read from the op's second input tensor, which must
// be a constant scalar stored in the model buffer.
// Returns RET_OK on success, RET_NULL_PTR when builtin options are missing,
// RET_ERROR when the axis constant cannot be read.
STATUS TfliteExpandDimsParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                     const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                     const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                     const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                     schema::CNodeT *op,
                                     TensorCache *tensor_cache,
                                     bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteExpandDimsParser";
  std::unique_ptr<schema::ExpandDimsT> attr(new schema::ExpandDimsT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsExpandDimsOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Missing builtin options means a malformed model; do not continue parsing.
    return RET_NULL_PTR;
  }
  // get axis: second input is a constant scalar tensor holding the dim value
  auto axis_idx = tfliteOp->inputs[1];
  auto &buf_data = tfliteModelBuffer[tfliteTensors[axis_idx]->buffer];
  if (buf_data == nullptr || buf_data->data.empty()) {
    // Guard against a missing/empty constant buffer before dereferencing it.
    MS_LOG(ERROR) << "get axis data failed";
    return RET_ERROR;
  }
  auto data_ptr = buf_data->data.data();
  attr->dim = *(static_cast<int32_t *>(static_cast<void *>(data_ptr)));
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_ExpandDims;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}

TfliteNodeRegister g_tfliteExpandDimsParser("ExpandDims", new TfliteExpandDimsParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_EXPAND_DIMS_PARSER_H
#define PREDICT_TFLITE_EXPAND_DIMS_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite ExpandDims operator; registered with the node-parser
// registry under the name "ExpandDims".
class TfliteExpandDimsParser : public TfliteNodeParser {
 public:
  TfliteExpandDimsParser() : TfliteNodeParser("ExpandDims") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // `tfliteTensors`/`tfliteModelBuffer` give access to constant inputs (axis).
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_EXPAND_DIMS_PARSER_H
......@@ -24,15 +24,14 @@ STATUS TfliteFakeQuantParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteFullyConnectedParser");
MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser";
std::unique_ptr<schema::FullConnectionT> attr(new schema::FullConnectionT());
auto weight_index = tfliteOp->inputs[1];
const auto &weight_tensor = tfliteTensors[weight_index];
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_NHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
if (tfliteOp->inputs.size() == 3) {
......@@ -41,7 +40,7 @@ STATUS TfliteFakeQuantParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
const auto &bias_tensor = tfliteTensors[bias_index];
std::vector<tflite::TensorT *> bias_tensors{bias_tensor.get()};
if (RET_OK != ParseBias(bias_tensors, tfliteModelBuffer, tensor_cache)) {
// MS_LOGE("parse bias failed");
MS_LOG(ERROR) << "parse bias failed";
return RET_ERROR;
}
}
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite Fill operator into a schema::Fill primitive.
// The output dimensions are read from the op's second input tensor (a
// constant) when present.
// Returns RET_OK on success, RET_ERROR when the dims constant cannot be read.
STATUS TfliteFillParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                               schema::CNodeT *op,
                               TensorCache *tensor_cache,
                               bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteFillParser";
  std::unique_ptr<schema::FillT> attr(new schema::FillT());
  if (tfliteOp->inputs.size() > 1) {
    if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->dims)) {
      // Log the failure instead of returning silently, consistent with the
      // error reporting of the other TFLite parsers.
      MS_LOG(ERROR) << "get fill dims failed";
      return RET_ERROR;
    }
  }
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Fill;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}

TfliteNodeRegister g_TfliteFillParser("Fill", new TfliteFillParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_FILL_PARSER_H
#define PREDICT_TFLITE_FILL_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite Fill operator; registered with the node-parser
// registry under the name "Fill".
class TfliteFillParser : public TfliteNodeParser {
 public:
  TfliteFillParser() : TfliteNodeParser("Fill") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // Reads the constant dims input (when present) from the model buffer.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_FILL_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_floor_div_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite FloorDiv operator into a schema::FloorDiv primitive.
// FloorDiv carries no attributes, so only the primitive type is populated.
STATUS TfliteFloorDivParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                   const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                   schema::CNodeT *op,
                                   TensorCache *tensor_cache,
                                   bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteFloorDivParser";
  auto attr = std::make_unique<schema::FloorDivT>();
  if (op == nullptr) {
    // Nothing to attach the primitive to; parsing itself still succeeds.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_FloorDiv;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

TfliteNodeRegister g_tfliteFloorDivParser("FloorDiv", new TfliteFloorDivParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_FLOOR_DIV_PARSER_H
#define PREDICT_TFLITE_FLOOR_DIV_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite FloorDiv operator; registered with the node-parser
// registry under the name "FloorDiv".
class TfliteFloorDivParser : public TfliteNodeParser {
 public:
  TfliteFloorDivParser() : TfliteNodeParser("FloorDiv") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_FLOOR_DIV_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_floor_mod_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite FloorMod operator into a schema::FloorMod primitive.
// FloorMod carries no attributes, so only the primitive type is populated.
STATUS TfliteFloorModParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                   const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                   schema::CNodeT *op,
                                   TensorCache *tensor_cache,
                                   bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteFloorModParser";
  auto attr = std::make_unique<schema::FloorModT>();
  if (op == nullptr) {
    // Nothing to attach the primitive to; parsing itself still succeeds.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_FloorMod;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

TfliteNodeRegister g_tfliteFloorModParser("FloorMod", new TfliteFloorModParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_FLOOR_MOD_PARSER_H
#define PREDICT_TFLITE_FLOOR_MOD_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite FloorMod operator; registered with the node-parser
// registry under the name "FloorMod".
class TfliteFloorModParser : public TfliteNodeParser {
 public:
  TfliteFloorModParser() : TfliteNodeParser("FloorMod") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_FLOOR_MOD_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_floor_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite Floor operator into a schema::Floor primitive.
// Floor carries no attributes, so only the primitive type is populated.
STATUS TfliteFloorParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                schema::CNodeT *op,
                                TensorCache *tensor_cache,
                                bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteFloorParser";
  auto attr = std::make_unique<schema::FloorT>();
  if (op == nullptr) {
    // Nothing to attach the primitive to; parsing itself still succeeds.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Floor;
  op->primitive->value.value = attr.release();
  return RET_OK;
}

TfliteNodeRegister g_tfliteFloorParser("Floor", new TfliteFloorParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_FLOOR_PARSER_H
#define PREDICT_TFLITE_FLOOR_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite Floor operator; registered with the node-parser
// registry under the name "Floor".
class TfliteFloorParser : public TfliteNodeParser {
 public:
  TfliteFloorParser() : TfliteNodeParser("Floor") {}

  // Converts one TFLite operator into a schema primitive attached to `op`.
  // Returns a STATUS code (RET_OK on success).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_FLOOR_PARSER_H
......@@ -25,7 +25,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteFullyConnectedParser");
MS_LOG(DEBUG) << "parse TfliteFullyConnectedParser";
std::unique_ptr<schema::FullConnectionT> attr(new schema::FullConnectionT());
auto weight_index = tfliteOp->inputs[1];
......@@ -33,7 +33,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_NHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
if (tfliteOp->inputs.size() == 3) {
......@@ -42,7 +42,7 @@ STATUS TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT
const auto &bias_tensor = tfliteTensors[bias_index];
std::vector<tflite::TensorT *> bias_tensors{bias_tensor.get()};
if (RET_OK != ParseBias(bias_tensors, tfliteModelBuffer, tensor_cache)) {
// MS_LOGE("parse bias failed");
MS_LOG(ERROR) << "parse bias failed";
return RET_ERROR;
}
}
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parses a TFLite "GatherNd" operator into a MindSpore Lite GatherNd primitive.
// Returns RET_OK on success, RET_NULL_PTR when the operator carries no
// GatherNdOptions (malformed model).
STATUS TfliteGatherNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                   const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                   schema::CNodeT *op,
                                   TensorCache *tensor_cache,
                                   bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteGatherNdParser";
  std::unique_ptr<schema::GatherNdT> attr(new schema::GatherNdT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsGatherNdOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and accepted the malformed op.
    return RET_NULL_PTR;
  }
  attr->batchDims = 0;  // tflite GatherNd has no batch_dims; use the default
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_GatherNd;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tfliteGatherNdParser("GatherNd", new TfliteGatherNdParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_GATHER_ND_PARSER_H
#define PREDICT_TFLITE_GATHER_ND_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "GatherNd" operator into the MindSpore Lite
// schema::GatherNd primitive; registered in the matching .cc file.
class TfliteGatherNdParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "GatherNd".
  TfliteGatherNdParser() : TfliteNodeParser("GatherNd") {}
  // Fills `op` with a GatherNd primitive built from `tfliteOp`.
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_GATHER_ND_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parses a TFLite "Gather" operator into a MindSpore Lite Gather primitive.
// Returns RET_OK on success, RET_NULL_PTR when the operator carries no
// GatherOptions.
STATUS TfliteGatherParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                 const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                 const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                 const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                 schema::CNodeT *op,
                                 TensorCache *tensor_cache,
                                 bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteGatherParser";
  std::unique_ptr<schema::GatherT> attr(new schema::GatherT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsGatherOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and dereferenced the null pointer below.
    return RET_NULL_PTR;
  }
  attr->axis = tflite_attr->axis;
  attr->batchDims = 0;  // tflite Gather here is parsed without batch dims; default
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Gather;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tfliteGatherParser("Gather", new TfliteGatherParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_GATHER_PARSER_H
#define PREDICT_TFLITE_GATHER_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Gather" operator into the MindSpore Lite
// schema::Gather primitive; registered in the matching .cc file.
class TfliteGatherParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "Gather".
  TfliteGatherParser() : TfliteNodeParser("Gather") {}
  // Fills `op` with a Gather primitive (axis taken from GatherOptions).
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_GATHER_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_gather_v2_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parses a TFLite "GatherV2" operator into a MindSpore Lite Gather primitive
// (GatherV2 shares GatherOptions with Gather).
// Returns RET_OK on success, RET_NULL_PTR when the operator carries no
// GatherOptions.
STATUS TfliteGatherV2Parser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                   const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                   const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                   const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                   schema::CNodeT *op,
                                   TensorCache *tensor_cache,
                                   bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteGatherV2Parser";
  std::unique_ptr<schema::GatherT> attr(new schema::GatherT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsGatherOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and dereferenced the null pointer below.
    return RET_NULL_PTR;
  }
  attr->axis = tflite_attr->axis;
  attr->batchDims = 0;  // default
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Gather;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tfliteGatherV2Parser("GatherV2", new TfliteGatherV2Parser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_GATHER_V2_PARSER_H
#define PREDICT_TFLITE_GATHER_V2_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "GatherV2" operator into the MindSpore Lite
// schema::Gather primitive; registered in the matching .cc file.
class TfliteGatherV2Parser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "GatherV2".
  TfliteGatherV2Parser() : TfliteNodeParser("GatherV2") {}
  // Fills `op` with a Gather primitive (axis taken from GatherOptions).
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_GATHER_V2_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_inner_product_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Maps a TFLite "InnerProduct" node onto the schema::FullConnection primitive.
// Input 1 is the weight tensor; an optional input 2 is the bias.
// Returns RET_OK on success, RET_ERROR if weight/bias staging fails.
STATUS TfliteInnerProductParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                       const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                       const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                       const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                       schema::CNodeT *op,
                                       TensorCache *tensor_cache,
                                       bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteInnerProductParser";
  std::unique_ptr<schema::FullConnectionT> attr(new schema::FullConnectionT());

  // Stage the weight (input 1) into the tensor cache in NHWC layout.
  const auto &filter = tfliteTensors[tfliteOp->inputs[1]];
  std::vector<tflite::TensorT *> filters{filter.get()};
  if (ParseWeight(filters, tfliteModelBuffer, tensor_cache, schema::Format_NHWC) != RET_OK) {
    MS_LOG(ERROR) << "parse weight failed";
    return RET_ERROR;
  }

  // A third input, when present, carries the bias.
  if (tfliteOp->inputs.size() == 3) {
    attr->hasBias = true;
    const auto &bias = tfliteTensors[tfliteOp->inputs[2]];
    std::vector<tflite::TensorT *> biases{bias.get()};
    if (ParseBias(biases, tfliteModelBuffer, tensor_cache) != RET_OK) {
      MS_LOG(ERROR) << "parse bias failed";
      return RET_ERROR;
    }
  }
  attr->axis = 1;

  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_FullConnection;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tfliteInnerProductParser("InnerProduct", new TfliteInnerProductParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_INNER_PRODUCT_PARSER_H
#define PREDICT_TFLITE_INNER_PRODUCT_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "InnerProduct" operator into the MindSpore Lite
// schema::FullConnection primitive; registered in the matching .cc file.
class TfliteInnerProductParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "InnerProduct".
  TfliteInnerProductParser() : TfliteNodeParser("InnerProduct") {}
  // Stages weight/bias tensors and fills `op` with a FullConnection primitive.
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_INNER_PRODUCT_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_leaky_relu_parser.h"
namespace mindspore {
namespace lite {
// Parses a TFLite "LeakyRelu" operator into a MindSpore Lite Activation
// primitive carrying a LeakyReLU attribute (negative slope = alpha).
// Returns RET_OK on success, RET_NULL_PTR when LeakyReluOptions are missing.
STATUS TfliteLeakyReluParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteLeakyReluParser";
  std::unique_ptr<schema::LeakyReLUT> attr(new schema::LeakyReLUT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsLeakyReluOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and dereferenced the null pointer below.
    return RET_NULL_PTR;
  }
  attr->negativeSlope = tflite_attr->alpha;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Activation;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_TfliteLeakyReluParser("LeakyRelu", new TfliteLeakyReluParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LEAKY_RELU_PARSER_H
#define PREDICT_TFLITE_LEAKY_RELU_PARSER_H
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite "LeakyRelu" operator into the MindSpore Lite
// schema::Activation primitive; registered in the matching .cc file.
class TfliteLeakyReluParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "LeakyRelu".
  TfliteLeakyReluParser() : TfliteNodeParser("LeakyRelu") {}
  // Fills `op` with an Activation primitive (slope from LeakyReluOptions).
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_LEAKY_RELU_PARSER_H
......@@ -27,7 +27,7 @@ STATUS TfliteLogisticParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfl
schema::CNodeT *op,
TensorCache *tensor_cache,
bool quantizedModel) {
// MS_LOGD("parse TfliteLogisticParser");
MS_LOG(DEBUG) << "parse TfliteLogisticParser";
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
attr->type = schema::ActivationType_SIGMOID;
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parses a TFLite "LocalResponseNorm" operator into a MindSpore Lite
// LocalResponseNormalization primitive (radius/alpha/beta/bias copied over).
// Returns RET_OK on success, RET_NULL_PTR when the options are missing.
STATUS TfliteLRNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op,
                              TensorCache *tensor_cache,
                              bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteLRNParser";
  std::unique_ptr<schema::LocalResponseNormalizationT> attr(new schema::LocalResponseNormalizationT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsLocalResponseNormalizationOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and dereferenced the null pointer below.
    return RET_NULL_PTR;
  }
  attr->depth_radius = tflite_attr->radius;
  attr->alpha = tflite_attr->alpha;
  attr->beta = tflite_attr->beta;
  attr->bias = tflite_attr->bias;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_LocalResponseNormalization;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tfliteLRNParser("LocalResponseNorm", new TfliteLRNParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LRN_PARSER_H
// Bug fix: the guard previously defined PREDICT_TFLITE_ADD_PARSER_H
// (copy-paste from the Add parser header), leaving this header unguarded
// against double inclusion and clobbering the Add parser's guard macro.
#define PREDICT_TFLITE_LRN_PARSER_H

#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"

namespace mindspore {
namespace lite {
// Converts a TFLite "LocalResponseNorm" operator into the MindSpore Lite
// schema::LocalResponseNormalization primitive; registered in the .cc file.
class TfliteLRNParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "LocalResponseNorm".
  TfliteLRNParser() : TfliteNodeParser("LocalResponseNorm") {}
  // Fills `op` with a LocalResponseNormalization primitive.
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
}  // namespace lite
}  // namespace mindspore
#endif  // PREDICT_TFLITE_LRN_PARSER_H
......@@ -26,11 +26,11 @@ STATUS TfliteMaxPoolingParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("paser TfliteMaxPoolingParser");
MS_LOG(DEBUG) << "parse TfliteMaxPoolingParser";
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
const auto &tflite_attr = tflite_op->builtin_options.AsPool2DOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
attr->format = schema::Format_NHWC;
// attr->global
......
......@@ -25,12 +25,13 @@ STATUS TfliteMeanParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteO
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteMeanParser");
MS_LOG(DEBUG) << "parse TfliteMeanParser";
std::unique_ptr<schema::MeanT> attr(new schema::MeanT());
const auto &tflite_attr = tfliteOp->builtin_options.AsReducerOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
attr->keepDims = tflite_attr->keep_dims;
if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axis)) {
return RET_ERROR;
......
......@@ -25,11 +25,11 @@ STATUS TfliteMeanPoolingParser::Parse(const std::unique_ptr<tflite::OperatorT> &
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("paser TfliteMeanPoolingParser");
MS_LOG(DEBUG) << "parser TfliteMeanPoolingParser";
std::unique_ptr<schema::PoolingT> attr(new schema::PoolingT());
const auto &tflite_attr = tflite_op->builtin_options.AsPool2DOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
attr->format = schema::Format_NHWC;
// attr->global
......
......@@ -34,12 +34,12 @@ std::unique_ptr<tflite::ModelT> TfliteModelParser::ReadTfliteModelFromFlat(const
size_t size;
auto buf = ReadFile(model_path, &size);
if (buf == nullptr) {
// MS_LOGE("the file buffer is nullptr");
MS_LOG(ERROR) << "the file buffer is nullptr";
return nullptr;
}
flatbuffers::Verifier verify((const uint8_t *)buf, size);
if (!tflite::VerifyModelBuffer(verify)) {
// MS_LOGE("the buffer is invalid and fail to create graph");
MS_LOG(ERROR) << "the buffer is invalid and fail to create graph";
return nullptr;
}
return tflite::UnPackModel(buf);
......@@ -145,25 +145,26 @@ STATUS TfliteModelParser::ParseOp(const std::unique_ptr<tflite::ModelT> &tflite_
std::unique_ptr<schema::CNodeT> op(new schema::CNodeT);
op->name = opType + "-" + std::to_string(i++);
// MS_LOGD("parse op: [%s]", op->name.c_str());
MS_LOG(INFO) << "parse op: [%s]" << op->name.c_str();
// 1. init op attr params
auto node_parser = TfliteNodeParserRegistry::GetInstance()->GetNodeParser(opType);
if (node_parser == nullptr) {
// MS_LOGE("node %s parser is nullptr", opType.c_str());
return RET_NULL_PTR;
MS_LOG(ERROR) << "cannot find node parser, opType: "<< opType.c_str();
continue;
// return RET_NULL_PTR;
}
auto status = node_parser->Parse(tflite_op, tflite_subgraph->tensors, tflite_model->buffers,
tflite_model->operator_codes, op.get(), tensorCache, false);
if (status != RET_OK) {
// MS_LOGE("node %s parser failed", opType.c_str());
MS_LOG(ERROR) << "node " << opType.c_str() << " parser failed";
return RET_ERROR;
}
status = SetOpOutputIdx(tflite_subgraph, tflite_op, op.get(), tensorCache);
if (status != RET_OK) {
// MS_LOGE("Set Op %s Output Index Failed!", op->name.c_str());
MS_LOG(ERROR) << "Set Op " << op->name.c_str() << " Output Index Failed!";
return RET_ERROR;
}
......@@ -230,7 +231,7 @@ MetaGraphT *TfliteModelParser::Parse(const std::string &modelFile, const std::st
// set dst subGraph op attr etc.
auto status = ParseOp(tflite_model, tflite_subgraph, subGraph.get(), &tensorCache);
if (status != RET_OK) {
// MS_LOGE("ParseOp failed.");
MS_LOG(ERROR) << "ParseOp failed.";
return nullptr;
}
......
......@@ -27,24 +27,23 @@ STATUS TfliteMulParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp
schema::CNodeT *op,
TensorCache *tensor_cache,
bool quantizedModel) {
// MS_LOGD("parse TfliteMulParser");
MS_LOG(DEBUG) << "parse TfliteMulParser";
std::unique_ptr<schema::MulT> attr(new schema::MulT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsMulOptions();
if (tfliteAttr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
auto weight_index = tfliteOp->inputs[1];
const auto &weight_tensor = tfliteTensors[weight_index];
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
const auto &tfliteAttr = tfliteOp->builtin_options.AsMulOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
return RET_NULL_PTR;
}
// tfliteAttr->fused_activation_function
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Mul;
......
......@@ -32,11 +32,11 @@ STATUS TfliteNodeParser::CopyTfliteTensorData(const std::vector<std::unique_ptr<
tensor->data.resize(data_size);
auto ret = memcpy_s(tensor->data.data(), data_size, tfliteModelBuffer[buffer_idx]->data.data(), data_size);
if (ret) {
// MS_LOGE("memcpy tensor data failed, error code: %d", ret);
MS_LOG(ERROR) << "memcpy tensor data failed, error code: %d" << ret;
return ret;
}
} else {
// MS_LOGE("src tensor data is empty.");
MS_LOG(ERROR) << "src tensor data is empty.";
return RET_ERROR;
}
return RET_OK;
......@@ -57,7 +57,7 @@ STATUS TfliteNodeParser::ParseWeight(const std::vector<tflite::TensorT *> &weigh
if (weight_tensor->buffer > 0) {
CopyTfliteTensorData(tfliteModelBuffer, weight_tensor, tensor.get());
}
// MS_LOGD("add weight tensor name: %s", weight_tensor->name.c_str());
MS_LOG(DEBUG) << "add weight tensor name: %s", weight_tensor->name.c_str();
tensor_cache->AddTensor(weight_tensor->name, tensor.release(), TF_CONST);
}
}
......
......@@ -49,6 +49,10 @@ class TfliteNodeParser {
STATUS ParseBias(const std::vector<tflite::TensorT *> &weight_tenosr,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer, TensorCache *tensor_cache);
STATUS ParseAttr(const std::vector<tflite::TensorT *> &attr_tenosrs,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
mindspore::lite::TensorCache *tensor_cache, schema::Format format);
STATUS CopyTfliteTensorData(const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const tflite::TensorT *tflite_tensor, schema::TensorT *tensor);
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h"
namespace mindspore {
namespace lite {
// Parses a TFLite "Pad" operator into a MindSpore Lite Pad primitive using
// CONSTANT padding mode; the paddings come from input tensor 1 when present.
// Returns RET_OK on success, RET_NULL_PTR when PadOptions are missing,
// RET_ERROR when the paddings tensor cannot be read.
STATUS TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TflitePadParser";
  std::unique_ptr<schema::PadT> attr(new schema::PadT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsPadOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // Bug fix: previously fell through and accepted the malformed op.
    return RET_NULL_PTR;
  }
  attr->paddingMode = schema::PaddingMode_CONSTANT;
  if (tfliteOp->inputs.size() > 1) {
    // Input 1 holds the per-dimension paddings.
    if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->paddings)) {
      return RET_ERROR;
    }
  }
  // attr->constantValue = 0.0f;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Pad;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
TfliteNodeRegister g_tflitePadParser("Pad", new TflitePadParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_PAD_PARSER_H
#define PREDICT_TFLITE_PAD_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Pad" operator into the MindSpore Lite
// schema::Pad primitive; registered in the matching .cc file.
class TflitePadParser : public TfliteNodeParser {
 public:
  // Registers this parser under the TFLite op type name "Pad".
  TflitePadParser() : TfliteNodeParser("Pad") {}
  // Fills `op` with a Pad primitive (CONSTANT mode, paddings from input 1).
  // Returns RET_OK on success; see the .cc implementation for error codes.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_PAD_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_pow_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Pow" operator into a schema::PowerT primitive.
// The TFLite op carries no attributes (the exponent is an input tensor),
// so power/scale/shift are left at their placeholder defaults.
STATUS TflitePowParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op,
                              TensorCache *tensor_cache,
                              bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TflitePowParser";
  auto attr = std::make_unique<schema::PowerT>();
  if (tfliteOp->builtin_options.AsPowOptions() == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
  }
  // Placeholder values; nothing in the TFLite op maps onto these fields.
  attr->shift = 0.0f;
  attr->scale = 0.0f;
  attr->power = 0.0f;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Power;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
// Registers this parser under the op name "Pow".
TfliteNodeRegister g_TflitePowParser("Pow", new TflitePowParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_POW_PARSER_H
#define PREDICT_TFLITE_POW_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Pow" operator into a schema::PowerT primitive.
class TflitePowParser : public TfliteNodeParser {
 public:
  TflitePowParser() : TfliteNodeParser("Pow") {}
  // Attaches a PrimitiveType_Power value to `op`; always returns RET_OK.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_POW_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite "Range" operator into a schema::RangeT primitive.
// start / limit / delta come in as constant int32 input tensors:
// inputs[1] = start, inputs[2] = limit, inputs[3] = delta (optional).
// NOTE(review): the use of inputs[1..3] (rather than the usual TFLite
// inputs[0..2]) is preserved from the original code — confirm against callers.
STATUS TfliteRangeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                schema::CNodeT *op,
                                TensorCache *tensor_cache,
                                bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteRangeParser";
  std::unique_ptr<schema::RangeT> attr(new schema::RangeT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsRangeOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
  }
  // Read `start` from the constant buffer of inputs[1].
  auto start_idx = tfliteOp->inputs[1];
  auto &start_buf_data = tfliteModelBuffer[tfliteTensors[start_idx]->buffer];
  auto start_data_ptr = start_buf_data->data.data();
  attr->start = *(static_cast<int32_t *>(static_cast<void *>(start_data_ptr)));
  // Read `limit` from the constant buffer of inputs[2].
  auto limit_idx = tfliteOp->inputs[2];
  auto &limit_buf_data = tfliteModelBuffer[tfliteTensors[limit_idx]->buffer];
  auto limit_data_ptr = limit_buf_data->data.data();
  attr->limit = *(static_cast<int32_t *>(static_cast<void *>(limit_data_ptr)));
  // BUG FIX: the delta tensor lives at inputs[3], so it only exists when the
  // op has more than three inputs. The old check (size() > 2) was always true
  // at this point and made inputs[3] an out-of-bounds read for 3-input ops.
  if (tfliteOp->inputs.size() > 3) {
    auto delta_idx = tfliteOp->inputs[3];
    auto &delta_buf_data = tfliteModelBuffer[tfliteTensors[delta_idx]->buffer];
    auto delta_data_ptr = delta_buf_data->data.data();
    attr->delta = *(static_cast<int32_t *>(static_cast<void *>(delta_data_ptr)));
  } else {
    attr->delta = 0;  // default
  }
  attr->dType = 0;  // default
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Range;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
// Registers this parser under the op name "Range".
TfliteNodeRegister g_tfliteRangeParser("Range", new TfliteRangeParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_RANGE_PARSER_H
#define PREDICT_TFLITE_RANGE_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Range" operator into a schema::RangeT primitive.
// start/limit/delta are read from the op's constant input tensors (see impl).
class TfliteRangeParser : public TfliteNodeParser {
 public:
  TfliteRangeParser() : TfliteNodeParser("Range") {}
  // Attaches a PrimitiveType_Range value to `op`; always returns RET_OK.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_RANGE_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite "Rank" operator into a schema::RankT primitive.
// Rank has no attributes to translate; only the primitive type is attached.
STATUS TfliteRankParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                               schema::CNodeT *op,
                               TensorCache *tensor_cache,
                               bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteRankParser";
  auto attr = std::make_unique<schema::RankT>();
  if (tfliteOp->builtin_options.AsRankOptions() == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
  }
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Rank;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
// Registers this parser under the op name "Rank".
TfliteNodeRegister g_tfliteRankParser("Rank", new TfliteRankParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_RANK_PARSER_H
#define PREDICT_TFLITE_RANK_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Rank" operator into a schema::RankT primitive.
class TfliteRankParser : public TfliteNodeParser {
 public:
  TfliteRankParser() : TfliteNodeParser("Rank") {}
  // Attaches a PrimitiveType_Rank value to `op`; always returns RET_OK.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_RANK_PARSER_H
......@@ -20,13 +20,14 @@
namespace mindspore {
namespace lite {
STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteActivationParser");
STATUS TfliteRelu6Parser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
MS_LOG(DEBUG) << "parse TfliteRelu6Parser";
std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT());
attr->type = schema::ActivationType_RELU6;
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
......@@ -36,6 +37,6 @@ STATUS TfliteActivationParser::Parse(const std::unique_ptr<tflite::OperatorT> &t
return RET_OK;
}
TfliteNodeRegister g_tfliteActivationParser("Relu6", new TfliteActivationParser());
TfliteNodeRegister g_TfliteRelu6Parser("Relu6", new TfliteRelu6Parser());
} // namespace lite
} // namespace mindspore
......@@ -24,9 +24,9 @@
namespace mindspore {
namespace lite {
class TfliteActivationParser : public TfliteNodeParser {
class TfliteRelu6Parser : public TfliteNodeParser {
public:
TfliteActivationParser() : TfliteNodeParser("Relu6") {}
TfliteRelu6Parser() : TfliteNodeParser("Relu6") {}
STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_relu_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Relu" operator into an Activation primitive of type RELU.
STATUS TfliteReluParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                               schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteReluParser";
  auto attr = std::make_unique<schema::ActivationT>();
  attr->type = schema::ActivationType_RELU;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Activation;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
// Registers this parser under the op name "Relu".
TfliteNodeRegister g_TfliteReluParser("Relu", new TfliteReluParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_RELU_PARSER_H
#define PREDICT_TFLITE_RELU_PARSER_H
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Relu" operator into an Activation primitive
// with ActivationType_RELU.
class TfliteReluParser : public TfliteNodeParser {
 public:
  TfliteReluParser() : TfliteNodeParser("Relu") {}
  // Attaches a PrimitiveType_Activation value to `op`; always returns RET_OK.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_RELU_PARSER_H
......@@ -26,12 +26,12 @@ STATUS TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteReshapeParser");
MS_LOG(DEBUG) << "parse TfliteReshapeParser";
std::unique_ptr<schema::ReshapeT> attr(new schema::ReshapeT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsReshapeOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
......
......@@ -25,14 +25,15 @@ STATUS TfliteResizeBilinearParser::Parse(const std::unique_ptr<tflite::OperatorT
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteResizeBilinearParser");
MS_LOG(DEBUG) << "parse TfliteResizeBilinearParser";
std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsResizeBilinearOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
attr->format = schema::Format_NHWC;
attr->method = schema::ResizeMethod_BILINEAR;
attr->alignCorners = tfliteAttr->align_corners;
auto tfliteResizeTensorIndex = tfliteOp->inputs[1];
......@@ -42,6 +43,7 @@ STATUS TfliteResizeBilinearParser::Parse(const std::unique_ptr<tflite::OperatorT
auto width = buffData[1];
attr->newWidth = width;
attr->newHeight = height;
// attr->preserveAspectRatio
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
......
......@@ -26,7 +26,7 @@ namespace mindspore {
namespace lite {
class TfliteResizeBilinearParser : public TfliteNodeParser {
public:
TfliteResizeBilinearParser() : TfliteNodeParser("ResizeBilinear") {}
TfliteResizeBilinearParser() : TfliteNodeParser("ResizeBilinear") {}
STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
......@@ -38,5 +38,5 @@ class TfliteResizeBilinearParser : public TfliteNodeParser {
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_ADD_PARSER_H
#endif // PREDICT_TFLITE_RESIZE_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_resize_nearest_neighbor_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "ResizeNearestNeighbor" operator into a schema::ResizeT
// primitive. The target size (height, width) is read from the constant
// buffer of the op's second input tensor.
STATUS TfliteResizeNearestNeighborParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                                const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                                const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                                schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteResizeNearestNeighborParser";
  std::unique_ptr<schema::ResizeT> attr(new schema::ResizeT());
  const auto &tfliteAttr = tfliteOp->builtin_options.AsResizeNearestNeighborOptions();
  if (tfliteAttr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    return RET_NULL_PTR;
  }
  attr->format = schema::Format_NHWC;
  attr->method = schema::ResizeMethod_NEAREST_NEIGHBOR;
  attr->alignCorners = tfliteAttr->align_corners;
  auto tfliteResizeTensorIndex = tfliteOp->inputs[1];
  auto resizeTensorBufferIndex = tfliteTensors.at(tfliteResizeTensorIndex)->buffer;
  auto buffData = reinterpret_cast<int32_t *>(tfliteModelBuffer.at(resizeTensorBufferIndex)->data.data());
  // BUG FIX: data() is null when the size tensor has no constant buffer
  // (e.g. a dynamically computed size); guard before dereferencing.
  if (buffData == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " new size data failed";
    return RET_NULL_PTR;
  }
  auto height = buffData[0];
  auto width = buffData[1];
  attr->newWidth = width;
  attr->newHeight = height;
  // attr->preserveAspectRatio
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Resize;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
// Registers this parser under the op key "NearestNeighbor".
TfliteNodeRegister g_tfliteResizeNearestNeighborParser("NearestNeighbor",
                                                       new TfliteResizeNearestNeighborParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_RESIZE_NN_PARSER_H
#define PREDICT_TFLITE_RESIZE_NN_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser converting a TFLite "ResizeNearestNeighbor" operator into a
// schema::ResizeT primitive with ResizeMethod_NEAREST_NEIGHBOR.
// NOTE(review): registered under the key "NearestNeighbor" — confirm this
// matches the converter's TFLite op-name mapping.
class TfliteResizeNearestNeighborParser : public TfliteNodeParser {
 public:
  TfliteResizeNearestNeighborParser() : TfliteNodeParser("NearestNeighbor") {}
  // Reads the new (height, width) from the op's second input tensor and
  // attaches a PrimitiveType_Resize value to `op`.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_RESIZE_NN_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Converts a TFLite "Reverse" (ReverseV2) operator into a schema::ReverseT
// primitive. The axes to reverse along come in as the second input tensor.
STATUS TfliteReverseParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                  const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                  schema::CNodeT *op,
                                  TensorCache *tensor_cache,
                                  bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteReverseParser";
  auto attr = std::make_unique<schema::ReverseT>();
  const auto &tflite_attr = tfliteOp->builtin_options.AsReverseV2Options();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    return RET_NULL_PTR;
  }
  // Read the axis list from the constant buffer of inputs[1].
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axis)) {
    return RET_ERROR;
  }
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Reverse;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
// Registers this parser under the op name "Reverse".
TfliteNodeRegister g_tfliteReverseParser("Reverse", new TfliteReverseParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_REVERSE_PARSER_H
#define PREDICT_TFLITE_REVERSE_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Reverse" (ReverseV2) operator into a
// schema::ReverseT primitive.
class TfliteReverseParser : public TfliteNodeParser {
 public:
  TfliteReverseParser() : TfliteNodeParser("Reverse") {}
  // Reads the axis list from the op's second input tensor and attaches a
  // PrimitiveType_Reverse value to `op`. Returns RET_NULL_PTR when the op's
  // builtin options are missing, RET_ERROR when the axis data cannot be read.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_REVERSE_PARSER_H
......@@ -25,7 +25,7 @@ STATUS TfliteRsqrtParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteRsqrtParser");
MS_LOG(DEBUG) << "paser TfliteRsqrtParser";
std::unique_ptr<schema::RsqrtT> attr(new schema::RsqrtT());
if (op != nullptr) {
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_sigmoid_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Sigmoid" (Logistic) operator into an Activation
// primitive of type SIGMOID.
STATUS TfliteSigmoidParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                  const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                  schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteSigmoidParser";
  auto attr = std::make_unique<schema::ActivationT>();
  attr->type = schema::ActivationType_SIGMOID;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Activation;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
// Registers this parser under the op name "Sigmoid".
TfliteNodeRegister g_TfliteSigmoidParser("Sigmoid", new TfliteSigmoidParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SIGMOID_PARSER_H
#define PREDICT_TFLITE_SIGMOID_PARSER_H
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include <vector>
#include <memory>
namespace mindspore {
namespace lite {
// Parser converting a TFLite "Sigmoid" operator into an Activation primitive
// with ActivationType_SIGMOID.
class TfliteSigmoidParser : public TfliteNodeParser {
 public:
  TfliteSigmoidParser() : TfliteNodeParser("Sigmoid") {}
  // Attaches a PrimitiveType_Activation value to `op`; always returns RET_OK.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SIGMOID_PARSER_H
......@@ -26,7 +26,7 @@ STATUS TfliteSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteSliceParser");
MS_LOG(DEBUG) << "parse TfliteSliceParser";
std::unique_ptr<schema::SliceT> attr(new schema::SliceT());
if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->begin)) {
......
......@@ -26,11 +26,11 @@ STATUS TfliteSoftmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteSoftmaxParser");
MS_LOG(DEBUG) << "parse TfliteSoftmaxParser";
std::unique_ptr<schema::SoftMaxT> attr(new schema::SoftMaxT());
const auto &tflite_attr = tfliteOp->builtin_options.AsSoftmaxOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
// attr->axis
......
......@@ -25,11 +25,11 @@ STATUS TfliteSquaredDifferenceParser::Parse(const std::unique_ptr<tflite::Operat
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteSquaredDifferenceParser");
MS_LOG(DEBUG) << "parse TfliteSquaredDifferenceParser";
std::unique_ptr<schema::SquaredDifferenceT> attr(new schema::SquaredDifferenceT());
const auto &tflite_attr = tfliteOp->builtin_options.AsSquaredDifferenceOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
if (op != nullptr) {
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "Squeeze" operator into a schema::SqueezeT primitive,
// copying the squeeze_dims attribute into attr->axis.
STATUS TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                  const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                  schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(DEBUG) << "parse TfliteSqueezeParser";
  std::unique_ptr<schema::SqueezeT> attr(new schema::SqueezeT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsSqueezeOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
    // BUG FIX: the old code fell through and dereferenced tflite_attr below,
    // crashing on a null pointer. Bail out like the other parsers do.
    return RET_NULL_PTR;
  }
  attr->axis = tflite_attr->squeeze_dims;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Squeeze;
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}
// Registers this parser under the op name "Squeeze".
TfliteNodeRegister g_tfliteSqueezeParser("Squeeze", new TfliteSqueezeParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SQUEEZE_PARSER_H
#define PREDICT_TFLITE_SQUEEZE_PARSER_H

#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"

namespace mindspore {
namespace lite {
// Parses a TFLite SQUEEZE operator into a MindSpore Lite Squeeze primitive.
class TfliteSqueezeParser : public TfliteNodeParser {
 public:
  TfliteSqueezeParser() : TfliteNodeParser("Squeeze") {}

  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
}  // namespace lite
}  // namespace mindspore
#endif  // PREDICT_TFLITE_SQUEEZE_PARSER_H
......@@ -25,11 +25,11 @@ STATUS TfliteStackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGI("paser TfliteStackParser");
MS_LOG(DEBUG) << "parse TfliteStackParser";
std::unique_ptr<schema::StackT> attr(new schema::StackT());
const auto &tflite_attr = tfliteOp->builtin_options.AsPackOptions();
if (tflite_attr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
attr->axis = tflite_attr->axis;
......
......@@ -27,23 +27,19 @@ STATUS TfliteSubParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp
schema::CNodeT *op,
TensorCache *tensor_cache,
bool quantizedModel) {
// MS_LOGD("parse TfliteSubParser");
MS_LOG(DEBUG) << "parse TfliteSubParser";
std::unique_ptr<schema::SubT> attr(new schema::SubT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsSubOptions();
if (tfliteAttr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
auto weight_index = tfliteOp->inputs[1];
const auto &weight_tensor = tfliteTensors[weight_index];
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
// MS_LOGE("parse weight failed");
return RET_ERROR;
}
const auto &tfliteAttr = tfliteOp->builtin_options.AsSubOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
return RET_NULL_PTR;
MS_LOG(ERROR) << "parse weight failed";
}
// tfliteAttr->fused_activation_function
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
......
......@@ -25,8 +25,14 @@ STATUS TfliteTransposeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
// MS_LOGD("parse TfliteTransposeParser");
MS_LOG(DEBUG) << "parse TfliteTransposeParser";
std::unique_ptr<schema::TransposeT> attr(new schema::TransposeT());
const auto &tfliteAttr = tfliteOp->builtin_options.AsTransposeOptions();
if (tfliteAttr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_NULL_PTR;
}
if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->perm)) {
return RET_ERROR;
}
......@@ -34,18 +40,11 @@ STATUS TfliteTransposeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tf
auto weight_index = tfliteOp->inputs[1];
const auto &weight_tensor = tfliteTensors[weight_index];
std::vector<tflite::TensorT *> weight_tensors{weight_tensor.get()};
if (RET_OK != ParseWeight(weight_tensors, tfliteModelBuffer, tensor_cache, schema::Format_KHWC)) {
// MS_LOGE("parse weight failed");
MS_LOG(ERROR) << "parse weight failed";
return RET_ERROR;
}
const auto &tfliteAttr = tfliteOp->builtin_options.AsTransposeOptions();
if (tfliteAttr == nullptr) {
// MS_LOGE("get op: %s attr failed", op->name.c_str());
return RET_NULL_PTR;
}
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Transpose;
......
......@@ -56,12 +56,34 @@ std::map<tflite::BuiltinOperator, std::string> tfMsOpTypeMap{
{tflite::BuiltinOperator_SQUARED_DIFFERENCE, "SquaredDifference"},
{tflite::BuiltinOperator_FAKE_QUANT, "FakeQuant"},
{tflite::BuiltinOperator_TRANSPOSE_CONV, "DeConv2D"},
{tflite::BuiltinOperator_PAD, "Pad"},
{tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, "NearestNeighbor"},
{tflite::BuiltinOperator_RELU, "Relu"},
{tflite::BuiltinOperator_LEAKY_RELU, "LeakyRelu"},
{tflite::BuiltinOperator_SQUEEZE, "Squeeze"},
{tflite::BuiltinOperator_POW, "Pow"},
{tflite::BuiltinOperator_ARG_MIN, "Argmin"},
{tflite::BuiltinOperator_CEIL, "Ceil"},
{tflite::BuiltinOperator_EXPAND_DIMS, "ExpandDims"},
{tflite::BuiltinOperator_FILL, "Fill"},
{tflite::BuiltinOperator_DIV, "Div"},
{tflite::BuiltinOperator_FLOOR, "flOOR"},
{tflite::BuiltinOperator_FLOOR_DIV, "FloorDiv"},
{tflite::BuiltinOperator_FLOOR_MOD, "FloorMod"},
{tflite::BuiltinOperator_GATHER, "Gather"},
{tflite::BuiltinOperator_GATHER_ND, "GatherND"},
{tflite::BuiltinOperator_REVERSE_V2, "reverse"},
{tflite::BuiltinOperator_RANGE, "Range"},
{tflite::BuiltinOperator_RANK, "Rank"},
{tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, "LocalResponseNorm"},
{tflite::BuiltinOperator_GATHER, "GatherV2"},
};
// Maps a TFLite builtin operator enum to the MindSpore Lite op-type name.
// For operators without an explicit mapping entry, falls back to the raw
// TFLite enum name so callers can report exactly which op was unsupported
// instead of a generic placeholder.
std::string GetMSOpType(tflite::BuiltinOperator tfliteOpType) {
  auto iter = tfMsOpTypeMap.find(tfliteOpType);
  if (iter == tfMsOpTypeMap.end()) {
    // No mapping entry: surface the TFLite enum name for diagnostics.
    return tflite::EnumNameBuiltinOperator(tfliteOpType);
  }
  return iter->second;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册