提交 3e13711b 编写于 作者: W wangzhe

add abs, cos, exp, ... (27 in total) parsers for tflite

上级 54fbdb97
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include "mindspore/core/utils/log_adapter.h"
#include "common/common_test.h"
#include "tools/converter/converter_flags.h"
#include "schema/inner/model_generated.h"
#include "tools/converter/parser/tflite/tflite_converter.h"
#include "tools/converter/parser/tflite/tflite_exp_parser.h"
#include "src/kernel_registry.h"
#include "src/lite_kernel.h"
namespace mindspore {
// Test fixture for the tflite Exp operator parser.
class TestTfliteExpParser : public mindspore::Common {
 public:
  TestTfliteExpParser() = default;
};
// Converts a small tflite model containing an Exp op and checks that the
// converter produces a non-empty graph.
TEST_F(TestTfliteExpParser, ExpParser) {
  lite::converter::Flags flags;
  flags.modelFile = "./test_data/Exp.tflite";
  flags.fmk = lite::converter::FmkType_TFLITE;
  lite::TfliteConverter converter;
  schema::MetaGraphT *fb_graph = converter.Convert(&flags);
  // Convert returns nullptr on failure; the old code dereferenced the result
  // unconditionally, so a conversion failure crashed the test instead of
  // failing it.
  ASSERT_NE(fb_graph, nullptr);
  const auto &nodes = fb_graph->nodes;
  // The old `nodes.back()` discarded its result and asserted nothing; an
  // empty node list would also have been undefined behavior.
  ASSERT_FALSE(nodes.empty());
  EXPECT_NE(nodes.back(), nullptr);
  // Convert hands ownership of the graph to the caller.
  delete fb_graph;
}
} // namespace mindspore
......@@ -12,6 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Revision History
// Version 0: Initial version.
// Version 1: Add subgraphs to schema.
// Version 2: Rename operators to conform to NN API.
// Version 3: Move buffer data from Model.Subgraph.Tensors to Model.Buffers.
namespace tflite;
// This corresponds to the version.
......@@ -34,6 +40,7 @@ enum TensorType : byte {
INT16 = 7,
COMPLEX64 = 8,
INT8 = 9,
FLOAT64 = 10,
}
// Custom quantization parameters for experimenting with new quantization
......@@ -74,6 +81,105 @@ table QuantizationParameters {
quantized_dimension:int;
}
// Sparse tensors.
// We use a modification of the TACO format.
// Reference: http://tensor-compiler.org/kjolstad-oopsla17-tensor-compiler.pdf
//
// To encode a conceptual n-dimensional dense tensor with dims (d0, ..., dn-1),
// potentially with a k-dimensional block (0 <= k <= n) with dims
// (dn, ..., dn+k-1), the format needs to specify:
// 1. In what order to traverse these dimensions. For example, to store a 2-D
// matrix in row major order, the traversal order would be (d0, d1),
// whereas to store it in column major order, the traversal order would be
// (d1, d0). If the 2-D matrix has a 2-D inner block, the traversal order
// could be (d0, d1, d2, d3).
// 2. How each block dimension in (dn, ..., dn+k-1) maps to the original
// tensor dimension in (d0, ..., dn-1).
// 3. In the traversal order defined above, the format (dense vs. sparse) and
// index metadata for each dimension. For a dense dimension, this is just
// the size of that dimension. For a sparse dimension, it's the same as
// the compressed index defined in the Compressed Sparse Row (CSR) format.
// (http://scipy-lectures.org/advanced/scipy_sparse/csr_matrix.html)
// The storage type for a dimension. Currently we support:
// 1. DENSE: each coordinate in this dimension is stored implicitly.
// 2. SPARSE_CSR: only the coordinates with non-zero elements are stored. The
// compression technique is the same as the one used by CSR.
// More types like a sparse dimension with a different compression technique
// could be added to the list in the future.
enum DimensionType : byte {
DENSE = 0,
SPARSE_CSR = 1,
}
table Int32Vector {
values:[int];
}
table Uint16Vector {
values:[ushort] (force_align: 4);
}
table Uint8Vector {
values:[ubyte] (force_align: 4);
}
// Variable-typed buffer to store the index metadata for a sparse dimension.
// The widest type is Int32 instead of UInt32 because tensor's shape is a int32
// vector. We don't want the per-dimensional index to overflow that range.
union SparseIndexVector {
Int32Vector,
Uint16Vector,
Uint8Vector
}
table DimensionMetadata {
// Whether a dimension is dense or sparse.
format:DimensionType;
// Index metadata used for a dimension.
// - If format is DimensionType.DENSE then we use the dense_size field to
// store the size of that dimension. Each index in that dimension is
// stored implicitly.
// - If format is DimensionType.SPARSE_CSR then we use array_segments and
// array_indices to encode that dimension. array_segments represents how
// to segment the indices array, each segment corresponds to one element
// in the previous dimension. array_indices represents the index of the
// non-zero elements within this dimension (as those in the CSR matrix
// format, where the first array is row pointers and the second array is
// column indices).
dense_size:int;
array_segments:SparseIndexVector;
array_indices:SparseIndexVector;
}
// Parameters to encode a sparse TfLite tensor.
table SparsityParameters {
// The traversal order of the dimensions defined in the `shape` field of the
// conceptual dense tensor. For a n-dimensional tensors with dims (d0, d1,
// ..., dn-1),
// - if not block sparse, the traversal_order is just a permutation of (d0,
// ..., dn-1). For example, a 2-D matrix stored in row-major order would
// have traversal_order = (d0, d1).
// - if block sparse with a k-dimensional block (0 <= k <= n), the
// traversal_order has n + k elements. The first n elements are still a
// permutation of (d0, ..., dn-1). The last k elements are a permutation
// of (dn, ..., dn+k-1), defining how to traverse a block internally. For
// example, a 2-D matrix with 2-D blocks, both stored in row-major order
// would have traversal_order = (d0, d1, d2, d3).
traversal_order:[int];
// For an n-dimensional tensor with a k-dimensional block (0 <= k <= n),
// stores how a block dimension in (dn, ..., dn+k-1) maps to the original
// tensor dimension in (d0, ..., dn).
// It's stored in the order of (dn, ..., dn+k-1).
// If not block-sparse, this field is NULL.
block_map:[int];
// In the traversal order defined above, the metadata needed for
// each dimension to locate the non-zero values in the original dense tensor.
// The size of the dim_metadata array = the size of the traversal_order array
// = n + k.
dim_metadata:[DimensionMetadata];
}
table Tensor {
// The tensor shape. The meaning of each entry is operator-specific but
// builtin ops use: [batch size, height, width, number of channels] (That's
......@@ -93,12 +199,21 @@ table Tensor {
quantization:QuantizationParameters; // Optional.
is_variable:bool = false;
// Parameters to encode a sparse tensor. See the example in
// tensorflow/lite/testdata/sparse_tensor.json.
sparsity:SparsityParameters; // Optional.
// Encodes `shape` with unknown dimensions. Unknown dimensions are
// represented with -1.
shape_signature:[int]; // Optional.
}
// A list of builtin operators. Builtin operators are slightly faster than custom
// ones, but not by much. Moreover, while custom operators accept an opaque
// object containing configuration parameters, builtins have a predetermined
// set of acceptable options.
enum BuiltinOperator : byte {
ADD = 0,
AVERAGE_POOL_2D = 1,
......@@ -229,8 +344,14 @@ enum BuiltinOperator : byte {
WHILE = 119,
NON_MAX_SUPPRESSION_V4 = 120,
NON_MAX_SUPPRESSION_V5 = 121,
SCATTER_ND = 122,
SELECT_V2 = 123,
DENSIFY = 124,
SEGMENT_SUM = 125,
BATCH_MATMUL = 126
}
// Options for the builtin operators.
union BuiltinOptions {
Conv2DOptions,
......@@ -328,7 +449,12 @@ union BuiltinOptions {
WhileOptions,
DepthToSpaceOptions,
NonMaxSuppressionV4Options,
NonMaxSuppressionV5Options
NonMaxSuppressionV5Options,
ScatterNdOptions,
SelectV2Options,
DensifyOptions,
SegmentSumOptions,
BatchMatMulOptions
}
enum Padding : byte { SAME, VALID }
......@@ -365,6 +491,9 @@ table DepthwiseConv2DOptions {
padding:Padding;
stride_w:int;
stride_h:int;
// `depth_multiplier` is redundant. It's used by CPU kernels in
// TensorFlow 2.0 or below, but ignored in versions above.
// See comments in lite/c/builtin_op_data.h for more details.
depth_multiplier:int;
fused_activation_function:ActivationFunctionType;
// Parameters for DepthwiseConv version 2 or above.
......@@ -391,17 +520,22 @@ table LSHProjectionOptions {
table SVDFOptions {
rank:int;
fused_activation_function:ActivationFunctionType;
// For weights-only quantization, use asymmetric quantization for non
// constant inputs at evaluation time.
asymmetric_quantize_inputs:bool;
}
// An implementation of TensorFlow RNNCell.
table RNNOptions {
fused_activation_function:ActivationFunctionType;
asymmetric_quantize_inputs:bool;
}
// An implementation of TensorFlow dynamic_rnn with RNNCell.
table SequenceRNNOptions {
time_major:bool;
fused_activation_function:ActivationFunctionType;
asymmetric_quantize_inputs:bool;
}
// An implementation of TensorFlow bidirectional_dynamic_rnn with RNNCell.
......@@ -409,6 +543,7 @@ table BidirectionalSequenceRNNOptions {
time_major:bool;
fused_activation_function:ActivationFunctionType;
merge_outputs: bool;
asymmetric_quantize_inputs:bool;
}
enum FullyConnectedOptionsWeightsFormat: byte {
......@@ -428,6 +563,11 @@ table FullyConnectedOptions {
// If set to true, then the number of dimension is preserved. Furthermore,
// all but the last dimension of the input and output shapes will be equal.
keep_num_dims: bool;
// Parameters for FullyConnected version 7 or above.
// If set to true, then weights-only op will use asymmetric quantization for
// inputs.
asymmetric_quantize_inputs: bool;
}
table SoftmaxOptions {
......@@ -476,6 +616,9 @@ table LSTMOptions {
// Parameters for LSTM version 2 or above.
// Basic kernel is only supported in version 2 or above.
kernel_type: LSTMKernelType = FULL;
// Parameters for LSTM version 4 or above.
asymmetric_quantize_inputs: bool;
}
// An implementation of TensorFlow dynamic_rnn with LSTMCell.
......@@ -486,6 +629,9 @@ table UnidirectionalSequenceLSTMOptions {
// If true then first dimension is sequence, otherwise batch.
time_major:bool;
// Parameter for Unidirectional Sequence LSTM version 4.
asymmetric_quantize_inputs:bool;
}
table BidirectionalSequenceLSTMOptions {
......@@ -502,16 +648,21 @@ table BidirectionalSequenceLSTMOptions {
// Version 1 implementations assumed time_major to be true, so this default
// value should never change.
time_major: bool = true;
// Parameters for version 3 or above.
asymmetric_quantize_inputs:bool;
}
table ResizeBilinearOptions {
new_height: int (deprecated);
new_width: int (deprecated);
align_corners: bool;
half_pixel_centers: bool;
}
table ResizeNearestNeighborOptions {
align_corners: bool;
half_pixel_centers: bool;
}
// A call operation options
......@@ -806,6 +957,23 @@ table NonMaxSuppressionV4Options {
table NonMaxSuppressionV5Options {
}
table ScatterNdOptions {
}
table SelectV2Options {
}
table DensifyOptions {
}
table SegmentSumOptions {
}
table BatchMatMulOptions {
adj_x:bool;
adj_y:bool;
}
// An OperatorCode can be an enum value (BuiltinOperator) if the operator is a
// builtin, or a string if the operator is custom.
table OperatorCode {
......@@ -830,7 +998,7 @@ table Operator {
// complicate map lookups.
opcode_index:uint;
// Optional input and output tensors are indicated by -1.
// Optional inputs are indicated by -1.
inputs:[int];
outputs:[int];
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_abs_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite Abs operator into a mindspore PrimitiveType_Abs primitive.
// Abs carries no attributes, so only the primitive type is populated.
// Returns RET_OK; a null `op` is tolerated (nothing to fill in).
STATUS TfliteAbsParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteAbsParser";
  // make_unique keeps ownership explicit and matches the style already used
  // for op->primitive below (the old code mixed raw `new` with make_unique).
  auto attr = std::make_unique<schema::AbsT>();
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Abs;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve the historical success-on-null behavior, but record it so a
    // dropped attribute is diagnosable.
    MS_LOG(WARNING) << "op is null, Abs primitive is dropped";
  }
  return RET_OK;
}

// Self-registration: the registry takes ownership of the parser instance,
// which intentionally lives for the lifetime of the process.
TfliteNodeRegister g_TfliteAbsParser("Abs", new TfliteAbsParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_ABS_PARSER_H
#define PREDICT_TFLITE_ABS_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "Abs" operator into the mindspore schema.
class TfliteAbsParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "Abs".
  TfliteAbsParser() : TfliteNodeParser("Abs") {}

  // Fills `op` with a PrimitiveType_Abs primitive parsed from `tfliteOp`.
  // Abs has no attributes, so the tensor/buffer/opcode tables and the
  // quantization flag are unused here.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_ABS_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_batch_to_sapce_nd_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite BatchToSpaceND operator: reads the block-shape tensor
// (input 1) and the crops tensor (input 2) from the model buffers and stores
// them as BatchToSpace attributes on `op`.
STATUS TfliteBatchToSpaceNDParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                         const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                         const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                         const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                         schema::CNodeT *op,
                                         TensorCache *tensor_cache,
                                         bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteBatchToSpaceNDParser";
  // inputs[1] and inputs[2] are indexed below; guard against a malformed
  // model instead of reading out of bounds (the old code did not check).
  if (tfliteOp->inputs.size() < 3) {
    MS_LOG(ERROR) << "BatchToSpaceNd expects 3 inputs but got " << tfliteOp->inputs.size();
    return RET_ERROR;
  }
  // make_unique for consistency with op->primitive below.
  auto attr = std::make_unique<schema::BatchToSpaceT>();
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->blockShape)) {
    MS_LOG(ERROR) << "BatchToSpaceNd get blockShape attr failed";
    return RET_ERROR;
  }
  if (GetTfliteData(tfliteOp->inputs[2], tfliteTensors, tfliteModelBuffer, attr->crops)) {
    MS_LOG(ERROR) << "BatchToSpaceNd get crops attr failed";
    return RET_ERROR;
  }
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_BatchToSpace;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteBatchToSpaceNDParser("BatchToSpaceND", new TfliteBatchToSpaceNDParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_BATCH_TO_SPACE_ND_PARSER_H
#define PREDICT_TFLITE_BATCH_TO_SPACE_ND_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "BatchToSpaceND" operator into the mindspore
// BatchToSpace schema primitive.
class TfliteBatchToSpaceNDParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "BatchToSpaceND".
  TfliteBatchToSpaceNDParser() : TfliteNodeParser("BatchToSpaceND") {}

  // Fills `op` with a PrimitiveType_BatchToSpace primitive; reads the
  // block-shape (input 1) and crops (input 2) tensors from the model buffers.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_BATCH_TO_SPACE_ND_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_cos_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite Cos operator into a mindspore PrimitiveType_Cos primitive.
// Cos carries no attributes, so only the primitive type is populated.
STATUS TfliteCosParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteCosParser";
  // make_unique for consistency with op->primitive below (old code used raw new).
  auto attr = std::make_unique<schema::CosT>();
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Cos;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve success-on-null behavior but make the dropped primitive visible.
    MS_LOG(WARNING) << "op is null, Cos primitive is dropped";
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteCosParser("Cos", new TfliteCosParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_COS_PARSER_H
#define PREDICT_TFLITE_COS_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "Cos" operator into the mindspore schema.
class TfliteCosParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "Cos".
  TfliteCosParser() : TfliteNodeParser("Cos") {}

  // Fills `op` with a PrimitiveType_Cos primitive parsed from `tfliteOp`.
  // Cos has no attributes, so the remaining tables are unused here.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_COS_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_exp_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite Exp operator into a mindspore PrimitiveType_Exp primitive.
// Exp carries no attributes, so only the primitive type is populated.
STATUS TfliteExpParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op,
                              TensorCache *tensor_cache,
                              bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteExpParser";
  // make_unique for consistency with op->primitive below (old code used raw new).
  auto attr = std::make_unique<schema::ExpT>();
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Exp;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve success-on-null behavior but make the dropped primitive visible.
    MS_LOG(WARNING) << "op is null, Exp primitive is dropped";
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteExpParser("Exp", new TfliteExpParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_EXP_PARSER_H
#define PREDICT_TFLITE_EXP_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "Exp" operator into the mindspore schema.
class TfliteExpParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "Exp".
  TfliteExpParser() : TfliteNodeParser("Exp") {}

  // Fills `op` with a PrimitiveType_Exp primitive parsed from `tfliteOp`.
  // Exp has no attributes, so the remaining tables are unused here.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_EXP_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_hard_swish_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite HardSwish operator. HardSwish is represented in the
// mindspore schema as an Activation primitive with type HSWISH.
STATUS TfliteHardSwishParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteHardSwishParser";
  // make_unique for consistency with op->primitive below (old code used raw new).
  auto attr = std::make_unique<schema::ActivationT>();
  attr->type = schema::ActivationType_HSWISH;
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Activation;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve success-on-null behavior but make the dropped primitive visible.
    MS_LOG(WARNING) << "op is null, HardSwish primitive is dropped";
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteHardSwishParser("HardSwish", new TfliteHardSwishParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_HARD_SWISH_PARSER_H
#define PREDICT_TFLITE_HARD_SWISH_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "HardSwish" operator into a mindspore
// Activation primitive with type HSWISH.
class TfliteHardSwishParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "HardSwish".
  TfliteHardSwishParser() : TfliteNodeParser("HardSwish") {}

  // Fills `op` with a PrimitiveType_Activation primitive whose activation
  // type is HSWISH.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_HARD_SWISH_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_log_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite Log operator into a mindspore PrimitiveType_Log primitive.
// Log carries no attributes, so only the primitive type is populated.
STATUS TfliteLogParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteLogParser";
  // make_unique for consistency with op->primitive below (old code used raw new).
  auto attr = std::make_unique<schema::LogT>();
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_Log;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve success-on-null behavior but make the dropped primitive visible.
    MS_LOG(WARNING) << "op is null, Log primitive is dropped";
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteLogParser("Log", new TfliteLogParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LOG_PARSER_H
#define PREDICT_TFLITE_LOG_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser that converts a tflite "Log" operator into the mindspore schema.
class TfliteLogParser : public TfliteNodeParser {
 public:
  // Registers the parser under the tflite op name "Log".
  TfliteLogParser() : TfliteNodeParser("Log") {}

  // Fills `op` with a PrimitiveType_Log primitive parsed from `tfliteOp`.
  // Log has no attributes, so the remaining tables are unused here.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_LOG_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_logical_and_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite LogicalAnd operator into a mindspore PrimitiveType_LogicalAnd
// primitive. LogicalAnd carries no attributes.
STATUS TfliteLogicalAndParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                     const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                     const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                     const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                     schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteLogicalAndParser";
  // make_unique for consistency with op->primitive below (old code used raw new).
  auto attr = std::make_unique<schema::LogicalAndT>();
  if (op != nullptr) {
    op->primitive = std::make_unique<schema::PrimitiveT>();
    op->primitive->value.type = schema::PrimitiveType_LogicalAnd;
    // Ownership of attr transfers to the primitive union.
    op->primitive->value.value = attr.release();
  } else {
    // Preserve success-on-null behavior but make the dropped primitive visible.
    MS_LOG(WARNING) << "op is null, LogicalAnd primitive is dropped";
  }
  return RET_OK;
}

// Self-registration; the registry owns the instance for the process lifetime.
TfliteNodeRegister g_TfliteLogicalAndParser("LogicalAnd", new TfliteLogicalAndParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LOGICAL_AND_PARSER_H
#define PREDICT_TFLITE_LOGICAL_AND_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "LogicalAnd" operator; produces a
// schema::LogicalAnd primitive.  An instance is registered with the parser
// registry in the matching .cc file via TfliteNodeRegister.
class TfliteLogicalAndParser : public TfliteNodeParser {
 public:
  TfliteLogicalAndParser() : TfliteNodeParser("LogicalAnd") {}
  // Fills op->primitive with a LogicalAnd primitive.  The tensor/buffer/opset
  // parameters are part of the common parser interface; this op has no
  // attributes to read from them.  Returns RET_OK on success.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_LOGICAL_AND_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_logical_not_parser.h"
namespace mindspore {
namespace lite {
// Maps the TFLite "LogicalNot" operator onto the schema::LogicalNot
// primitive.  The op has no options, so no model buffers are consulted.
STATUS TfliteLogicalNotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                     const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                     const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                     const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                     schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteLogicalNotParser";
  auto attr = std::make_unique<schema::LogicalNotT>();
  if (op == nullptr) {
    // Nothing to fill in; preserve the original silent-success behaviour.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_LogicalNot;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteLogicalNotParser("LogicalNot", new TfliteLogicalNotParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LOGICAL_NOT_PARSER_H
#define PREDICT_TFLITE_LOGICAL_NOT_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "LogicalNot" operator; produces a
// schema::LogicalNot primitive.  Registered via TfliteNodeRegister in the
// matching .cc file.
class TfliteLogicalNotParser : public TfliteNodeParser {
 public:
  TfliteLogicalNotParser() : TfliteNodeParser("LogicalNot") {}
  // Fills op->primitive with a LogicalNot primitive.  This op has no
  // attributes, so the tensor/buffer/opset parameters are unused here.
  // Returns RET_OK on success.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_LOGICAL_NOT_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_logical_or_parser.h"
namespace mindspore {
namespace lite {
// Maps the TFLite "LogicalOr" operator onto the schema::LogicalOr
// primitive.  The op has no options, so no model buffers are consulted.
STATUS TfliteLogicalOrParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteLogicalOrParser";
  auto attr = std::make_unique<schema::LogicalOrT>();
  if (op == nullptr) {
    // Nothing to fill in; preserve the original silent-success behaviour.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_LogicalOr;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteLogicalOrParser("LogicalOr", new TfliteLogicalOrParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_LOGICAL_OR_PARSER_H
#define PREDICT_TFLITE_LOGICAL_OR_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "LogicalOr" operator; produces a
// schema::LogicalOr primitive.  Registered via TfliteNodeRegister in the
// matching .cc file.
class TfliteLogicalOrParser : public TfliteNodeParser {
 public:
  TfliteLogicalOrParser() : TfliteNodeParser("LogicalOr") {}
  // Fills op->primitive with a LogicalOr primitive.  This op has no
  // attributes, so the tensor/buffer/opset parameters are unused here.
  // Returns RET_OK on success.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_LOGICAL_OR_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_maximum_parser.h"
namespace mindspore {
namespace lite {
// Maps the TFLite "Maximum" operator onto the schema::Maximum primitive.
// The op carries no attributes, so only the primitive type is set.
STATUS TfliteMaximumParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                  const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                  schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteMaximumParser";
  auto attr = std::make_unique<schema::MaximumT>();
  if (op == nullptr) {
    // No node to populate; preserve the original silent-success behaviour.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Maximum;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteMaximumParser("Maximum", new TfliteMaximumParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_MAXIMUM_PARSER_H
#define PREDICT_TFLITE_MAXIMUM_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "Maximum" operator; produces a schema::Maximum
// primitive.  Registered via TfliteNodeRegister in the matching .cc file.
class TfliteMaximumParser : public TfliteNodeParser {
 public:
  TfliteMaximumParser() : TfliteNodeParser("Maximum") {}
  // Fills op->primitive with a Maximum primitive.  This op has no
  // attributes, so the tensor/buffer/opset parameters are unused here.
  // Returns RET_OK on success.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_MAXIMUM_PARSER_H
......@@ -16,7 +16,7 @@
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_mean_parser.h"
#include "tools/converter/parser/tflite/tflite_mean_parser.h"
namespace mindspore {
namespace lite {
......@@ -30,10 +30,12 @@ STATUS TfliteMeanParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteO
const auto &tflite_attr = tfliteOp->builtin_options.AsReducerOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
return RET_ERROR;
}
attr->keepDims = tflite_attr->keep_dims;
if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axis)) {
MS_LOG(ERROR) << "Mean get axis attr failed";
return RET_ERROR;
}
......
......@@ -19,8 +19,8 @@
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_minimum_parser.h"
namespace mindspore {
namespace lite {
// Maps the TFLite "Minimum" operator onto the schema::Minimum primitive.
// The op carries no attributes, so only the primitive type is set.
STATUS TfliteMinimumParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                  const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                  const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                  const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                  schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteMinimumParser";
  auto attr = std::make_unique<schema::MinimumT>();
  if (op == nullptr) {
    // No node to populate; preserve the original silent-success behaviour.
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Minimum;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteMinimumParser("Minimum", new TfliteMinimumParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_MINIMUM_PARSER_H
#define PREDICT_TFLITE_MINIMUM_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "Minimum" operator; produces a schema::Minimum
// primitive.  Registered via TfliteNodeRegister in the matching .cc file.
class TfliteMinimumParser : public TfliteNodeParser {
 public:
  TfliteMinimumParser() : TfliteNodeParser("Minimum") {}
  // Fills op->primitive with a Minimum primitive.  This op has no
  // attributes, so the tensor/buffer/opset parameters are unused here.
  // Returns RET_OK on success.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_MINIMUM_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_one_hot_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "OneHot" operator into a schema::OneHot primitive,
// copying the `axis` attribute from the operator's OneHotOptions.
//
// Returns RET_NULL_PTR when |op| is null or the builtin options are missing,
// RET_OK otherwise.
//
// Fix: the original dereferenced op->name in the error path BEFORE its
// `op != nullptr` check near the end — a latent null dereference.  The null
// check is now done up front.
STATUS TfliteOneHotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                 const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                 const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                 const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                 schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteOneHotParser";
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  auto attr = std::make_unique<schema::OneHotT>();
  const auto &tflite_attr = tfliteOp->builtin_options.AsOneHotOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  attr->axis = tflite_attr->axis;
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_OneHot;
  op->primitive->value.value = attr.release();  // PrimitiveT takes ownership
  return RET_OK;
}
TfliteNodeRegister g_TfliteOneHotParser("OneHot", new TfliteOneHotParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_ONE_HOT_PARSER_H
#define PREDICT_TFLITE_ONE_HOT_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "OneHot" operator; produces a schema::OneHot
// primitive with the axis taken from the operator's OneHotOptions.
// Registered via TfliteNodeRegister in the matching .cc file.
class TfliteOneHotParser : public TfliteNodeParser {
 public:
  TfliteOneHotParser() : TfliteNodeParser("OneHot") {}
  // Fills op->primitive with a OneHot primitive.  Reads the axis attribute
  // from tfliteOp's builtin OneHotOptions.  Returns RET_OK on success, an
  // error status when the options are missing.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_ONE_HOT_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_reduce_any_parser.h"
namespace mindspore {
namespace lite {
// Parser stub for the TFLite "ReduceAny" operator.
//
// REDUCE_ANY is not yet supported: there is no schema::ReduceMode for it, so
// this parser always fails with RET_NOT_FIND_OP after validating the builtin
// options.
//
// Fix: the original had a full Reduce-style body (axes via GetTfliteData,
// keepDims, primitive fill) AFTER the unconditional `return RET_NOT_FIND_OP`,
// i.e. unreachable dead code.  The dead code is removed; when support is
// added, mirror TfliteReduceMaxParser::Parse with the new mode.
STATUS TfliteReduceAnyParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteReduceAnyParser";
  const auto &tflite_attr = tfliteOp->builtin_options.AsReducerOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  MS_LOG(ERROR) << "ms-lite haven't supported REDUCE_ANY now";
  return RET_NOT_FIND_OP;
}
TfliteNodeRegister g_TfliteReduceAnyParser("ReduceAny", new TfliteReduceAnyParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_REDUCE_ANY_PARSER_H
#define PREDICT_TFLITE_REDUCE_ANY_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "ReduceAny" operator.  NOTE: REDUCE_ANY is not yet
// supported by ms-lite; the .cc implementation currently rejects it with
// RET_NOT_FIND_OP.  Registered via TfliteNodeRegister in the matching .cc.
class TfliteReduceAnyParser : public TfliteNodeParser {
 public:
  TfliteReduceAnyParser() : TfliteNodeParser("ReduceAny") {}
  // Validates the operator's ReducerOptions, then fails with an
  // "unsupported" status (see the .cc file).
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_REDUCE_ANY_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_reduce_max_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "ReduceMax" operator into a schema::Reduce primitive
// with mode ReduceMode_ReduceMax.  Axes come from the op's second input
// tensor; keepDims comes from the builtin ReducerOptions.
STATUS TfliteReduceMaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteReduceMaxParser";
  auto attr = std::make_unique<schema::ReduceT>();
  const auto &reducer_options = tfliteOp->builtin_options.AsReducerOptions();
  if (reducer_options == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  attr->mode = schema::ReduceMode_ReduceMax;
  // inputs[1] holds the axis tensor.
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axes)) {
    MS_LOG(ERROR) << "REDUCE_MAX get axes attr failed";
    return RET_ERROR;
  }
  attr->keepDims = reducer_options->keep_dims;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Reduce;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteReduceMaxParser("ReduceMax", new TfliteReduceMaxParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_REDUCE_MAX_PARSER_H
#define PREDICT_TFLITE_REDUCE_MAX_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "ReduceMax" operator; produces a schema::Reduce
// primitive with mode ReduceMode_ReduceMax.  Registered via
// TfliteNodeRegister in the matching .cc file.
class TfliteReduceMaxParser : public TfliteNodeParser {
 public:
  TfliteReduceMaxParser() : TfliteNodeParser("ReduceMax") {}
  // Fills op->primitive with a Reduce primitive.  Reads axes from the op's
  // second input tensor and keep_dims from the builtin ReducerOptions.
  // Returns RET_OK on success, an error status otherwise.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_REDUCE_MAX_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_reduce_min_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "ReduceMin" operator into a schema::Reduce primitive
// with mode ReduceMode_ReduceMin.  Axes come from the op's second input
// tensor; keepDims comes from the builtin ReducerOptions.
STATUS TfliteReduceMinParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteReduceMinParser";
  auto attr = std::make_unique<schema::ReduceT>();
  const auto &reducer_options = tfliteOp->builtin_options.AsReducerOptions();
  if (reducer_options == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  attr->mode = schema::ReduceMode_ReduceMin;
  // inputs[1] holds the axis tensor.
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axes)) {
    MS_LOG(ERROR) << "REDUCE_MIN get axes attr failed";
    return RET_ERROR;
  }
  attr->keepDims = reducer_options->keep_dims;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Reduce;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteReduceMinParser("ReduceMin", new TfliteReduceMinParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_REDUCE_MIN_PARSER_H
#define PREDICT_TFLITE_REDUCE_MIN_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "ReduceMin" operator; produces a schema::Reduce
// primitive with mode ReduceMode_ReduceMin.  Registered via
// TfliteNodeRegister in the matching .cc file.
class TfliteReduceMinParser : public TfliteNodeParser {
 public:
  TfliteReduceMinParser() : TfliteNodeParser("ReduceMin") {}
  // Fills op->primitive with a Reduce primitive.  Reads axes from the op's
  // second input tensor and keep_dims from the builtin ReducerOptions.
  // Returns RET_OK on success, an error status otherwise.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_REDUCE_MIN_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_reduce_prod_parser.h"
namespace mindspore {
namespace lite {
// Converts a TFLite "ReduceProd" operator into a schema::Reduce primitive
// with mode ReduceMode_ReduceProd.  Axes come from the op's second input
// tensor; keepDims comes from the builtin ReducerOptions.
STATUS TfliteReduceProdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                     const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                     const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                     const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                     schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteReduceProdParser";
  auto attr = std::make_unique<schema::ReduceT>();
  const auto &reducer_options = tfliteOp->builtin_options.AsReducerOptions();
  if (reducer_options == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  attr->mode = schema::ReduceMode_ReduceProd;
  // inputs[1] holds the axis tensor.
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axes)) {
    MS_LOG(ERROR) << "REDUCE_PROD get axes attr failed";
    return RET_ERROR;
  }
  attr->keepDims = reducer_options->keep_dims;
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Reduce;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteReduceProdParser("ReduceProd", new TfliteReduceProdParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_REDUCE_PROD_PARSER_H
#define PREDICT_TFLITE_REDUCE_PROD_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Parser for the TFLite "ReduceProd" operator; produces a schema::Reduce
// primitive with mode ReduceMode_ReduceProd.  Registered via
// TfliteNodeRegister in the matching .cc file.
class TfliteReduceProdParser : public TfliteNodeParser {
 public:
  TfliteReduceProdParser() : TfliteNodeParser("ReduceProd") {}
  // Fills op->primitive with a Reduce primitive.  Reads axes from the op's
  // second input tensor and keep_dims from the builtin ReducerOptions.
  // Returns RET_OK on success, an error status otherwise.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_REDUCE_PROD_PARSER_H
......@@ -16,7 +16,7 @@
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_rsqrt_parser.h"
#include "tools/converter/parser/tflite/tflite_rsqrt_parser.h"
namespace mindspore {
namespace lite {
......
......@@ -19,8 +19,8 @@
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include <utility>
#include "tools/converter/parser/tflite/tflite_scatter_nd_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite ScatterNd operator into a ms-lite ScatterND CNode.
//
// The parser also rotates op->inputIndex so that the former inputs[2]
// becomes inputs[0] — presumably mapping tflite's (indices, updates, shape)
// input order onto the order ms-lite expects; TODO confirm against the
// ms-lite ScatterND kernel.
STATUS TfliteScatterNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                    const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                    const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                    const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                    schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteScatterNdParser";
  // op is dereferenced below (op->name, op->inputIndex); the original only
  // null-checked it at the very end, after those dereferences. Fail fast.
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::ScatterNDT> attr(new schema::ScatterNDT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsScatterNdOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  // Rotate [a, b, c] -> [c, a, b].
  std::swap(op->inputIndex[0], op->inputIndex[2]);
  std::swap(op->inputIndex[1], op->inputIndex[2]);
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_ScatterND;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteScatterNdParser("ScatterNd", new TfliteScatterNdParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SCATTER_ND_PARSER_H
#define PREDICT_TFLITE_SCATTER_ND_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite ScatterNd operator into the ms-lite
// schema; registered under the name "ScatterNd".
class TfliteScatterNdParser : public TfliteNodeParser {
 public:
  TfliteScatterNdParser() : TfliteNodeParser("ScatterNd") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SCATTER_ND_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_shape_parser.h"
namespace mindspore {
namespace lite {
// Converts a tflite Shape operator into a ms-lite Shape primitive.
// The tflite ShapeOptions.out_type attribute has no ms-lite counterpart
// and is intentionally dropped.
STATUS TfliteShapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteShapeParser";
  auto attr = std::make_unique<schema::ShapeT>();
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Shape;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteShapeParser("Shape", new TfliteShapeParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SHAPE_PARSER_H
#define PREDICT_TFLITE_SHAPE_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite Shape operator into the ms-lite schema;
// registered under the name "Shape".
class TfliteShapeParser : public TfliteNodeParser {
 public:
  TfliteShapeParser() : TfliteNodeParser("Shape") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SHAPE_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_sin_parser.h"
namespace mindspore {
namespace lite {
// Converts a tflite Sin operator into a ms-lite Sin primitive.
// Sin carries no operator attributes, so only an empty SinT is installed.
STATUS TfliteSinParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSinParser";
  auto attr = std::make_unique<schema::SinT>();
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Sin;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSinParser("Sin", new TfliteSinParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SIN_PARSER_H
#define PREDICT_TFLITE_SIN_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite Sin operator into the ms-lite schema;
// registered under the name "Sin".
class TfliteSinParser : public TfliteNodeParser {
 public:
  TfliteSinParser() : TfliteNodeParser("Sin") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SIN_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_split_parser.h"
namespace mindspore {
namespace lite {
// Parses the tflite Split operator into a ms-lite Split primitive.
//
// tflite Split inputs: inputs[0] is the scalar split axis, inputs[1] is the
// tensor being split; SplitOptions carries num_splits. tflite Split always
// produces equally sized pieces, so each sizeSplits entry is
// tensor_shape[axis] / num_splits.
STATUS TfliteSplitParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                schema::CNodeT *op,
                                TensorCache *tensor_cache,
                                bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSplitParser";
  // op->name is dereferenced in the error path below; the original only
  // null-checked op at the very end. Fail fast instead.
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::SplitT> attr(new schema::SplitT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsSplitOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  const auto tensor_shape = tfliteTensors[tfliteOp->inputs[1]].get()->shape;
  auto axis =
    *(reinterpret_cast<int32_t *>(tfliteModelBuffer[tfliteTensors[tfliteOp->inputs[0]]->buffer]->data.data()));
  if (axis < 0) {
    axis += tensor_shape.size();
  }
  // Cast avoids a signed/unsigned comparison (axis is int32_t).
  if (axis >= static_cast<int32_t>(tensor_shape.size())) {
    MS_LOG(ERROR) << "axis value too large";
    return RET_ERROR;
  }
  attr->splitDim = axis;
  auto num_splits = tflite_attr->num_splits;
  // Guard the modulo/division below against a zero or negative split count.
  if (num_splits <= 0) {
    MS_LOG(ERROR) << "num_splits must be positive";
    return RET_ERROR;
  }
  if (tensor_shape[axis] % num_splits != 0) {
    MS_LOG(ERROR) << "num_splits can't divide tensor's length at axis " << axis;
    return RET_ERROR;
  }
  attr->numberSplit = num_splits;
  // Was `i <= num_splits`, which pushed num_splits + 1 size entries.
  for (int i = 0; i < num_splits; i++) {
    attr->sizeSplits.push_back(tensor_shape[axis] / num_splits);
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Split;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSplitParser("Split", new TfliteSplitParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SPLIT_PARSER_H
#define PREDICT_TFLITE_SPLIT_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite Split operator into the ms-lite schema;
// registered under the name "Split".
class TfliteSplitParser : public TfliteNodeParser {
 public:
  TfliteSplitParser() : TfliteNodeParser("Split") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SPLIT_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_split_v_parser.h"
namespace mindspore {
namespace lite {
// Parses the tflite SplitV operator into a ms-lite Split primitive.
//
// tflite SplitV inputs: inputs[0] is the tensor being split, inputs[1] the
// per-piece sizes, inputs[2] the scalar split axis.
STATUS TfliteSplitVParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                 const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                 const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                 const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                 schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSplitVParser";
  // op->name is dereferenced in the error path below; the original only
  // null-checked op at the very end. Fail fast instead.
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::SplitT> attr(new schema::SplitT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsSplitVOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  attr->numberSplit = tflite_attr->num_splits;
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->sizeSplits)) {
    MS_LOG(ERROR) << "SPLIT_V get sizeSplits attr failed";
    return RET_ERROR;
  }
  auto axis =
    *(reinterpret_cast<int32_t *>(tfliteModelBuffer[tfliteTensors[tfliteOp->inputs[2]]->buffer]->data.data()));
  const auto tensor_shape = tfliteTensors[tfliteOp->inputs[0]].get()->shape;
  if (axis < 0) {
    axis += tensor_shape.size();
  }
  // Cast avoids a signed/unsigned comparison (axis is int32_t).
  if (axis >= static_cast<int32_t>(tensor_shape.size())) {
    MS_LOG(ERROR) << "axis value too large";
    return RET_ERROR;
  }
  // Bug fix: axis was computed and range-checked but never stored, leaving
  // splitDim at its default regardless of the model (the Split parser does
  // assign it).
  attr->splitDim = axis;
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Split;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSplitVParser("SplitV", new TfliteSplitVParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SPLIT_V_PARSER_H
#define PREDICT_TFLITE_SPLIT_V_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite SplitV operator into the ms-lite schema;
// registered under the name "SplitV".
class TfliteSplitVParser : public TfliteNodeParser {
 public:
  TfliteSplitVParser() : TfliteNodeParser("SplitV") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SPLIT_V_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_sqrt_parser.h"
namespace mindspore {
namespace lite {
// Converts a tflite Sqrt operator into a ms-lite Sqrt primitive.
// Sqrt carries no operator attributes, so only an empty SqrtT is installed.
STATUS TfliteSqrtParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                               schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSqrtParser";
  auto attr = std::make_unique<schema::SqrtT>();
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Sqrt;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSqrtParser("Sqrt", new TfliteSqrtParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SQRT_PARSER_H
#define PREDICT_TFLITE_SQRT_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite Sqrt operator into the ms-lite schema;
// registered under the name "Sqrt".
class TfliteSqrtParser : public TfliteNodeParser {
 public:
  TfliteSqrtParser() : TfliteNodeParser("Sqrt") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SQRT_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_square_parser.h"
namespace mindspore {
namespace lite {
// Converts a tflite Square operator into a ms-lite Square primitive.
// Square carries no operator attributes, so only an empty SquareT is installed.
STATUS TfliteSquareParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                 const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                 const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                 const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                 schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSquareParser";
  auto attr = std::make_unique<schema::SquareT>();
  if (op == nullptr) {
    return RET_OK;
  }
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Square;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSquareParser("Square", new TfliteSquareParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SQUARE_PARSER_H
#define PREDICT_TFLITE_SQUARE_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite Square operator into the ms-lite schema;
// registered under the name "Square".
class TfliteSquareParser : public TfliteNodeParser {
 public:
  TfliteSquareParser() : TfliteNodeParser("Square") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SQUARE_PARSER_H
......@@ -16,7 +16,7 @@
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_squareddifference_parser.h"
#include "tools/converter/parser/tflite/tflite_squared_difference_parser.h"
namespace mindspore {
namespace lite {
......@@ -27,10 +27,6 @@ STATUS TfliteSquaredDifferenceParser::Parse(const std::unique_ptr<tflite::Operat
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
MS_LOG(DEBUG) << "parse TfliteSquaredDifferenceParser";
std::unique_ptr<schema::SquaredDifferenceT> attr(new schema::SquaredDifferenceT());
const auto &tflite_attr = tfliteOp->builtin_options.AsSquaredDifferenceOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
}
if (op != nullptr) {
op->primitive = std::make_unique<schema::PrimitiveT>();
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SQUARED_DIFFERENCE_PARSER_H
#define PREDICT_TFLITE_SQUARED_DIFFERENCE_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite SquaredDifference operator into the
// ms-lite schema; registered under the name "SquaredDifference".
class TfliteSquaredDifferenceParser : public TfliteNodeParser {
 public:
  TfliteSquaredDifferenceParser() : TfliteNodeParser("SquaredDifference") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache, bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SQUARED_DIFFERENCE_PARSER_H
......@@ -16,20 +16,21 @@
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h"
#include "tools/converter/parser/tflite/tflite_squeeze_parser.h"
namespace mindspore {
namespace lite {
STATUS TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
MS_LOG(DEBUG) << "parse TfliteSqueezeParser";
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
MS_LOG(INFO) << "parse TfliteSqueezeParser";
std::unique_ptr<schema::SqueezeT> attr(new schema::SqueezeT());
const auto &tflite_attr = tfliteOp->builtin_options.AsSqueezeOptions();
if (tflite_attr == nullptr) {
MS_LOG(ERROR) << "get op: " << op->name.c_str() << " attr failed";
MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
return RET_NULL_PTR;
}
attr->axis = tflite_attr->squeeze_dims;
......@@ -42,6 +43,6 @@ STATUS TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfli
return RET_OK;
}
TfliteNodeRegister g_tfliteSqueezeParser("Squeeze", new TfliteSqueezeParser());
TfliteNodeRegister g_TfliteSqueezeParser("Squeeze", new TfliteSqueezeParser());
} // namespace lite
} // namespace mindspore
......@@ -14,13 +14,13 @@
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_STACK_PARSER_H
#define PREDICT_TFLITE_STACK_PARSER_H
#ifndef PREDICT_TFLITE_SQUEEZE_PARSER_H
#define PREDICT_TFLITE_SQUEEZE_PARSER_H
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h"
#include "mindspore/lite/tools/converter/parser/tflite/tflite_node_parser_registry.h"
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
......@@ -32,10 +32,10 @@ class TfliteSqueezeParser : public TfliteNodeParser {
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
TensorCache *tensor_cache, bool quantizedModel) override;
TensorCache *tensor_cache,
bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_Squeeze_PARSER_H
#endif // PREDICT_TFLITE_SQUEEZE_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "tools/converter/parser/tflite/tflite_strided_slice_parser.h"
namespace mindspore {
namespace lite {
// Parses the tflite StridedSlice operator into a ms-lite StridedSlice
// primitive. inputs[1..3] hold begin/end/stride; the option masks
// (begin/end/ellipsis/new_axis/shrink_axis) come from StridedSliceOptions.
STATUS TfliteStridedSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                                       const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                                       const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                                       const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                                       schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteStridedSliceParser";
  // op->name is dereferenced in the error path below; the original only
  // null-checked op at the very end. Fail fast instead.
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  std::unique_ptr<schema::StridedSliceT> attr(new schema::StridedSliceT());
  const auto &tflite_attr = tfliteOp->builtin_options.AsStridedSliceOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->begin)) {
    MS_LOG(ERROR) << "STRIDED_SLICE get begin attr failed";
    return RET_ERROR;
  }
  if (GetTfliteData(tfliteOp->inputs[2], tfliteTensors, tfliteModelBuffer, attr->end)) {
    MS_LOG(ERROR) << "STRIDED_SLICE get end attr failed";
    return RET_ERROR;
  }
  if (GetTfliteData(tfliteOp->inputs[3], tfliteTensors, tfliteModelBuffer, attr->stride)) {
    MS_LOG(ERROR) << "STRIDED_SLICE get stride attr failed";
    return RET_ERROR;
  }
  attr->beginMask = tflite_attr->begin_mask;
  attr->endMask = tflite_attr->end_mask;
  attr->ellipsisMask = tflite_attr->ellipsis_mask;
  attr->newAxisMask = tflite_attr->new_axis_mask;
  attr->shrinkAxisMask = tflite_attr->shrink_axis_mask;
  // attr->isScale; // isScale is actually not used in ms-lite
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_StridedSlice;
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteStridedSliceParser("StridedSlice", new TfliteStridedSliceParser());
} // namespace lite
} // namespace mindspore
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_STRIDED_SLICE_PARSER_H
#define PREDICT_TFLITE_STRIDED_SLICE_PARSER_H
#include <memory>
#include <vector>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
namespace mindspore {
namespace lite {
// Node parser converting the tflite StridedSlice operator into the ms-lite
// schema; registered under the name "StridedSlice".
class TfliteStridedSliceParser : public TfliteNodeParser {
 public:
  TfliteStridedSliceParser() : TfliteNodeParser("StridedSlice") {}
  // Fills `op` with the parsed primitive; returns a STATUS code.
  STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
               const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
               const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
               const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet, schema::CNodeT *op,
               TensorCache *tensor_cache,
               bool quantizedModel) override;
};
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_STRIDED_SLICE_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <memory>
#include "mindspore/lite/tools/converter/parser/tflite/tflite_sum_parser.h"
namespace mindspore {
namespace lite {
// Parses a tflite SUM (ReducerOptions) operator into a MindSpore Lite
// Reduce node with mode ReduceSum.
// tfliteOp:          the tflite operator to convert; inputs[1] holds the axes.
// tfliteTensors:     tensors of the tflite subgraph.
// tfliteModelBuffer: raw model buffers holding constant tensor data.
// tfliteOpSet:       operator codes of the tflite model (unused here).
// op:                output node to populate; must not be null.
// tensor_cache:      tensor cache (unused here).
// quantizedModel:    true when the source model is quantized (unused here).
// Returns RET_OK on success, RET_NULL_PTR / RET_ERROR on failure.
STATUS TfliteSumParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
                              const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
                              const std::vector<std::unique_ptr<tflite::BufferT>> &tfliteModelBuffer,
                              const std::vector<std::unique_ptr<tflite::OperatorCodeT>> &tfliteOpSet,
                              schema::CNodeT *op, TensorCache *tensor_cache, bool quantizedModel) {
  MS_LOG(INFO) << "parse TfliteSumParser";
  // Check op up front: the previous code dereferenced op->name in an error
  // path before the null check, and silently returned RET_OK for a null op
  // without producing any primitive.
  if (op == nullptr) {
    MS_LOG(ERROR) << "op is null";
    return RET_NULL_PTR;
  }
  const auto &tflite_attr = tfliteOp->builtin_options.AsReducerOptions();
  if (tflite_attr == nullptr) {
    MS_LOG(ERROR) << "get op: " << op->name << " attr failed";
    return RET_NULL_PTR;
  }
  // SUM carries its reduction axes as the second input tensor.
  if (tfliteOp->inputs.size() < 2) {
    MS_LOG(ERROR) << "SUM op: " << op->name << " is missing the axes input";
    return RET_ERROR;
  }
  // make_unique for consistency with the PrimitiveT allocation below.
  auto attr = std::make_unique<schema::ReduceT>();
  attr->mode = schema::ReduceMode_ReduceSum;
  if (GetTfliteData(tfliteOp->inputs[1], tfliteTensors, tfliteModelBuffer, attr->axes) != RET_OK) {
    MS_LOG(ERROR) << "SUM get axes attr failed";
    return RET_ERROR;
  }
  attr->keepDims = tflite_attr->keep_dims;
  op->primitive = std::make_unique<schema::PrimitiveT>();
  op->primitive->value.type = schema::PrimitiveType_Reduce;
  // PrimitiveT's value union takes raw ownership of the attr.
  op->primitive->value.value = attr.release();
  return RET_OK;
}
TfliteNodeRegister g_TfliteSumParser("Sum", new TfliteSumParser());
} // namespace lite
} // namespace mindspore
......@@ -14,8 +14,8 @@
* limitations under the License.
*/
#ifndef PREDICT_TFLITE_SQUAREDDIFFERENCE_PARSER_H
#define PREDICT_TFLITE_SQUAREDDIFFERENCE_PARSER_H
#ifndef PREDICT_TFLITE_SUM_PARSER_H
#define PREDICT_TFLITE_SUM_PARSER_H
#include <memory>
#include <vector>
......@@ -24,9 +24,9 @@
namespace mindspore {
namespace lite {
class TfliteSquaredDifferenceParser : public TfliteNodeParser {
class TfliteSumParser : public TfliteNodeParser {
public:
TfliteSquaredDifferenceParser() : TfliteNodeParser("SquaredDifference") {}
TfliteSumParser() : TfliteNodeParser("Sum") {}
STATUS Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp,
const std::vector<std::unique_ptr<tflite::TensorT>> &tfliteTensors,
......@@ -37,5 +37,4 @@ class TfliteSquaredDifferenceParser : public TfliteNodeParser {
} // namespace lite
} // namespace mindspore
#endif // PREDICT_TFLITE_SQUAREDDIFFERENCE_PARSER_H
#endif // PREDICT_TFLITE_SUM_PARSER_H
......@@ -77,6 +77,32 @@ std::map<tflite::BuiltinOperator, std::string> tfMsOpTypeMap{
{tflite::BuiltinOperator_RANK, "Rank"},
{tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, "LocalResponseNorm"},
{tflite::BuiltinOperator_GATHER, "GatherV2"},
{tflite::BuiltinOperator_EXP, "Exp"},
{tflite::BuiltinOperator_SPLIT_V, "SplitV"},
{tflite::BuiltinOperator_SPLIT, "Split"},
{tflite::BuiltinOperator_BATCH_TO_SPACE_ND, "BatchToSpaceND"},
{tflite::BuiltinOperator_STRIDED_SLICE, "StridedSlice"},
{tflite::BuiltinOperator_ONE_HOT, "OneHot"},
{tflite::BuiltinOperator_SHAPE, "Shape"},
{tflite::BuiltinOperator_SQUEEZE, "Squeeze"},
{tflite::BuiltinOperator_ABS, "Abs"},
{tflite::BuiltinOperator_SIN, "Sin"},
{tflite::BuiltinOperator_COS, "Cos"},
{tflite::BuiltinOperator_LOG, "Log"},
{tflite::BuiltinOperator_SQRT, "Sqrt"},
{tflite::BuiltinOperator_SQUARE, "Square"},
{tflite::BuiltinOperator_LOGICAL_NOT, "LogicalNot"},
{tflite::BuiltinOperator_LOGICAL_AND, "LogicalAnd"},
{tflite::BuiltinOperator_LOGICAL_OR, "LogicalOr"},
{tflite::BuiltinOperator_HARD_SWISH, "HardSwish"},
{tflite::BuiltinOperator_SUM, "Sum"},
{tflite::BuiltinOperator_REDUCE_PROD, "ReduceProd"},
{tflite::BuiltinOperator_REDUCE_MAX, "ReduceMax"},
{tflite::BuiltinOperator_REDUCE_MIN, "ReduceMin"},
// {tflite::BuiltinOperator_REDUCE_ANY, "ReduceAny"},
{tflite::BuiltinOperator_SCATTER_ND, "ScatterNd"},
{tflite::BuiltinOperator_MAXIMUM, "Maximum"},
{tflite::BuiltinOperator_MINIMUM, "Minimum"},
};
std::string GetMSOpType(tflite::BuiltinOperator tfliteOpType) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册