Commit 40dced5d authored by mindspore-ci-bot, committed by Gitee

!4651 fix bug

Merge pull request !4651 from 徐安越/master
......@@ -194,6 +194,7 @@ union PrimitiveType {
Return,
MakeTuple,
ToFormat,
Proposal,
}
enum QuantType: int {
......
......@@ -578,6 +578,7 @@ table ExpandDims {
table Tile {
multiples: [int];
dims: [int];
}
table Cast {
......@@ -885,4 +886,15 @@ table ToFormat {
}
table Return {
}
\ No newline at end of file
}
table Proposal {
feat_stride : float;
base_size : float;
min_size : float;
ratio : [float];
scale : [float];
pre_nms_topn : int;
post_nms_topn : int;
nms_thresh : float;
}
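
For context, the object API that flatc generates for this table is what the new parser later in this diff fills in. A minimal, standalone sketch (ProposalT is mirrored locally here; the real type is the generated schema::ProposalT, and the default values are illustrative Faster R-CNN-style settings, not taken from this PR):

#include <cstdio>
#include <vector>

// Local mirror of the flatc-generated object type for the table above
// (the real type is schema::ProposalT); field names follow the schema.
struct ProposalT {
  float feat_stride = 0.0f;
  float base_size = 0.0f;
  float min_size = 0.0f;
  std::vector<float> ratio;
  std::vector<float> scale;
  int pre_nms_topn = 0;
  int post_nms_topn = 0;
  float nms_thresh = 0.0f;
};

int main() {
  ProposalT attr;
  attr.feat_stride = 16.0f;   // feature-map stride w.r.t. the input image
  attr.base_size = 16.0f;     // base anchor size in pixels
  attr.min_size = 16.0f;      // boxes smaller than this are dropped
  attr.ratio = {0.5f, 1.0f, 2.0f};
  attr.scale = {8.0f, 16.0f, 32.0f};
  attr.pre_nms_topn = 6000;   // candidates kept before NMS
  attr.post_nms_topn = 300;   // proposals kept after NMS
  attr.nms_thresh = 0.7f;
  std::printf("%zu ratios x %zu scales\n", attr.ratio.size(), attr.scale.size());
  return 0;
}
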
......@@ -98,10 +98,15 @@ int Benchmark::ReadInputFile() {
MS_ASSERT(cur_tensor != nullptr);
size_t size;
char *binBuf = ReadFile(_flags->input_data_list[i].c_str(), &size);
if (binBuf == nullptr) {
MS_LOG(ERROR) << "ReadFile return nullptr";
return RET_ERROR;
}
auto tensorDataSize = cur_tensor->Size();
if (size != tensorDataSize) {
std::cerr << "Input binary file size error, required: %zu, in fact: %zu" << tensorDataSize << size << std::endl;
MS_LOG(ERROR) << "Input binary file size error, required: %zu, in fact: %zu" << tensorDataSize << size;
std::cerr << "Input binary file size error, required: %zu, in fact: %zu" << tensorDataSize
<< size << std::endl;
MS_LOG(ERROR) << "Input binary file size error, required: " << tensorDataSize << ", in fact: " << size;
return RET_ERROR;
}
auto inputData = cur_tensor->MutableData();
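
The replaced lines above mixed printf-style placeholders with stream insertion, so the literal "%zu" text was logged and both sizes were glued on at the end; the new MS_LOG(ERROR) line interleaves the values instead. A minimal standalone repro of the difference:

#include <cstddef>
#include <iostream>

int main() {
  std::size_t required = 1024, actual = 512;
  // Buggy form: streams do not interpret "%zu", so the placeholders are
  // printed verbatim, followed by "1024512".
  std::cerr << "size error, required: %zu, in fact: %zu" << required << actual << std::endl;
  // Fixed form, as in the new MS_LOG(ERROR) line: interleave the values.
  std::cerr << "size error, required: " << required << ", in fact: " << actual << std::endl;
  return 0;
}
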
......@@ -508,6 +513,17 @@ int Benchmark::Init() {
MS_LOG(INFO) << "WarmUpLoopCount = " << this->_flags->warmUpLoopCount;
MS_LOG(INFO) << "NumThreads = " << this->_flags->numThreads;
MS_LOG(INFO) << "calibDataPath = " << this->_flags->calibDataPath;
if (this->_flags->loopCount < 1) {
MS_LOG(ERROR) << "LoopCount:" << this->_flags->loopCount << " must be greater than 0";
return RET_ERROR;
}
if (this->_flags->numThreads < 1) {
MS_LOG(ERROR) << "numThreads:" << this->_flags->numThreads << " must be greater than 0";
return RET_ERROR;
}
if (this->_flags->cpuBindMode == -1) {
MS_LOG(INFO) << "cpuBindMode = MID_CPU";
} else if (this->_flags->cpuBindMode == 1) {
......
add_library(caffe_parser_mid OBJECT
${CMAKE_CURRENT_SOURCE_DIR}/caffe.pb.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_argmax_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_argmax_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_batchnorm_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_batchnorm_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_concat_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_concat_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_conv_base_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_conv_base_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_converter.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_converter.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_convolution_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_convolution_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_crop_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_crop_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_deconvolution_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_deconvolution_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_eltwise_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_eltwise_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_flatten_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_flatten_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_innerproduct_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_innerproduct_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_model_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_model_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_node_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_node_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_node_parser_registry.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_node_parser_registry.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_parse_utils.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_parse_utils.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_pooling_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_pooling_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_power_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_power_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_prelu_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_prelu_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_relu_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_relu_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_reshape_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_reshape_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_scale_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_scale_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_sigmoid_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_sigmoid_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_softmax_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_softmax_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_inspector.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_interp_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_interp_parser.h
${CMAKE_CURRENT_SOURCE_DIR}/caffe_permute_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_permute_parser.h)
${CMAKE_CURRENT_SOURCE_DIR}/caffe_tile_parser.cc
${CMAKE_CURRENT_SOURCE_DIR}/caffe_proposal_parser.cc)
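
Adding the two new .cc files to caffe_parser_mid is only half of wiring up an op; each parser also self-registers with the converter via the CaffeNodeRegistrar lines that appear later in this diff. A simplified, standalone sketch of that registration pattern, with stand-in names (the real types are CaffeNodeParser / CaffeNodeParserRegistry / CaffeNodeRegistrar):

#include <cstdio>
#include <map>
#include <string>

struct NodeParser {
  virtual ~NodeParser() = default;
};

static std::map<std::string, NodeParser *> &Registry() {
  static std::map<std::string, NodeParser *> registry;
  return registry;
}

struct NodeRegistrar {
  NodeRegistrar(const std::string &type, NodeParser *parser) {
    Registry()[type] = parser;  // runs at static-init time, before main()
  }
};

struct TileParser : NodeParser {};
struct ProposalParser : NodeParser {};

// Each op becomes visible to the converter through a file-scope registrar,
// keyed by the Caffe layer type string.
NodeRegistrar g_tileParser("Tile", new TileParser());
NodeRegistrar g_proposalParser("Proposal", new ProposalParser());

int main() {
  std::printf("registered parsers: %zu\n", Registry().size());  // 2
  return 0;
}
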
......@@ -14,8 +14,8 @@
* limitations under the License.
*/
#include "tools/converter/parser/caffe/caffe_conv_base_parser.h"
#include <algorithm>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_conv_base_parser.h"
const uint32_t PAD_DEFAULT_VALUE = 0;
const uint32_t STRIDE_DEFAULT_VALUE = 1;
......@@ -35,7 +35,7 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
*/
if (convParam.has_pad_h() || convParam.has_pad_w()) {
if (convParam.pad_size() != 0) {
// MS_LOGE("Either pad or pad_h/w should be specified; not both");
MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both";
return RET_ERROR;
}
......@@ -76,11 +76,11 @@ STATUS CaffeConvBaseParser::ParsePads(const caffe::ConvolutionParameter &convPar
STATUS CaffeConvBaseParser::ParseStrides(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *stride) {
if (convParam.has_stride_h() || convParam.has_stride_w()) {
if (convParam.stride_size() != 0) {
// MS_LOGE("Either stride or stride_h/w should be specified; not both");
MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both";
return RET_ERROR;
}
if (!convParam.has_stride_h() || !convParam.has_stride_w()) {
// MS_LOGE("stride_h/w must appear at the same time!");
MS_LOG(ERROR) << "stride_h/w must appear at the same time!";
return RET_ERROR;
}
(*stride)[0] = convParam.stride_h();
......@@ -120,14 +120,14 @@ STATUS CaffeConvBaseParser::ParseDilations(const caffe::ConvolutionParameter &co
STATUS CaffeConvBaseParser::ParseKernels(const caffe::ConvolutionParameter &convParam, std::vector<int64_t> *kernel) {
if (convParam.has_kernel_h() || convParam.has_kernel_w()) {
if (convParam.kernel_size_size() != 0) {
// MS_LOGE("Either kernel_size or kernel_h/w should be specified; not both.")
MS_LOG(ERROR) << "Either kernel_size or kernel_h/w should be specified; not both.";
return RET_ERROR;
}
if (convParam.has_kernel_h() && convParam.has_kernel_w()) {
(*kernel)[0] = convParam.kernel_h();
(*kernel)[1] = convParam.kernel_w();
} else {
// MS_LOGE("kernel_h/w must appear at the same time!");
MS_LOG(ERROR) << "kernel_h/w must appear at the same time!";
return RET_ERROR;
}
} else if (convParam.kernel_size_size() != 0) {
......@@ -157,40 +157,27 @@ int CaffeConvBaseParser::ParseGroup(const caffe::ConvolutionParameter &convParam
return group;
}
int CaffeConvBaseParser::ParseChannelIn(const caffe::LayerParameter &proto, const int &group) {
int res = 0;
auto &weightBlob = proto.blobs(0);
if (weightBlob.has_shape()) {
res = weightBlob.shape().dim(1) * group;
} else {
// get shape information from Blob parameters(caffe proto v1)
if (proto.type() == "Deconvolution") {
res = weightBlob.num() * group;
} else {
res = weightBlob.channels() * group;
}
}
return res;
}
int CaffeConvBaseParser::ParseChannelOut(const caffe::ConvolutionParameter &convParam) {
int CaffeConvBaseParser::ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut) {
MS_ASSERT(channelOut != nullptr);
if (!convParam.has_num_output()) {
// MS_LOGE("Parse num_output for failed.");
MS_LOG(ERROR) << "Parse num_output for failed.";
return RET_ERROR;
}
return convParam.num_output();
*channelOut = convParam.num_output();
return RET_OK;
}
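
The signature change above is the core of this fix: the old ParseChannelOut returned the channel count itself, so an error sentinel was indistinguishable from a real channel count at the call site; the new version returns a status and writes the value through an out-parameter. A standalone sketch of the pattern:

#include <cstdio>

enum Status { RET_OK = 0, RET_ERROR = 1 };

Status ParseChannelOut(bool has_num_output, int num_output, int *channelOut) {
  if (!has_num_output) {
    return RET_ERROR;  // failure no longer aliases a (bogus) channel count
  }
  *channelOut = num_output;
  return RET_OK;
}

int main() {
  int channelOut = 0;
  if (ParseChannelOut(true, 64, &channelOut) != RET_OK) {
    std::fprintf(stderr, "parse num_output failed\n");
    return 1;
  }
  std::printf("channelOut = %d\n", channelOut);
  return 0;
}
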
STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight,
std::vector<schema::TensorT *> *weightVec) {
// Layer must have Filter
if (weight.blobs_size() == 0) {
// MS_LOGE("No filter data in layer %s", weight.name().c_str());
MS_LOG(ERROR) << "No filter data in layer " << weight.name().c_str();
return RET_ERROR;
}
auto filter = ConvertWeight(weight.blobs(0));
if (filter == nullptr) {
// MS_LOGE("Convert weight for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert weight for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
weightVec->push_back(filter);
......@@ -200,13 +187,13 @@ STATUS CaffeConvBaseParser::ParseWeight(const caffe::LayerParameter &weight,
if (convParam.bias_term() && weight.blobs_size() > 1) {
auto bias = ConvertWeight(weight.blobs(1));
if (bias == nullptr) {
// MS_LOGE("Convert bias for layer %s failed", weight.name().c_str());
MS_LOG(ERROR) << "Convert bias for layer " << weight.name().c_str() << " failed";
return RET_ERROR;
}
std::vector<int32_t> shape = bias->dims;
if (shape.size() != CAFFE_CONV_BIAS_DIM_NUM) {
// MS_LOGE("Bias dim-num of layer %s is not supported");
MS_LOG(ERROR) << "Bias dim-num of layer "<< weight.name().c_str() << " is not supported";
return RET_ERROR;
}
weightVec->push_back(bias);
......
......@@ -40,9 +40,7 @@ class CaffeConvBaseParser {
int ParseGroup(const caffe::ConvolutionParameter &convParam, const std::string &layerType);
int ParseChannelOut(const caffe::ConvolutionParameter &convParam);
int ParseChannelIn(const caffe::LayerParameter &proto, const int &group);
int ParseChannelOut(const caffe::ConvolutionParameter &convParam, int32_t *channelOut);
STATUS ParseWeight(const caffe::LayerParameter &weight, std::vector<schema::TensorT *> *weightVec);
};
......
......@@ -16,6 +16,7 @@
#include <memory>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h"
#include "utils/log_adapter.h"
namespace mindspore {
namespace lite {
......@@ -62,7 +63,8 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
std::vector<int64_t> pad(4, 0);
auto status = convParser.ParsePads(convParam, &pad);
if (status != RET_OK) {
// MS_LOGE("ParsePads for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() <<" failed";
return RET_ERROR;
}
attr->padUp = pad[0];
attr->padDown = pad[1];
......@@ -73,7 +75,8 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
std::vector<int64_t> stride(2, 0);
status = convParser.ParseStrides(convParam, &stride);
if (status != RET_OK) {
// MS_LOGE("ParseStrides for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->strideH = stride[0];
attr->strideW = stride[1];
......@@ -82,7 +85,8 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
std::vector<int64_t> dilation(2, 0);
status = convParser.ParseDilations(convParam, &dilation);
if (status != RET_OK) {
// MS_LOGE("ParseDilations for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->dilateH = dilation[0];
attr->dilateW = dilation[1];
......@@ -91,15 +95,26 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
std::vector<int64_t> kernel(2, 0);
status = convParser.ParseKernels(convParam, &kernel);
if (status != RET_OK) {
// MS_LOGE("ParseKernels for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->kernelH = kernel[0];
attr->kernelW = kernel[1];
attr->hasBias = convParam.bias_term();
attr->group = convParser.ParseGroup(convParam, proto.type());
attr->channelOut = convParser.ParseChannelOut(convParam);
attr->channelIn = convParser.ParseChannelIn(weight, attr->group);
auto ret = convParser.ParseChannelOut(convParam, &(attr->channelOut));
if (ret != RET_OK) {
MS_LOG(ERROR) << "conv channel out failed";
return RET_ERROR;
}
auto &weightBlob = weight.blobs(0);
if (weightBlob.has_shape()) {
attr->channelIn = weightBlob.shape().dim(1) * attr->group;
} else {
// get shape information from Blob parameters(caffe proto v1)
attr->channelIn = weightBlob.channels() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_Conv2D;
......@@ -108,9 +123,9 @@ STATUS CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, const c
ParseGroupConvolution(op, attr);
status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
// MS_LOGE("ParseWeight for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}
return RET_OK;
return status;
}
CaffeNodeRegistrar g_caffeConvolutionParser("Convolution", new CaffeConvolutionParser());
......
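
The inlined channelIn computation above replaces the removed ParseChannelIn helper. It relies on Caffe's convolution weight blob layout, [num_output, channels / group, kernel_h, kernel_w] (standard for caffe proto v2, stated here as an assumption), so dim(1) * group recovers the input channel count. A worked example:

#include <cstdio>

int main() {
  // Assumed Caffe convolution weight layout:
  // [num_output, channels / group, kernel_h, kernel_w].
  const int dim[4] = {64, 16, 3, 3};
  const int group = 2;
  // channelIn = dim(1) * group, exactly as the inlined parser code computes.
  std::printf("channelIn = %d\n", dim[1] * group);  // 16 * 2 = 32
  return 0;
}
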
......@@ -26,7 +26,7 @@ void CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::CNodeT *op, schem
std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam(new schema::DeDepthwiseConv2DT());
if (deDepthwiseConv2DParam == nullptr) {
// MS_LOGW("new DeDepthwiseConv2DT failed");
MS_LOG(ERROR) << "new DeDepthwiseConv2DT failed";
return;
}
deDepthwiseConv2DParam->format = attr->format;
......@@ -61,7 +61,8 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
std::vector<int64_t> pad(4, 0);
auto status = convParser.ParsePads(convParam, &pad);
if (status != RET_OK) {
// MS_LOGE("ParsePads for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->padUp = pad[0];
attr->padDown = pad[1];
......@@ -72,7 +73,8 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
std::vector<int64_t> stride(2, 0);
status = convParser.ParseStrides(convParam, &stride);
if (status != RET_OK) {
// MS_LOGE("ParseStrides for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->strideH = stride[0];
attr->strideW = stride[1];
......@@ -81,7 +83,8 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
std::vector<int64_t> dilation(2, 0);
status = convParser.ParseDilations(convParam, &dilation);
if (status != RET_OK) {
// MS_LOGE("ParseDilations for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->dilateH = dilation[0];
attr->dilateW = dilation[1];
......@@ -90,15 +93,29 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
std::vector<int64_t> kernel(2, 0);
status = convParser.ParseKernels(convParam, &kernel);
if (status != RET_OK) {
// MS_LOGE("ParseKernels for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed";
return RET_ERROR;
}
attr->kernelH = kernel[0];
attr->kernelW = kernel[1];
attr->hasBias = convParam.bias_term();
attr->group = convParser.ParseGroup(convParam, proto.type());
attr->channelOut = convParser.ParseChannelOut(convParam);
attr->channelIn = convParser.ParseChannelIn(weight, attr->group);
auto ret = convParser.ParseChannelOut(convParam, &(attr->channelOut));
if (ret != RET_OK) {
MS_LOG(ERROR) << "deconv channel get failed";
return RET_ERROR;
}
auto &weightBlob = weight.blobs(0);
if (weightBlob.has_shape()) {
if (attr->group == 1)
attr->channelIn = weightBlob.shape().dim(0) * attr->group;
else
attr->channelIn = weightBlob.shape().dim(1) * attr->group;
} else {
// get shape information from Blob parameters(caffe proto v1)
attr->channelIn = weightBlob.num() * attr->group;
}
attr->padMode = schema::PadMode_CAFFE;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.type = schema::PrimitiveType_DeConv2D;
......@@ -106,9 +123,9 @@ STATUS CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, const
ParseGroupDeconvolution(op, attr);
status = convParser.ParseWeight(weight, weightVec);
if (status != RET_OK) {
// MS_LOGE("ParseWeight for %s failed", proto.name().c_str());
MS_LOG(ERROR) << "ParseWeight for " << proto.name().c_str() << " failed";
}
return RET_OK;
return status;
}
CaffeNodeRegistrar g_caffeDeconvolutionParser("Deconvolution", new CaffeDeconvolutionParser());
......
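
The deconvolution branch differs from the convolution parser because Caffe stores deconvolution weights transposed relative to convolution (assumed layout: [input_channels, num_output / group, kernel_h, kernel_w]). A small sketch of the two branches above:

#include <cstdio>

int main() {
  // group == 1: input channels come straight from dim(0).
  const int shape_g1[4] = {32, 16, 2, 2};
  std::printf("group=1:  channelIn = %d\n", shape_g1[0] * 1);    // 32
  // group > 1 (e.g. depthwise, group == 32): the parser reconstructs
  // them as dim(1) * group instead.
  const int shape_g32[4] = {32, 1, 2, 2};
  std::printf("group=32: channelIn = %d\n", shape_g32[1] * 32);  // 1 * 32 = 32
  return 0;
}
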
......@@ -24,6 +24,7 @@
#include "tools/converter/parser/caffe/caffe.pb.h"
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
#include "include/errorcode.h"
#include "utils/log_adapter.h"
namespace mindspore {
namespace lite {
......
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_proposal_parser.h"
namespace mindspore {
namespace lite {
STATUS CaffeProposalParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::ProposalT> attr(new schema::ProposalT());
const caffe::ProposalParameter proposal_param = proto.proposal_param();
if (proposal_param.has_feat_stride()) {
attr->feat_stride = proposal_param.feat_stride();
}
if (proposal_param.has_base_size()) {
attr->base_size = proposal_param.base_size();
}
if (proposal_param.has_min_size()) {
attr->min_size = proposal_param.min_size();
}
if (proposal_param.has_pre_nms_topn()) {
attr->pre_nms_topn = proposal_param.pre_nms_topn();
}
if (proposal_param.has_post_nms_topn()) {
attr->post_nms_topn = proposal_param.post_nms_topn();
}
if (proposal_param.has_nms_thresh()) {
attr->nms_thresh = proposal_param.nms_thresh();
}
const int num_ratio = proposal_param.ratio_size();
attr->ratio.resize(num_ratio);
for (int i = 0; i < num_ratio; ++i) {
attr->ratio[i] = proposal_param.ratio(i);
}
const int num_scale = proposal_param.scale_size();
attr->scale.resize(num_scale);
for (int i = 0; i < num_scale; ++i) {
attr->scale[i] = proposal_param.scale(i);
}
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->primitive->value.type = schema::PrimitiveType_Proposal;
return RET_OK;
}
CaffeNodeRegistrar g_caffeProposalParser("Proposal", new CaffeProposalParser());
} // namespace lite
} // namespace mindspore
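
For a sense of what the parsed attributes mean at runtime (Faster R-CNN-style semantics, not something this parser itself computes): each (ratio, scale) pair yields one anchor per feature-map location, and pre/post_nms_topn bound how many candidates survive NMS. A standalone illustration:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> ratio = {0.5f, 1.0f, 2.0f};
  std::vector<float> scale = {8.0f, 16.0f, 32.0f};
  const std::size_t feat_h = 38, feat_w = 50;  // illustrative feature-map size
  // 3 ratios x 3 scales = 9 anchors per location, 17100 candidates in total,
  // later pruned to at most post_nms_topn boxes.
  std::printf("candidate anchors: %zu\n", ratio.size() * scale.size() * feat_h * feat_w);
  return 0;
}
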
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LITE_CAFFE_PROPOSAL_PARSER_H
#define LITE_CAFFE_PROPOSAL_PARSER_H
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h"
namespace mindspore {
namespace lite {
class CaffeProposalParser : public CaffeNodeParser {
public:
CaffeProposalParser() : CaffeNodeParser("proposal") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // LITE_CAFFE_PROPOSAL_PARSER_H
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <memory>
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h"
namespace mindspore {
namespace lite {
STATUS CaffeTileParser::Parse(const caffe::LayerParameter &proto,
const caffe::LayerParameter &weight,
schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) {
std::unique_ptr<schema::TileT> attr(new schema::TileT());
const caffe::TileParameter tile_param = proto.tile_param();
std::vector<int> dims;
std::vector<int> multiples;
dims.clear();
multiples.clear();
if (tile_param.has_axis()) {
dims.push_back(tile_param.axis());
} else {
dims.push_back(1);
}
if (tile_param.has_tiles()) {
multiples.push_back(tile_param.tiles());
} else {
multiples.push_back(1);
}
attr->dims = dims;
attr->multiples = multiples;
op->primitive = std::make_unique<schema::PrimitiveT>();
op->primitive->value.value = attr.release();
op->primitive->value.type = schema::PrimitiveType_Tile;
return RET_OK;
}
CaffeNodeRegistrar g_caffeTileParser("Tile", new CaffeTileParser());
} // namespace lite
} // namespace mindspore
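
Caffe's TileParameter carries a single (axis, tiles) pair, which the parser above maps onto the schema's parallel dims/multiples arrays. A standalone sketch of the resulting semantics on an NCHW shape:

#include <cassert>
#include <vector>

int main() {
  // tile_param { axis: 1 tiles: 3 } becomes dims = {1}, multiples = {3}:
  // repeat the channel axis of an NCHW tensor three times.
  std::vector<int> dims = {1};
  std::vector<int> multiples = {3};
  int shape[4] = {1, 8, 16, 16};      // [N, C, H, W]
  shape[dims[0]] *= multiples[0];
  assert(shape[1] == 24);             // result: [1, 24, 16, 16]
  return 0;
}
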
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LITE_CAFFE_TILE_PARSER_H
#define LITE_CAFFE_TILE_PARSER_H
#include <vector>
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h"
#include "mindspore/lite/tools/converter/parser/caffe/caffe_node_parser_registry.h"
namespace mindspore {
namespace lite {
class CaffeTileParser : public CaffeNodeParser {
public:
CaffeTileParser() : CaffeNodeParser("tile") {}
STATUS Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight, schema::CNodeT *op,
std::vector<schema::TensorT *> *weightVec) override;
};
} // namespace lite
} // namespace mindspore
#endif // LITE_CAFFE_TILE_PARSER_H
......@@ -47,7 +47,11 @@ STATUS TfliteTileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_
MS_LOG(ERROR) << "get tile -> multiples failed";
return RET_ERROR;
}
std::vector<int> dims(attr->multiples.size(), 0);
for (int i = 0; i < dims.size(); ++i) {
dims[i] = i;
}
attr->dims = dims;
op->primitive->value.type = schema::PrimitiveType_Tile;
op->primitive->value.value = attr.release();
......
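
TFLite's Tile op supplies one multiples entry per axis, so the added loop pairs it with an explicit dims vector {0, 1, ..., n-1}. A standalone equivalent of the added lines:

#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // One multiples entry per axis; dims enumerates every axis explicitly.
  std::vector<int> multiples = {1, 2, 1, 2};
  std::vector<int> dims(multiples.size(), 0);
  for (std::size_t i = 0; i < dims.size(); ++i) {
    dims[i] = static_cast<int>(i);
  }
  assert((dims == std::vector<int>{0, 1, 2, 3}));
  return 0;
}
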