diff --git a/mindspore/lite/java/native/runtime/ms_tensor.cpp b/mindspore/lite/java/native/runtime/ms_tensor.cpp
index 3a42f810a1b3a6fb6f9c38bbebf37a934a21db95..6117d791bf3ebe3ac69ff7801deabf7755479671 100644
--- a/mindspore/lite/java/native/runtime/ms_tensor.cpp
+++ b/mindspore/lite/java/native/runtime/ms_tensor.cpp
@@ -227,7 +227,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setByteBu
                                                                                  jobject buffer) {
   jbyte *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer));  // get buffer poiter
   jlong data_len = env->GetDirectBufferCapacity(buffer);                           // get buffer capacity
-  if (!p_data) {
+  if (p_data == nullptr) {
     MS_LOGE("GetDirectBufferAddress return null");
     return NULL;
   }
diff --git a/mindspore/lite/src/common/file_utils.h b/mindspore/lite/src/common/file_utils.h
index ff1ec03e6415c26547192ba0d0178ed290b14c78..2c6a2521bc70292ee69262a47504e42605db2909 100644
--- a/mindspore/lite/src/common/file_utils.h
+++ b/mindspore/lite/src/common/file_utils.h
@@ -34,7 +34,7 @@ char *ReadFile(const char *file, size_t *size);
 std::string RealPath(const char *path);
 
 template <typename T>
-void WriteToTxt(const std::string& file_path, void *data, size_t element_size) {
+void WriteToTxt(const std::string &file_path, void *data, size_t element_size) {
   std::ofstream out_file;
   out_file.open(file_path, std::ios::out);
   auto real_data = reinterpret_cast<T *>(data);
@@ -44,7 +44,7 @@ void WriteToTxt(const std::string& file_path, void *data, size_t element_size) {
   out_file.close();
 }
 
-int WriteToBin(const std::string& file_path, void *data, size_t size);
+int WriteToBin(const std::string &file_path, void *data, size_t size);
 
 int CompareOutputData(float *output_data, float *correct_data, int data_size);
 void CompareOutput(float *output_data, std::string file_path);
@@ -55,4 +55,3 @@ std::string GetAndroidPackagePath();
 }  // namespace mindspore
 
 #endif  // MINDSPORE_LITE_COMMON_FILE_UTILS_H_
-
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
index d987b6e8f97e4571e4b7f475d47ce4d5decfcc75..12f187cbbf126430ae30c4fb14373058d0513df4 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc
@@ -126,14 +126,10 @@ std::map tfMsActivationF
 };
 
 std::map<tflite::TensorType, TypeId> type_map = {
-  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64},
-  {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
-  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16},
-  {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
-  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16},
-  {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
-  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64},
-  {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
+  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
+  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
+  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
+  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
   {tflite::TensorType_BOOL, TypeId::kNumberTypeBool},
 };
 
@@ -190,11 +186,8 @@ size_t GetDataTypeSize(const TypeId &data_type) {
   }
 }
-STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
-                       schema::PadMode pad_mode,
-                       int strideH, int strideW,
-                       int windowH, int windowW,
-                       std::vector<int> *params) {
+STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::PadMode pad_mode, int strideH,
+                       int strideW, int windowH, int windowW, std::vector<int> *params) {
   if (tensor == nullptr) {
     MS_LOG(ERROR) << "the input tensor is null";
     return RET_ERROR;
   }
@@ -208,12 +201,18 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
     auto shape = tensor->shape;
     int H_input = shape.at(1);
     int W_input = shape.at(2);
-
+    if (strideH == 0) {
+      MS_LOG(ERROR) << "strideH is zero";
+      return RET_ERROR;
+    }
     int H_output = ceil(H_input * 1.0 / strideH);
     int pad_needed_H = (H_output - 1) * strideH + windowH - H_input;
     padUp = floor(pad_needed_H / 2.0);
     padDown = pad_needed_H - padUp;
-
+    if (strideW == 0) {
+      MS_LOG(ERROR) << "strideW is zero";
+      return RET_ERROR;
+    }
     int W_output = ceil(W_input * 1.0 / strideW);
     int pad_needed_W = (W_output - 1) * strideW + windowW - W_input;
     padLeft = floor(pad_needed_W / 2.0);
@@ -227,9 +226,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
   return RET_OK;
 }
 
-void Split(const std::string &src_str,
-           std::vector<std::string> *dst_str,
-           const std::string &chr) {
+void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
   std::string ::size_type p1 = 0, p2 = src_str.find(chr);
   while (std::string::npos != p2) {
     dst_str->push_back(src_str.substr(p1, p2 - p1));
diff --git a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
index 27da5f5dfa6b8e3d29e74282eccde624993b2d0e..2412019e39ec0809c5383edfd3d76bc9ddc24294 100644
--- a/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
+++ b/model_zoo/official/lite/image_classification/app/src/main/cpp/MSNetWork.h
@@ -32,7 +32,6 @@
 #include
 #include
 
-
 struct ImgDims {
   int channel = 0;
   int width = 0;
@@ -43,8 +42,6 @@ struct ImgDims {
   std::shared_ptr sess = nullptr;
 };*/
 
-
-
 class MSNetWork {
  public:
   MSNetWork();
@@ -55,6 +52,7 @@ class MSNetWork {
 
   int ReleaseNets(void);
 
+ protected:
   mindspore::session::LiteSession *session;
   mindspore::lite::Model *model;
   static const int RET_CATEGORY_SUM = 601;
diff --git a/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp b/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
index fdf2e1c8276665f553dbabac9caa33417b18aecc..f58394c0bde4805c629ef0319da97291f39044eb 100644
--- a/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
+++ b/model_zoo/official/lite/image_classification/app/src/main/cpp/MindSporeNetnative.cpp
@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {
 
   imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);
 
-  int offsetX = 16;
-  int offsetY = 16;
-  int cropWidth = 224;
-  int cropHeight = 224;
+  const int offsetX = 16;
+  const int offsetY = 16;
+  const int cropWidth = 224;
+  const int cropHeight = 224;
 
   // Standardization processing.
   float meanR = 0.485;