Commit 29fabd13 authored by mindspore-ci-bot, committed by Gitee

!5561 Fix C++ coding standard problem

Merge pull request !5561 from yeyunpeng2020/r0.7
@@ -227,7 +227,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setByteBu
                                                                                      jobject buffer) {
   jbyte *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer));  // get buffer pointer
   jlong data_len = env->GetDirectBufferCapacity(buffer);                           // get buffer capacity
-  if (!p_data) {
+  if (p_data == nullptr) {
     MS_LOGE("GetDirectBufferAddress return null");
     return NULL;
   }
......
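Note: the change above replaces the implicit truthiness test with an explicit nullptr comparison. GetDirectBufferAddress returns null when the ByteBuffer handed over from Java is not a direct buffer, so this check is what keeps the native side from dereferencing an invalid pointer. A minimal self-contained sketch of the same guarded pattern, using an illustrative class and method name rather than the project's real JNI entry point:

  #include <jni.h>
  #include <vector>

  // Sketch: safely copy a direct java.nio.ByteBuffer into native storage (illustrative JNI entry point).
  extern "C" JNIEXPORT jboolean JNICALL Java_com_example_Demo_copyBuffer(JNIEnv *env, jobject, jobject buffer) {
    auto *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer));  // nullptr if not a direct buffer
    jlong data_len = env->GetDirectBufferCapacity(buffer);                          // -1 if the capacity is unknown
    if (p_data == nullptr || data_len <= 0) {
      return JNI_FALSE;  // non-direct or empty buffer: nothing safe to copy
    }
    std::vector<jbyte> native_copy(p_data, p_data + data_len);  // copy into native-owned memory
    return static_cast<jboolean>(native_copy.size() == static_cast<size_t>(data_len));
  }

Checking the capacity alongside the pointer also covers the case where the buffer is direct but empty.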
@@ -34,7 +34,7 @@ char *ReadFile(const char *file, size_t *size);
 std::string RealPath(const char *path);
 template <typename T>
-void WriteToTxt(const std::string& file_path, void *data, size_t element_size) {
+void WriteToTxt(const std::string &file_path, void *data, size_t element_size) {
   std::ofstream out_file;
   out_file.open(file_path, std::ios::out);
   auto real_data = reinterpret_cast<T *>(data);
@@ -44,7 +44,7 @@ void WriteToTxt(const std::string& file_path, void *data, size_t element_size) {
   out_file.close();
 }
-int WriteToBin(const std::string& file_path, void *data, size_t size);
+int WriteToBin(const std::string &file_path, void *data, size_t size);
 int CompareOutputData(float *output_data, float *correct_data, int data_size);
 void CompareOutput(float *output_data, std::string file_path);
@@ -55,4 +55,3 @@ std::string GetAndroidPackagePath();
 }  // namespace mindspore
 #endif  // MINDSPORE_LITE_COMMON_FILE_UTILS_H_
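Note: the header changes above only move the reference qualifier so that the & binds to the parameter name (const std::string &file_path), matching the style used in the rest of the file; behaviour is unchanged. For context, a self-contained sketch of a helper with the same shape as the declared WriteToTxt, plus a usage line. This is an illustration written from the visible body, not the repository's exact implementation:

  #include <fstream>
  #include <string>

  // Sketch: dump a typed buffer to a text file, one element at a time, mirroring the declared helper.
  template <typename T>
  void WriteToTxt(const std::string &file_path, void *data, size_t element_size) {
    std::ofstream out_file(file_path, std::ios::out);
    auto real_data = reinterpret_cast<T *>(data);
    for (size_t i = 0; i < element_size; ++i) {
      out_file << real_data[i] << " ";
    }
    out_file.close();
  }

  // Usage: float buf[4] = {1, 2, 3, 4}; WriteToTxt<float>("out.txt", buf, 4);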
@@ -126,14 +126,10 @@ std::map<tflite::ActivationFunctionType, schema::ActivationType> tfMsActivationF
 };
 std::map<int, TypeId> type_map = {
-  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64},
-  {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
-  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16},
-  {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
-  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16},
-  {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
-  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64},
-  {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
+  {tflite::TensorType_FLOAT64, TypeId::kNumberTypeFloat64}, {tflite::TensorType_FLOAT32, TypeId::kNumberTypeFloat32},
+  {tflite::TensorType_FLOAT16, TypeId::kNumberTypeFloat16}, {tflite::TensorType_INT32, TypeId::kNumberTypeInt32},
+  {tflite::TensorType_INT16, TypeId::kNumberTypeInt16}, {tflite::TensorType_INT8, TypeId::kNumberTypeInt8},
+  {tflite::TensorType_INT64, TypeId::kNumberTypeInt64}, {tflite::TensorType_UINT8, TypeId::kNumberTypeUInt8},
   {tflite::TensorType_BOOL, TypeId::kNumberTypeBool},
 };
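Note: the type_map entries are unchanged here; they are only re-wrapped two per line, presumably to satisfy the formatter's line-length limit. The map is what lets the converter translate a tflite tensor type into a MindSpore TypeId. A small sketch of how such a lookup is typically done with an explicit miss path; the enum and function below are local stand-ins, not the converter's real types:

  #include <map>

  // Local stand-ins for the tflite tensor type and the MindSpore type id (illustrative only).
  enum class TensorType { FLOAT32, INT8 };
  enum class TypeId { kNumberTypeFloat32, kNumberTypeInt8, kTypeUnknown };

  // Sketch: consult the type map and fall back to an explicit "unknown" value on a miss.
  TypeId GetMsTypeId(const std::map<TensorType, TypeId> &type_map, TensorType tflite_type) {
    auto iter = type_map.find(tflite_type);
    if (iter == type_map.end()) {
      return TypeId::kTypeUnknown;  // unsupported tflite tensor type
    }
    return iter->second;
  }

  // Usage: GetMsTypeId({{TensorType::FLOAT32, TypeId::kNumberTypeFloat32}}, TensorType::INT8) yields kTypeUnknown.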
@@ -190,11 +186,8 @@ size_t GetDataTypeSize(const TypeId &data_type) {
   }
 }
-STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
-                       schema::PadMode pad_mode,
-                       int strideH, int strideW,
-                       int windowH, int windowW,
-                       std::vector<int> *params) {
+STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::PadMode pad_mode, int strideH,
+                       int strideW, int windowH, int windowW, std::vector<int> *params) {
   if (tensor == nullptr) {
     MS_LOG(ERROR) << "the input tensor is null";
     return RET_ERROR;
@@ -208,12 +201,18 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
   auto shape = tensor->shape;
   int H_input = shape.at(1);
   int W_input = shape.at(2);
+  if (strideH == 0) {
+    MS_LOG(ERROR) << "strideH is zero";
+    return RET_ERROR;
+  }
   int H_output = ceil(H_input * 1.0 / strideH);
   int pad_needed_H = (H_output - 1) * strideH + windowH - H_input;
   padUp = floor(pad_needed_H / 2.0);
   padDown = pad_needed_H - padUp;
+  if (strideW == 0) {
+    MS_LOG(ERROR) << "strideW is zero";
+    return RET_ERROR;
+  }
   int W_output = ceil(W_input * 1.0 / strideW);
   int pad_needed_W = (W_output - 1) * strideW + windowW - W_input;
   padLeft = floor(pad_needed_W / 2.0);
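Note: the two added blocks guard the divisions below them; without them a zero strideH or strideW would divide by zero while computing the SAME-padding amounts. The surrounding arithmetic is the usual SAME-padding formula: output = ceil(input / stride), pad_needed = (output - 1) * stride + window - input, split into a before/after pair. A compact sketch of that calculation along one axis, including the new guard; the helper name is illustrative:

  #include <cmath>
  #include <vector>

  // Sketch: SAME-padding along one axis, with the zero-stride guard this commit adds.
  // Returns {pad_before, pad_after}, or an empty vector when the stride is invalid.
  std::vector<int> SamePadding1D(int input, int window, int stride) {
    if (stride == 0) {
      return {};  // mirrors the new strideH/strideW checks: never divide by zero
    }
    int output = static_cast<int>(std::ceil(input * 1.0 / stride));
    int pad_needed = (output - 1) * stride + window - input;
    int pad_before = static_cast<int>(std::floor(pad_needed / 2.0));
    return {pad_before, pad_needed - pad_before};
  }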
@@ -227,9 +226,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor,
   return RET_OK;
 }
-void Split(const std::string &src_str,
-           std::vector<std::string> *dst_str,
-           const std::string &chr) {
+void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
   std::string ::size_type p1 = 0, p2 = src_str.find(chr);
   while (std::string::npos != p2) {
     dst_str->push_back(src_str.substr(p1, p2 - p1));
......
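Note: the Split change above only joins the parameter list onto one line. The body walks the string with find/substr, pushing each delimited piece into the output vector. A self-contained sketch in the same style, with the trailing-token handling written out as an assumption about the part of the body that is elided here:

  #include <string>
  #include <vector>

  // Sketch: split src_str on the delimiter chr, appending the pieces to *dst_str.
  void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr) {
    std::string::size_type p1 = 0, p2 = src_str.find(chr);
    while (std::string::npos != p2) {
      dst_str->push_back(src_str.substr(p1, p2 - p1));  // token before the next delimiter
      p1 = p2 + chr.size();
      p2 = src_str.find(chr, p1);
    }
    if (p1 != src_str.length()) {
      dst_str->push_back(src_str.substr(p1));  // keep whatever follows the last delimiter (assumed behaviour)
    }
  }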
@@ -32,7 +32,6 @@
 #include <memory>
 #include <utility>
 struct ImgDims {
   int channel = 0;
   int width = 0;
@@ -43,8 +42,6 @@ struct ImgDims {
   std::shared_ptr<mindspore::session::LiteSession> sess = nullptr;
 };*/
 class MSNetWork {
  public:
   MSNetWork();
@@ -55,6 +52,7 @@ class MSNetWork {
   int ReleaseNets(void);
  protected:
   mindspore::session::LiteSession *session;
   mindspore::lite::Model *model;
   static const int RET_CATEGORY_SUM = 601;
......
@@ -76,10 +76,10 @@ cv::Mat PreProcessImageData(cv::Mat input) {
   imgFloatTmp.convertTo(imgResized256, CV_32FC3, normalizMin / normalizMax);
-  int offsetX = 16;
-  int offsetY = 16;
-  int cropWidth = 224;
-  int cropHeight = 224;
+  const int offsetX = 16;
+  const int offsetY = 16;
+  const int cropWidth = 224;
+  const int cropHeight = 224;
   // Standardization processing.
   float meanR = 0.485;
......
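Note: making offsetX/offsetY/cropWidth/cropHeight const documents that they are fixed crop parameters: the image is scaled into floating point and a 224x224 region is taken at a 16-pixel offset, i.e. the center crop of a 256x256 resize, before the per-channel standardization that starts with meanR = 0.485. A sketch of that resize-and-crop step with OpenCV; the 256x256 resize and the 1/255 scale factor are assumptions drawn from the surrounding code, not the project's exact values:

  #include <opencv2/opencv.hpp>

  // Sketch: resize to 256x256, scale to [0, 1], then take the central 224x224 crop.
  cv::Mat CenterCrop224(const cv::Mat &input) {
    cv::Mat resized256;
    cv::resize(input, resized256, cv::Size(256, 256));
    cv::Mat img_float;
    resized256.convertTo(img_float, CV_32FC3, 1.0 / 255);  // stands in for normalizMin / normalizMax above
    const int offsetX = 16;
    const int offsetY = 16;
    const int cropWidth = 224;
    const int cropHeight = 224;
    return img_float(cv::Rect(offsetX, offsetY, cropWidth, cropHeight)).clone();  // own the cropped pixels
  }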