Unverified commit e267d2b5 authored by Yuan Shuai, committed by GitHub

[LITE][TEST] Enhance mobilenet test, mobile_light demo (#3245)

* Enhance mobilenetv1/v2, add std_dev computation. test=develop

* Enhance cxx mobile_light demo, add std_dev computation. test=develop
Parent 8173ff94
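
In short, this commit replaces the sum/mean logging in the mobilenet unit tests and in the C++ mobile_light demo with per-tensor mean and standard-deviation statistics, computed by two small templated helpers (compute_mean and compute_standard_deviation) that the diffs below add. The following self-contained sketch shows that pattern in isolation; the helper bodies mirror the patch, while main() and its sample values are made up for illustration.

// Population mean and standard deviation over a flat buffer, as added by
// this patch. main() and its sample data are hypothetical.
#include <cmath>
#include <cstddef>
#include <iostream>

template <typename T>
double compute_mean(const T* in, const size_t length) {
  double sum = 0.;
  for (size_t i = 0; i < length; ++i) {
    sum += in[i];
  }
  return sum / length;
}

template <typename T>
double compute_standard_deviation(const T* in,
                                  const size_t length,
                                  bool has_mean = false,
                                  double mean = 10000) {
  // Reuse a caller-supplied mean when available; otherwise compute it here.
  if (!has_mean) {
    mean = compute_mean<T>(in, length);
  }
  double variance = 0.;
  for (size_t i = 0; i < length; ++i) {
    variance += std::pow(in[i] - mean, 2);
  }
  variance /= length;  // population variance (divide by N, not N - 1)
  return std::sqrt(variance);
}

int main() {
  const float data[] = {1.f, 2.f, 3.f, 4.f};  // hypothetical tensor values
  const size_t n = sizeof(data) / sizeof(data[0]);
  const double mean = compute_mean<float>(data, n);
  const double std_dev = compute_standard_deviation<float>(data, n, true, mean);
  // mean = 2.5, std_dev = sqrt(1.25), roughly 1.118
  std::cout << "mean:" << mean << " std_dev:" << std_dev << std::endl;
  return 0;
}
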
@@ -119,21 +119,21 @@ void TestModel(const std::vector<Place>& valid_places,
   // Get detailed result
   size_t output_tensor_num = predictor.GetOutputNames().size();
-  VLOG(1) << "output tesnor num:" << output_tensor_num;
+  VLOG(1) << "output tensor num:" << output_tensor_num;
   for (size_t tidx = 0; tidx < output_tensor_num; ++tidx) {
     auto* output_tensor = predictor.GetOutput(tidx);
     VLOG(1) << "============= output tensor " << tidx << " =============\n";
     auto out_dims = output_tensor->dims();
-    VLOG(1) << "out_dims:" << out_dims;
-    float sum = 0.f;
-    for (int i = 0; i < out_dims.production(); ++i) {
-      sum += output_tensor->data<float>()[i];
-    }
-    VLOG(1) << "out_dims.production():" << out_dims.production();
-    VLOG(1) << "output tensor sum value:" << sum;
-    VLOG(1) << "output tensor mean value:" << sum / out_dims.production();
+    auto out_data = output_tensor->data<float>();
+    auto out_mean = compute_mean<float>(out_data, out_dims.production());
+    auto out_std_dev = compute_standard_deviation<float>(
+        out_data, out_dims.production(), true, out_mean);
+    VLOG(1) << "output tensor dims:" << out_dims;
+    VLOG(1) << "output tensor elements num:" << out_dims.production();
+    VLOG(1) << "output tensor standard deviation:" << out_std_dev;
+    VLOG(1) << "output tensor mean value:" << out_mean;
     // print result
     for (int i = 0; i < out_dims.production(); ++i) {
......
@@ -121,21 +121,21 @@ void TestModel(const std::vector<Place>& valid_places,
   // Get detailed result
   size_t output_tensor_num = predictor.GetOutputNames().size();
-  VLOG(1) << "output tesnor num:" << output_tensor_num;
+  VLOG(1) << "output tensor num:" << output_tensor_num;
   for (size_t tidx = 0; tidx < output_tensor_num; ++tidx) {
     auto* output_tensor = predictor.GetOutput(tidx);
     VLOG(1) << "============= output tensor " << tidx << " =============\n";
     auto out_dims = output_tensor->dims();
-    VLOG(1) << "out_dims:" << out_dims;
-    float sum = 0.f;
-    for (int i = 0; i < out_dims.production(); ++i) {
-      sum += output_tensor->data<float>()[i];
-    }
-    VLOG(1) << "out_dims.production():" << out_dims.production();
-    VLOG(1) << "output tensor sum value:" << sum;
-    VLOG(1) << "output tensor mean value:" << sum / out_dims.production();
+    auto out_data = output_tensor->data<float>();
+    auto out_mean = compute_mean<float>(out_data, out_dims.production());
+    auto out_std_dev = compute_standard_deviation<float>(
+        out_data, out_dims.production(), true, out_mean);
+    VLOG(1) << "output tensor dims:" << out_dims;
+    VLOG(1) << "output tensor elements num:" << out_dims.production();
+    VLOG(1) << "output tensor standard deviation:" << out_std_dev;
+    VLOG(1) << "output tensor mean value:" << out_mean;
     // print result
     for (int i = 0; i < out_dims.production(); ++i) {
......
@@ -17,6 +17,7 @@
 #include <gflags/gflags.h>
 #include <sys/time.h>
 #include <time.h>
+#include <cmath>
 // for eval
 DEFINE_string(model_dir, "", "model dir");
@@ -43,5 +44,31 @@ inline double GetCurrentUS() {
   return 1e+6 * time.tv_sec + time.tv_usec;
 }
+
+template <typename T>
+double compute_mean(const T* in, const size_t length) {
+  double sum = 0.;
+  for (size_t i = 0; i < length; ++i) {
+    sum += in[i];
+  }
+  return sum / length;
+}
+
+template <typename T>
+double compute_standard_deviation(const T* in,
+                                  const size_t length,
+                                  bool has_mean = false,
+                                  double mean = 10000) {
+  if (!has_mean) {
+    mean = compute_mean<T>(in, length);
+  }
+  double variance = 0.;
+  for (size_t i = 0; i < length; ++i) {
+    variance += pow((in[i] - mean), 2);
+  }
+  variance /= length;
+  return sqrt(variance);
+}
+
 } // namespace lite
 } // namespace paddle
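
A usage note on the helpers added above: the tests call compute_standard_deviation with has_mean set to true and a precomputed mean, so only one more pass over the data is needed for the variance; when has_mean is false the helper computes the mean itself, and the default mean = 10000 is just a placeholder that gets overwritten. The variance is divided by length, so the result is the population standard deviation rather than the sample one. A minimal fragment of the two call styles follows, assuming it is placed in the same namespace as the helpers above and that data points at n floats (both hypothetical names).

// Hypothetical fragment; data and n are assumed to exist in scope.
double mean = compute_mean<float>(data, n);
double std_dev_a = compute_standard_deviation<float>(data, n, true, mean);  // reuse the mean
double std_dev_b = compute_standard_deviation<float>(data, n);  // mean recomputed internally
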
@@ -14,6 +14,7 @@
 #include <sys/time.h>
 #include <time.h>
+#include <cmath>
 #include <iostream>
 #include <string>
 #include <vector>
@@ -36,6 +37,32 @@ std::string ShapePrint(const shape_t& shape) {
   return shape_str;
 }
+
+template <typename T>
+double compute_mean(const T* in, const size_t length) {
+  double sum = 0.;
+  for (size_t i = 0; i < length; ++i) {
+    sum += in[i];
+  }
+  return sum / length;
+}
+
+template <typename T>
+double compute_standard_deviation(const T* in,
+                                  const size_t length,
+                                  bool has_mean = false,
+                                  double mean = 10000) {
+  if (!has_mean) {
+    mean = compute_mean<T>(in, length);
+  }
+  double variance = 0.;
+  for (size_t i = 0; i < length; ++i) {
+    variance += pow((in[i] - mean), 2);
+  }
+  variance /= length;
+  return sqrt(variance);
+}
+
 inline double GetCurrentUS() {
   struct timeval time;
   gettimeofday(&time, NULL);
@@ -108,17 +135,17 @@ void RunModel(std::string model_dir,
         predictor->GetOutput(tidx);
     std::cout << "\n--- output tensor " << tidx << " ---" << std::endl;
     auto out_shape = output_tensor->shape();
-    std::cout << "out_shape(NCHW):" << ShapePrint(out_shape) << std::endl;
+    auto out_data = output_tensor->data<float>();
+    auto out_mean = compute_mean<float>(out_data, ShapeProduction(out_shape));
+    auto out_std_dev = compute_standard_deviation<float>(
+        out_data, ShapeProduction(out_shape), true, out_mean);
-    float sum = 0.f;
-    for (int i = 0; i < ShapeProduction(out_shape); ++i) {
-      sum += output_tensor->data<float>()[i];
-    }
+    std::cout << "output shape(NCHW):" << ShapePrint(out_shape) << std::endl;
     std::cout << "output tensor " << tidx
               << " elem num:" << ShapeProduction(out_shape) << std::endl;
-    std::cout << "output tensor " << tidx << " sum value:" << sum << std::endl;
     std::cout << "output tensor " << tidx
-              << " mean value:" << sum / ShapeProduction(out_shape)
+              << " standard deviation:" << out_std_dev << std::endl;
+    std::cout << "output tensor " << tidx << " mean value:" << out_mean
               << std::endl;
     // print output
......
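
For readability, the demo's per-tensor reporting after this patch, pieced together from the added lines above, reads roughly as the fragment below. It assumes the surrounding RunModel loop provides predictor and tidx, and it relies on ShapePrint, ShapeProduction and the statistics helpers defined earlier in the same file.

// Reconstructed from the diff above; not a new API, just the added lines in order.
// The declaration of output_tensor is abbreviated here with auto.
auto output_tensor = predictor->GetOutput(tidx);
auto out_shape = output_tensor->shape();
auto out_data = output_tensor->data<float>();
auto out_mean = compute_mean<float>(out_data, ShapeProduction(out_shape));
auto out_std_dev = compute_standard_deviation<float>(
    out_data, ShapeProduction(out_shape), true, out_mean);
std::cout << "output shape(NCHW):" << ShapePrint(out_shape) << std::endl;
std::cout << "output tensor " << tidx
          << " elem num:" << ShapeProduction(out_shape) << std::endl;
std::cout << "output tensor " << tidx
          << " standard deviation:" << out_std_dev << std::endl;
std::cout << "output tensor " << tidx << " mean value:" << out_mean
          << std::endl;
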