Commit d687b71d authored by Aaron Xiao, committed by Jiangtao Hu

Perception: Fix inference tools BUILD.

Parent c0ca7d1a
load("//tools:cpplint.bzl", "cpplint")
package(default_visibility = ["//visibility:public"])
cc_binary(
name = "cal_table_generator",
srcs = ["cal_table_generator.cc"],
deps = [
"//modules/perception/inference:inference_factory_lib",
"//modules/perception/inference:inference_lib",
"//modules/perception/inference/tensorrt:batch_stream",
"//modules/perception/inference/tensorrt:entropy_calibrator",
"//modules/perception/inference/tensorrt:rt_net",
"//modules/perception/inference/utils:inference_util_lib",
],
)
cc_binary(
name = "denseline_sample",
srcs = ["denseline_sample.cc"],
deps = [
"//modules/perception/inference:inference_factory_lib",
"//modules/perception/inference:inference_lib",
"//modules/perception/inference/tensorrt:batch_stream",
"//modules/perception/inference/tensorrt:entropy_calibrator",
"//modules/perception/inference/tensorrt:rt_net",
"//modules/perception/inference/utils:inference_util_lib",
],
)
cc_binary(
name = "lane_sample",
srcs = ["lane_sample.cc"],
deps = [
"//modules/perception/inference:inference_factory_lib",
"//modules/perception/inference:inference_lib",
"//modules/perception/inference/tensorrt:batch_stream",
"//modules/perception/inference/tensorrt:entropy_calibrator",
"//modules/perception/inference/tensorrt:rt_net",
"//modules/perception/inference/utils:inference_util_lib",
],
)
cc_binary(
name = "yolo_sample",
srcs = ["yolo_sample.cc"],
deps = [
"//modules/perception/inference:inference_factory_lib",
"//modules/perception/inference:inference_lib",
"//modules/perception/inference/tensorrt:batch_stream",
"//modules/perception/inference/tensorrt:entropy_calibrator",
"//modules/perception/inference/tensorrt:rt_net",
"//modules/perception/inference/utils:inference_util_lib",
],
)
cpplint()

# DO NOT MODIFY
# THIS FILE IS GENERATED AUTOMATICALLY TO HELP WRITE BUILD FILE
# DELETE THIS FILE WHEN YOU HAVE CREATED THE COMPLETE BUILD FILE

load("//tools:cpplint.bzl", "cpplint")

package(default_visibility = ["//visibility:public"])

cc_library(
    name = "cal_table_generator",
    srcs = [
        "cal_table_generator.cc",
    ],
    deps = [
    ],
)

cc_library(
    name = "denseline_sample",
    srcs = [
        "denseline_sample.cc",
    ],
    deps = [
    ],
)

cc_library(
    name = "lane_sample",
    srcs = [
        "lane_sample.cc",
    ],
    deps = [
    ],
)

cc_library(
    name = "yolo_sample",
    srcs = [
        "yolo_sample.cc",
    ],
    deps = [
    ],
)

cpplint()
......@@ -13,10 +13,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include <gflags/gflags.h>
#include <iostream>
#include "gflags/gflags.h"
#include "opencv2/opencv.hpp"
#include "cybertron/common/log.h"
#include "modules/perception/inference/inference.h"
#include "modules/perception/inference/tensorrt/batch_stream.h"
#include "modules/perception/inference/tensorrt/entropy_calibrator.h"
......@@ -46,9 +48,9 @@ DEFINE_bool(hwc_input, true, "input blob is hwc order.");
int evaluate_image_list() {
CHECK_EQ(FLAGS_image_channel_num, 3);
int height = FLAGS_height;
int width = FLAGS_width;
int count = FLAGS_image_channel_num * width * height;
const int height = FLAGS_height;
const int width = FLAGS_width;
const int count = FLAGS_image_channel_num * width * height;
std::ifstream fin;
fin.open(FLAGS_test_list, std::ifstream::in);
......@@ -69,8 +71,7 @@ int evaluate_image_list() {
}
fin.close();
std::string out_file;
out_file = FLAGS_batch_root + "/Batch0";
std::string out_file = FLAGS_batch_root + "/Batch0";
std::ofstream out_car(out_file, std::ofstream::out | std::ofstream::binary);
// std::ofstream out_car(out_file, std::ofstream::out);
if (!out_car.is_open()) {
......@@ -79,7 +80,7 @@ int evaluate_image_list() {
}
std::vector<float> cpu_data(count);
// main loop
for (int i = 0; i < img_list.size(); i++) {
for (size_t i = 0; i < img_list.size(); i++) {
std::string image_path = img_list[i] + FLAGS_image_ext;
cv::Mat img = cv::imread(image_path, CV_LOAD_IMAGE_COLOR);
cv::Mat img_org;
......@@ -157,8 +158,6 @@ int evaluate_image_list() {
int main(int argc, char **argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
std::string proto_file = FLAGS_proto_file;
std::string weight_file = FLAGS_weight_file;
if (FLAGS_gen_batch) {
evaluate_image_list();
}
......@@ -173,7 +172,8 @@ int main(int argc, char **argv) {
(new nvinfer1::Int8EntropyCalibrator(stream, 0, true,
FLAGS_cal_table_root));
apollo::perception::inference::RTNet *rt_net =
new apollo::perception::inference::RTNet(proto_file, weight_file, outputs,
new apollo::perception::inference::RTNet(FLAGS_proto_file,
FLAGS_weight_file, outputs,
inputs, calibrator);
rt_net->Init(std::map<std::string, std::vector<int>>());
rt_net->Infer();
......
......@@ -14,9 +14,13 @@
* limitations under the License.
*****************************************************************************/
#include <iostream>
#include "/usr/include/opencv2/opencv.hpp"
#include "/usr/local/include/gflags/gflags.h"
#include "gflags/gflags.h"
#include "opencv2/opencv.hpp"
#include "cybertron/common/log.h"
#include "modules/perception/inference/inference.h"
#include "modules/perception/inference/inference_factory.h"
#include "modules/perception/inference/tensorrt/batch_stream.h"
#include "modules/perception/inference/tensorrt/entropy_calibrator.h"
#include "modules/perception/inference/tensorrt/rt_net.h"
......@@ -43,27 +47,27 @@ int main(int argc, char **argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
std::vector<float> output_data_vec1;
int height = FLAGS_height;
int width = FLAGS_width;
int count = FLAGS_image_channel * width * height;
const int height = FLAGS_height;
const int width = FLAGS_width;
const int count = FLAGS_image_channel * width * height;
std::string proto_file = "./denseline_parser/deploy.prototxt";
std::string weight_file = "./denseline_parser/deploy.caffemodel";
std::string model_root = "./denseline_parser/";
const std::string proto_file = "./denseline_parser/deploy.prototxt";
const std::string weight_file = "./denseline_parser/deploy.caffemodel";
const std::string model_root = "./denseline_parser/";
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
LOG(INFO) << prop.name;
AINFO << prop.name;
apollo::perception::inference::Inference *rt_net;
std::string input_blob_name = "data";
const std::string input_blob_name = "data";
std::vector<std::string> inputs{input_blob_name};
std::vector<std::string> outputs;
apollo::perception::inference::load_data<std::string>(FLAGS_names_file,
&outputs);
for (auto name : outputs) {
LOG(INFO) << "outputs name: " << name;
AINFO << "outputs name: " << name;
}
// std::shared_ptr<nvinfer1::Int8EntropyCalibrator> calibrator = nullptr;
......@@ -74,11 +78,11 @@ int main(int argc, char **argv) {
// apollo::perception::inference::BatchStream stream(2, 50, "./batches/");
// calibrator
// .reset(new nvinfer1::Int8EntropyCalibrator(stream, 0, true, "./"));
LOG(INFO) << "int8";
AINFO << "int8";
rt_net = new apollo::perception::inference::RTNet(
proto_file, weight_file, outputs, inputs, calibrator);
} else {
LOG(INFO) << "fp32";
AINFO << "fp32";
rt_net = apollo::perception::inference::CreateInferenceByName(
"RTNet", proto_file, weight_file, outputs, inputs);
}
......@@ -95,7 +99,7 @@ int main(int argc, char **argv) {
for (auto image_file : image_lists) {
cv::Mat img = cv::imread(FLAGS_image_root + image_file + FLAGS_image_ext);
LOG(INFO) << img.channels();
AINFO << img.channels();
cv::Rect roi(0, FLAGS_offset_y, img.cols, img.rows - FLAGS_offset_y);
cv::Mat img_roi = img(roi);
img_roi.copyTo(img);
......@@ -124,26 +128,23 @@ int main(int argc, char **argv) {
std::vector<float> tmp_vec(blob->cpu_data(),
blob->cpu_data() + blob->count());
// if(output_name=="conv2" || output_name == "conv1") {
if (1) {
int cnt = 0;
LOG(INFO) << output_name << " " << blob->channels() << " "
<< blob->height() << " " << blob->width();
double sum = 0;
for (int i = 0; i < blob->count(); ++i) {
sum += blob->cpu_data()[i];
if (i < 100) {
LOG(INFO) << blob->cpu_data()[i];
}
AINFO << output_name << " " << blob->channels() << " "
<< blob->height() << " " << blob->width();
double sum = 0;
for (int i = 0; i < blob->count(); ++i) {
sum += blob->cpu_data()[i];
if (i < 100) {
AINFO << blob->cpu_data()[i];
}
LOG(INFO) << output_name << ", sum : " << std::endl;
}
AINFO << output_name << ", sum : " << std::endl;
// end of if
output_data_vec.insert(output_data_vec.end(), tmp_vec.begin(),
tmp_vec.end());
}
}
apollo::perception::inference::write_result(FLAGS_res_dir, output_data_vec);
LOG(INFO) << "Hello, World!";
delete rt_net;
// rt_net has deletle calibrator?
// delete calibrator;
......
......@@ -14,9 +14,13 @@
* limitations under the License.
*****************************************************************************/
#include <iostream>
#include "/usr/include/opencv2/opencv.hpp"
#include "/usr/local/include/gflags/gflags.h"
#include "gflags/gflags.h"
#include "opencv2/opencv.hpp"
#include "cybertron/common/log.h"
#include "modules/perception/inference/inference.h"
#include "modules/perception/inference/inference_factory.h"
#include "modules/perception/inference/tensorrt/batch_stream.h"
#include "modules/perception/inference/tensorrt/entropy_calibrator.h"
#include "modules/perception/inference/tensorrt/rt_net.h"
......@@ -29,6 +33,7 @@ DEFINE_string(test_list, "image_list.txt", "path of image list");
DEFINE_string(image_root, "./images/", "path of image dir");
DEFINE_string(image_ext, ".jpg", "path of image ext");
DEFINE_string(res_dir, "./result.dat", "path of result");
int main(int argc, char **argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
std::vector<float> output_data_vec1;
......@@ -39,16 +44,16 @@ int main(int argc, char **argv) {
color_table.push_back(cv::Scalar(255, 0, 0)); // for mask value = 3
color_table.push_back(cv::Scalar(240, 32, 160)); // for mask value = 4
std::string proto_file = "./lane_parser/caffe.pt";
std::string weight_file = "./lane_parser/caffe.caffemodel";
std::string model_root = "./lane_parser/";
const std::string proto_file = "./lane_parser/caffe.pt";
const std::string weight_file = "./lane_parser/caffe.caffemodel";
const std::string model_root = "./lane_parser/";
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
LOG(INFO) << prop.name;
AINFO << prop.name;
apollo::perception::inference::Inference *rt_net;
std::string input_blob_name = "data";
const std::string input_blob_name = "data";
std::vector<std::string> inputs{"data"};
std::vector<std::string> outputs{"loc_pred", "obj_pred", "cls_pred",
......@@ -72,9 +77,9 @@ int main(int argc, char **argv) {
rt_net = apollo::perception::inference::CreateInferenceByName(
"RTNet", proto_file, weight_file, outputs, inputs);
}
int height = 608;
int width = 1024;
int offset_y = 0;
const int height = 608;
const int width = 1024;
const int offset_y = 0;
std::vector<int> shape = {1, 3, height, width};
std::map<std::string, std::vector<int>> shape_map{{input_blob_name, shape}};
......@@ -95,11 +100,10 @@ int main(int argc, char **argv) {
img_roi.copyTo(img);
cv::resize(img, img, cv::Size(width, height));
int count = 1 * width * height;
const int count = 1 * width * height;
float *input = new float[count];
for (int i = 0; i < count; i++) {
input[i] = (img.data[i] - 128) * 0.0078125;
;
}
cudaMemcpy(input_blob->mutable_gpu_data(), input, count * sizeof(float),
cudaMemcpyHostToDevice);
......@@ -111,29 +115,7 @@ int main(int argc, char **argv) {
auto blob = rt_net->get_blob(output_name);
std::vector<float> tmp_vec(blob->cpu_data(),
blob->cpu_data() + blob->count());
#if 0
if (output_name == "output") {
int cnt = 0;
std::cout << output_name << " " << blob->channels() << " "
<< blob->height() << " " << blob->width() << std::endl;
for (int c = 0; c < blob->channels(); c++) {
for (int j = 0; j < blob->height(); j++) {
for (int w = 0; w < blob->width(); w++) {
int offset = blob->offset(0, c, j, w);
cnt++;
std::cout << blob->cpu_data()[offset] << std::endl;
}
}
}
std::cout << std::endl;
}
#endif
if (output_name == "output") {
int output_height = blob->height();
int output_width = blob->width();
int output_channels = blob->channels();
int spatial_dim = output_height * output_width;
const float *output_data = blob->cpu_data();
for (int h = 0; h < img.rows; h++) {
for (int w = 0; w < img.cols; w++) {
......@@ -152,9 +134,6 @@ int main(int argc, char **argv) {
}
apollo::perception::inference::write_result(FLAGS_res_dir, output_data_vec);
std::cout << std::endl;
std::cout << "Hello, World!" << std::endl;
delete rt_net;
return 0;
......
......@@ -14,9 +14,12 @@
* limitations under the License.
*****************************************************************************/
#include <iostream>
#include "/usr/include/opencv2/opencv.hpp"
#include "/usr/local/include/gflags/gflags.h"
#include "gflags/gflags.h"
#include "opencv2/opencv.hpp"
#include "modules/perception/inference/inference.h"
#include "modules/perception/inference/inference_factory.h"
#include "modules/perception/inference/tensorrt/batch_stream.h"
#include "modules/perception/inference/tensorrt/entropy_calibrator.h"
#include "modules/perception/inference/tensorrt/rt_net.h"
......@@ -29,12 +32,13 @@ DEFINE_string(test_list, "image_list.txt", "path of image list");
DEFINE_string(image_root, "./images/", "path of image dir");
DEFINE_string(image_ext, ".jpg", "path of image ext");
DEFINE_string(res_dir, "./result.dat", "path of result");
int main(int argc, char **argv) {
google::ParseCommandLineFlags(&argc, &argv, true);
std::string proto_file = "./yolo/caffe.pt";
std::string weight_file = "./yolo/caffe.caffemodel";
std::string model_root = "./yolo";
const std::string proto_file = "./yolo/caffe.pt";
const std::string weight_file = "./yolo/caffe.caffemodel";
const std::string model_root = "./yolo";
apollo::perception::inference::Inference *rt_net;
std::vector<std::string> outputs{"loc_pred", "obj_pred", "cls_pred",
......@@ -55,9 +59,9 @@ int main(int argc, char **argv) {
rt_net = apollo::perception::inference::CreateInferenceByName(
"RTNet", proto_file, weight_file, outputs, inputs);
}
int height = 576;
int width = 1440;
int offset_y = 312;
const int height = 576;
const int width = 1440;
const int offset_y = 312;
std::vector<int> shape = {1, height, width, 3};
std::map<std::string, std::vector<int>> shape_map{{"data", shape}};
rt_net->Init(shape_map);
......@@ -74,7 +78,7 @@ int main(int argc, char **argv) {
img_roi.copyTo(img);
cv::resize(img, img, cv::Size(width, height));
auto input = rt_net->get_blob("data")->mutable_cpu_data();
int count = 3 * width * height;
const int count = 3 * width * height;
for (int i = 0; i < count; i++) {
input[i] = img.data[i];
}
......@@ -84,23 +88,13 @@ int main(int argc, char **argv) {
for (auto output_name : outputs) {
std::vector<int> shape;
auto blob = rt_net->get_blob(output_name);
int size = blob->count();
const int size = blob->count();
std::vector<float> tmp_vec(blob->cpu_data(), blob->cpu_data() + size);
#if 0
std::cout << output_name << " " << std::endl;
for (int i = 0; i < 500; i++) {
std::cout << tmp_vec[i] << " ";
}
std::cout << std::endl;
#endif
output_data_vec.insert(output_data_vec.end(), tmp_vec.begin(),
tmp_vec.end());
}
}
apollo::perception::inference::write_result(FLAGS_res_dir, output_data_vec);
std::cout << std::endl;
std::cout << "Hello, World!" << std::endl;
delete rt_net;
return 0;
}
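
The four sample binaries touched above share one inference flow: create an RTNet through the inference factory, call Init() with a shape map, fill the input blob, run Infer(), then read the output blobs back. Below is a minimal sketch of that flow assembled only from calls visible in this commit; the model paths, blob names, output list, and shape are illustrative placeholders, and image loading/preprocessing is omitted.

#include <map>
#include <string>
#include <vector>

#include "modules/perception/inference/inference.h"
#include "modules/perception/inference/inference_factory.h"

int main() {
  // Placeholder model files and blob names; the real tools read these
  // from gflags or hard-coded per-model paths.
  const std::string proto_file = "./model/deploy.prototxt";
  const std::string weight_file = "./model/deploy.caffemodel";
  std::vector<std::string> inputs{"data"};
  std::vector<std::string> outputs{"loc_pred"};

  apollo::perception::inference::Inference *rt_net =
      apollo::perception::inference::CreateInferenceByName(
          "RTNet", proto_file, weight_file, outputs, inputs);

  // Reshape the network for a single 3 x 608 x 1024 input (placeholder shape).
  std::map<std::string, std::vector<int>> shape_map{{"data", {1, 3, 608, 1024}}};
  rt_net->Init(shape_map);

  // Fill the input blob on the host, then run inference.
  auto input_blob = rt_net->get_blob("data");
  auto *input = input_blob->mutable_cpu_data();
  // ... copy preprocessed image pixels into `input` here ...
  rt_net->Infer();

  // Collect every requested output blob into one flat vector.
  std::vector<float> output_data_vec;
  for (const auto &name : outputs) {
    auto blob = rt_net->get_blob(name);
    output_data_vec.insert(output_data_vec.end(), blob->cpu_data(),
                           blob->cpu_data() + blob->count());
  }

  delete rt_net;
  return 0;
}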