Commit 197f0a92 authored by Yilin Gui, committed by Jiangtao Hu

Perception: fix camera & sensor fusion pipeline (#1835)

* Perception: fix camera & sensor fusion pipeline; add onboard visualization confs

* Perception: remove visualization conf files

* Perception: remove visualization conf files

* Perception: remove visualization conf files
Parent 24906b7c
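
Note: the bulk of this change is a rename of the front camera sensor IDs across code and configs: onsemi_obstacle becomes front_6mm and onsemi_narrow becomes front_12mm. The helper below is purely illustrative (it does not exist in the repository); it only records the mapping applied throughout the hunks.

#include <map>
#include <string>

// Illustrative only, not part of this commit: the sensor-name mapping applied
// throughout the diff.
inline std::string RenameSensor(const std::string& old_name) {
  static const std::map<std::string, std::string> kRenames = {
      {"onsemi_obstacle", "front_6mm"},   // front obstacle camera, 6 mm lens
      {"onsemi_narrow", "front_12mm"},    // front narrow camera, 12 mm lens
  };
  const auto it = kRenames.find(old_name);
  return it != kRenames.end() ? it->second : old_name;  // unmapped names pass through
}
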
......@@ -331,7 +331,7 @@ void WriteFusionTracking(std::ofstream &fout,
return;
}
AINFO << "Write track results:" << frame_num;
if (camera_name == "onsemi_narrow") {
if (camera_name == "front_12mm") {
for (size_t i = 0; i < tracked_object.size(); ++i) {
base::ObjectPtr ptr = tracked_object[i];
char output[300];
......@@ -361,7 +361,7 @@ void WriteFusionTracking(std::ofstream &fout,
ptr->velocity[2]);
fout << output << std::endl;
}
} else if (camera_name == "onsemi_obstacle") {
} else if (camera_name == "front_6mm") {
for (size_t i = 0; i < tracked_object.size(); ++i) {
base::ObjectPtr ptr = tracked_object[i];
char output[300];
......
......@@ -300,7 +300,7 @@ bool ObstacleCameraPerception::Perception(
frame->data_provider->sensor_name());
CHECK(frame->calibration_service != nullptr);
// lane detector and postprocessor: work on onsemi_obstacle only
// lane detector and postprocessor: work on front_6mm only
if (lane_calibration_working_sensor_name_
== frame->data_provider->sensor_name()) {
LaneDetectorOptions lane_detetor_options;
......
......@@ -26,7 +26,7 @@ namespace camera {
struct CameraPerceptionInitOptions : public BaseInitOptions {
// TODO(Xun): modified to be configurable
std::string lane_calibration_working_sensor_name = "onsemi_obstacle";
std::string lane_calibration_working_sensor_name = "front_6mm";
std::string calibrator_method = "LaneLineCalibrator";
};
......
......@@ -33,7 +33,7 @@ DEFINE_bool(lane_ego_debug, false, "lane ego debug");
DEFINE_bool(lane_result_output, false, "output the lane result");
DEFINE_bool(lane_points_output, false, "output the detected lane points");
DEFINE_string(image_dir, "./image/", "test image directory");
DEFINE_string(camera_intrinsics_yaml, "params/onsemi_obstacle_intrinsics.yaml",
DEFINE_string(camera_intrinsics_yaml, "params/front_6mm_intrinsics.yaml",
"camera intrinsics_yaml");
namespace apollo {
......
......@@ -43,7 +43,7 @@ void lane_postprocessor_eval() {
init_options.root_dir = "data/";
base::BrownCameraDistortionModel model;
bool flag = common::LoadBrownCameraIntrinsic(
"params/onsemi_obstacle_intrinsics.yaml", &model);
"params/front_6mm_intrinsics.yaml", &model);
if (!flag) {
AERROR << "LoadBrownCameraIntrinsic Error!";
return;
......
sensor_meta {
name: "onsemi_obstacle"
name: "front_6mm"
type: STEREO_CAMERA
orientation: FRONT
}
......@@ -154,7 +154,7 @@ int main() {
}
DataProvider::InitOptions dp_init_options;
dp_init_options.sensor_name = "onsemi_obstacle";
dp_init_options.sensor_name = "front_6mm";
dp_init_options.image_height = FLAGS_height;
dp_init_options.image_width = FLAGS_width;
......@@ -169,7 +169,7 @@ int main() {
init_options.conf_file = FLAGS_detector_conf;
base::BrownCameraDistortionModel model;
common::LoadBrownCameraIntrinsic("params/onsemi_obstacle_intrinsics.yaml",
common::LoadBrownCameraIntrinsic("params/front_6mm_intrinsics.yaml",
&model);
init_options.base_camera_model = model.get_camera_model();
auto pinhole =
......
child_frame_id: onsemi_narrow
child_frame_id: front_12mm
transform:
rotation:
z: -0.4907472563510946
......
......@@ -4,7 +4,7 @@ header:
nsecs: 0
seq: 0
frame_id: velodyne64
child_frame_id: onsemi_obstacle
child_frame_id: front_6mm
transform:
rotation:
x: -0.5038530686895437
......
......@@ -35,9 +35,9 @@ DEFINE_string(image_color, "bgr", "color space of image");
DEFINE_string(config_root, "conf/perception/camera/", "config_root");
DEFINE_string(tf_file, "", "tf file");
DEFINE_string(config_file, "obstacle.pt", "config_file");
DEFINE_string(narrow_name, "onsemi_narrow", " camera for projecting");
DEFINE_string(base_camera_name, "onsemi_obstacle", "camera to be peojected");
DEFINE_string(sensor_name, "onsemi_obstacle,onsemi_narrow", "camera to use");
DEFINE_string(narrow_name, "front_12mm", " camera for projecting");
DEFINE_string(base_camera_name, "front_6mm", "camera to be peojected");
DEFINE_string(sensor_name, "front_6mm,front_12mm", "camera to use");
DEFINE_string(params_dir, "/home/caros/cyber/params", "params dir");
DEFINE_string(visualize_dir, "/tmp/0000", "visualize dir");
DEFINE_double(camera_fps, 15, "camera_fps");
......
......@@ -146,11 +146,11 @@ void Visualizer::ShowResult(const cv::Mat &img, const CameraFrame &frame) {
if (frame.timestamp - last_timestamp_ > 0.02) {
cv::Mat bigimg(world_h_, small_w_ + wide_pixel_, CV_8UC3);
camera_image_["onsemi_obstacle"].copyTo(bigimg(cv::Rect(0,
camera_image_["front_6mm"].copyTo(bigimg(cv::Rect(0,
0,
small_w_,
small_h_)));
camera_image_["onsemi_narrow"].copyTo(bigimg(cv::Rect(0,
camera_image_["front_12mm"].copyTo(bigimg(cv::Rect(0,
small_h_,
small_w_,
small_h_)));
......
......@@ -111,15 +111,6 @@ bool HMTrackersObjectsAssociation::Associate(
&association_result->assignments, &association_result->unassigned_tracks,
&association_result->unassigned_measurements);
// const auto& assignments = association_result->assignments;
// for (size_t i = 0; i < assignments.size(); ++i) {
// int track_id =
// fusion_tracks[assignments[i].first]->GetTrackId();
// int obs_id =
// sensor_objects[assignments[i].second]->GetBaseObject()->track_id;
// ADEBUG << "local_track_id_assign: " << track_id << ", " << obs_id;
//}
// start do post assign
std::vector<TrackMeasurmentPair> post_assignments;
PostIdAssign(fusion_tracks, sensor_objects,
......@@ -147,15 +138,6 @@ bool HMTrackersObjectsAssociation::Associate(
<< ", unassigned_measuremnets = "
<< association_result->unassigned_measurements.size();
// const auto& final_assignments = association_result->assignments;
// for (size_t i = 0; i < final_assignments.size(); ++i) {
// int track_id =
// fusion_tracks[final_assignments[i].first]->GetTrackId();
// int obs_id =
// sensor_objects[final_assignments[i].second]->GetBaseObject()->track_id;
// ADEBUG << "final_track_id_assign: " << track_id << ", " << obs_id;
//}
return state;
}
void HMTrackersObjectsAssociation::PostIdAssign(
......@@ -200,14 +182,14 @@ bool HMTrackersObjectsAssociation::MinimizeAssignment(
std::vector<size_t>* unassigned_measurements) {
common::GatedHungarianMatcher<float>::OptimizeFlag opt_flag =
common::GatedHungarianMatcher<float>::OptimizeFlag::OPTMIN;
common::SecureMat<float> global_costs = optimizer_.global_costs();
common::SecureMat<float>* global_costs = optimizer_.mutable_global_costs();
int rows = static_cast<int>(unassigned_tracks->size());
int cols = static_cast<int>(unassigned_measurements->size());
global_costs.Resize(rows, cols);
global_costs->Resize(rows, cols);
for (int r_i = 0; r_i < rows; r_i++) {
for (int c_i = 0; c_i < cols; c_i++) {
global_costs(r_i, c_i) = static_cast<float>(association_mat[r_i][c_i]);
(*global_costs)(r_i, c_i) = static_cast<float>(association_mat[r_i][c_i]);
}
}
std::vector<TrackMeasurmentPair> local_assignments;
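
Note: the MinimizeAssignment hunk above is a behavioral fix rather than a rename. The removed line copied the matcher's cost matrix into a local SecureMat, so the subsequent Resize() and element writes never reached the optimizer; the new mutable_global_costs() pointer lets the Hungarian matcher operate on the filled-in costs. A minimal sketch of the pattern, with made-up types rather than Apollo's GatedHungarianMatcher/SecureMat API:

#include <vector>

class CostHolder {
 public:
  std::vector<float> costs() const { return costs_; }      // returns a copy
  std::vector<float>* mutable_costs() { return &costs_; }  // exposes internal storage
 private:
  std::vector<float> costs_;
};

void FillCosts(CostHolder* holder) {
  // Bug pattern: writes go to a temporary copy and are silently lost.
  //   std::vector<float> c = holder->costs();
  //   c.assign(4, 1.0f);
  // Fixed pattern, mirroring optimizer_.mutable_global_costs() in the hunk:
  std::vector<float>* c = holder->mutable_costs();
  c->assign(4, 1.0f);  // the owner of the costs now actually sees the values
}
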
......@@ -380,7 +362,7 @@ void HMTrackersObjectsAssociation::IdAssign(
/* when camera system has sub-fusion of obstacle & narrow, they share
* the same track-id sequence. thus, latest camera object is ok for
* camera id assign and its information is more up to date. */
if (sensor_id == "onsemi_obstacle" || sensor_id == "onsemi_narrow") {
if (sensor_id == "front_6mm" || sensor_id == "front_12mm") {
obj = fusion_tracks[i]->GetLatestCameraObject();
}
if (obj == nullptr) {
......@@ -399,7 +381,7 @@ void HMTrackersObjectsAssociation::IdAssign(
// with the track which only have narrow camera object
// In post id_assign, we do this.
if (post == false &&
(sensor_id == "onsemi_obstacle" || sensor_id == "onsemi_narrow"))
(sensor_id == "front_6mm" || sensor_id == "front_12mm"))
continue;
if (it != sensor_id_2_track_ind.end()) {
......
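
Note: the comments retained in the IdAssign hunk explain the rule behind the two camera names: the front cameras are sub-fused and share a single track-id sequence, so the latest camera object is used for id assignment, and camera-only matches are deferred to PostIdAssign. A tiny sketch of the grouping check, illustrative only and with no Apollo types:

#include <string>

// Groups the two sub-fused front cameras so that ids from either one are
// matched against the same track-id sequence.
inline bool IsSubFusedFrontCamera(const std::string& sensor_id) {
  return sensor_id == "front_6mm" || sensor_id == "front_12mm";
}
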
......@@ -44,7 +44,7 @@ struct DstExistanceFusionOptions {
{"camera_smartereye", 110},
{"camera_front_obstacle", 110},
{"camera_front_narrow", 150},
{"onsemi_obstacle", 110},
{"front_6mm", 110},
};
double track_object_max_match_distance_ = 4.0;
};
......
......@@ -71,7 +71,7 @@ struct DstTypeFusionOptions {
std::map<std::string, double> camera_max_valid_dist_ = {
{"camera_smartereye", 110},
{"camera_front_obstacle", 110},
{"onsemi_obstacle", 110},
{"front_6mm", 110},
{"camera_front_narrow", 150},
};
std::map<std::string, double> sensor_reliability_ = {
......@@ -79,7 +79,7 @@ struct DstTypeFusionOptions {
{"velodyne_64", 0.5},
{"velodyne128", 0.5},
{"camera_smartereye", 0.95},
{"onsemi_obstacle", 0.95},
{"front_6mm", 0.95},
{"camera_front_obstacle", 0.95},
{"camera_front_narrow", 0.5},
};
......@@ -88,7 +88,7 @@ struct DstTypeFusionOptions {
{"velodyne_64", 0.5},
{"velodyne128", 0.5},
{"camera_smartereye", 0.2},
{"onsemi_obstacle", 0.2},
{"front_6mm", 0.2},
{"camera_front_obstacle", 0.2},
{"camera_front_narrow", 0.2},
};
......
......@@ -181,8 +181,8 @@ bool PbfGatekeeper::RadarAbleToPublish(const TrackPtr &track, bool is_night) {
bool PbfGatekeeper::CameraAbleToPublish(const TrackPtr &track, bool is_night) {
bool visible_in_camera = track->IsCameraVisible();
SensorId2ObjectMap &camera_objects = track->GetCameraObjects();
auto iter = camera_objects.find("onsemi_obstacle");
auto iter_narrow = camera_objects.find("onsemi_narrow");
auto iter = camera_objects.find("front_6mm");
auto iter_narrow = camera_objects.find("front_12mm");
iter = iter != camera_objects.end() ? iter : iter_narrow;
if (params_.publish_if_has_camera && visible_in_camera &&
iter != camera_objects.end() && params_.use_camera_3d && !is_night) {
......
......@@ -55,9 +55,9 @@ TEST(PbfGatekeeperTest, test) {
EXPECT_TRUE(sensor_manager->GetSensorInfo("velodyne64", &lidar_info));
EXPECT_TRUE(sensor_manager->GetSensorInfo("radar_front", &radar_front_info));
EXPECT_TRUE(
sensor_manager->GetSensorInfo("onsemi_obstacle", &camera_obstacle_info));
sensor_manager->GetSensorInfo("front_6mm", &camera_obstacle_info));
EXPECT_TRUE(
sensor_manager->GetSensorInfo("onsemi_narrow", &camera_narrow_info));
sensor_manager->GetSensorInfo("front_12mm", &camera_narrow_info));
EXPECT_TRUE(sensor_manager->GetSensorInfo("radar_rear", &radar_rear_info));
SensorPtr lidar_sensor(new Sensor(lidar_info));
......
......@@ -41,7 +41,6 @@ class CameraPerceptionVizMessage : public cyber::message::IntraMessage {
}
~CameraPerceptionVizMessage() = default;
CameraPerceptionVizMessage(const CameraPerceptionVizMessage&) = delete;
CameraPerceptionVizMessage& operator=(
const CameraPerceptionVizMessage&) = delete;
......
......@@ -177,8 +177,12 @@ bool GetProjectMatrix(
FusionCameraDetectionComponent::~FusionCameraDetectionComponent() {}
bool FusionCameraDetectionComponent::Init() {
// TODO(someone): read output-channel name from config
writer_ = node_->CreateWriter<PerceptionObstacles>("/perception/obstacles");
sensorframe_writer_ = node_->CreateWriter<SensorFrameMessage>(
"/perception/inner/PrefusedObjects");
camera_viz_writer_ = node_->CreateWriter<CameraPerceptionVizMessage>(
"/perception/inner/camera_viz_msg");
if (InitConfig() != cyber::SUCC) {
AERROR << "InitConfig() failed.";
return false;
......@@ -239,7 +243,7 @@ void FusionCameraDetectionComponent::OnReceiveImage(
// protobuf msg
std::shared_ptr<apollo::perception::PerceptionObstacles> out_message(
new (std::nothrow) apollo::perception::PerceptionObstacles);
apollo::common::ErrorCode error_code = apollo::common::PERCEPTION_ERROR_NONE;
apollo::common::ErrorCode error_code = apollo::common::OK;
// prefused msg
std::shared_ptr<SensorFrameMessage> prefused_message(new (std::nothrow)
......@@ -259,6 +263,10 @@ void FusionCameraDetectionComponent::OnReceiveImage(
return;
}
bool send_sensorframe_ret =
sensorframe_writer_->Write(prefused_message);
AINFO << "send out prefused msg, ts: " << std::to_string(msg_timestamp)
<< "ret: " << send_sensorframe_ret;
// Send output msg
if (output_final_obstacles_) {
writer_->Write(out_message);
......@@ -609,7 +617,7 @@ int FusionCameraDetectionComponent::InternalProc(
prefused_message->error_code_ = *error_code;
return cyber::FAIL;
}
*error_code = apollo::common::ErrorCode::PERCEPTION_ERROR_NONE;
*error_code = apollo::common::ErrorCode::OK;
prefused_message->error_code_ = *error_code;
prefused_message->frame_->camera_frame_supplement.on_use = true;
......@@ -636,7 +644,9 @@ int FusionCameraDetectionComponent::InternalProc(
camera_name, msg_timestamp, camera2world_trans.matrix(), image_blob,
camera_frame.tracked_objects, camera_frame.lane_objects,
*error_code));
// Send(camera_perception_viz_message_channel_name_, viz_msg);
bool send_viz_ret = camera_viz_writer_->Write(viz_msg);
AINFO << "send out camera visualization msg, ts: "
<< std::to_string(msg_timestamp) << " send_viz_ret: " << send_viz_ret;
}
return cyber::SUCC;
......
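
Note: besides the existing obstacle writer, Init() now also creates writers for the pre-fused SensorFrameMessage and the CameraPerceptionVizMessage, and OnReceiveImage()/InternalProc() publish on them. A minimal sketch of the Cyber RT writer pattern these changes rely on; the channel name and message type are taken from the diff, while the standalone node name is made up:

#include <memory>

#include "cyber/common/log.h"
#include "cyber/cyber.h"
#include "modules/perception/proto/perception_obstacle.pb.h"

int main(int argc, char* argv[]) {
  apollo::cyber::Init(argv[0]);
  auto node = apollo::cyber::CreateNode("camera_fusion_demo");  // node name is made up
  auto writer = node->CreateWriter<apollo::perception::PerceptionObstacles>(
      "/perception/obstacles");  // channel name as in the diff
  auto out_message = std::make_shared<apollo::perception::PerceptionObstacles>();
  const bool sent = writer->Write(out_message);  // same Write() call the component makes
  AINFO << "sent perception obstacles, ret: " << sent;
  return 0;
}
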
......@@ -37,6 +37,7 @@
#include "modules/perception/onboard/proto/fusion_camera_detection_component.pb.h"
#include "modules/perception/onboard/transform_wrapper/transform_wrapper.h"
#include "modules/perception/proto/perception_obstacle.pb.h"
#include "modules/perception/onboard/component/camera_perception_viz_message.h"
namespace apollo {
namespace perception {
......@@ -160,8 +161,11 @@ class FusionCameraDetectionComponent :
std::shared_ptr<apollo::cyber::Writer<
apollo::perception::PerceptionObstacles>> writer_;
// std::shared_ptr<apollo::cyber::Writer<SensorFrameMessage>>
// sensorframe_writer_;
std::shared_ptr<apollo::cyber::Writer<SensorFrameMessage>>
sensorframe_writer_;
std::shared_ptr<apollo::cyber::Writer<
CameraPerceptionVizMessage>> camera_viz_writer_;
};
CYBER_REGISTER_COMPONENT(FusionCameraDetectionComponent);
......
......@@ -73,7 +73,7 @@ bool FusionComponent::Proc(const std::shared_ptr<SensorFrameMessage>& message) {
// send msg for visualization
if (FLAGS_obs_enable_visualization) {
// Send("/apollo/perception/inner/PrefusedObjects", viz_message);
writer_->Write(out_message);
inner_writer_->Write(viz_message);
}
}
}
......
......@@ -3,7 +3,7 @@ syntax = "proto2";
package apollo.perception.onboard;
message FusionCameraDetection {
optional string camera_names = 1 [default = "onsemi_obstacle,onsemi_narrow"];
optional string camera_names = 1 [default = "front_6mm,front_12mm"];
optional string input_camera_channel_names = 2 [default = "/sensor/camera/traffic/image_short,/sensor/camera/obstacle/image_narrow"];
optional double timestamp_offset = 3 [default = 0.0];
optional string camera_obstacle_perception_conf_dir = 4 [default = "conf/perception/camera"];
......@@ -17,7 +17,7 @@ message FusionCameraDetection {
optional string prefused_channel_name = 12 [default = "/perception/inner/PrefusedObjects"];
optional double default_camera_pitch = 13 [default = 0.0];
optional double default_camera_height = 14 [default = 1.5];
optional string lane_calibration_working_sensor_name = 15 [default = "onsemi_obstacle"];
optional string lane_calibration_working_sensor_name = 15 [default = "front_6mm"];
optional string calibrator_method = 16 [default = "LaneLineCalibrator"];
optional string calib_service_name = 17 [default = "OnlineCalibrationService"];
optional bool run_calib_service = 18 [default = true];
......
[CameraVisualization]
visualizer_conf_model_name = GLFusionVisualizer_camera_onboard
main_camera_name = onsemi_obstacle
show_hdmap = true
hdmap_search_distance = 150.0
camera_names: onsemi_obstacle,onsemi_narrow
input_camera_channel_names : /sensor/camera/traffic/image_short,/sensor/camera/obstacle/image_narrow
timestamp_offset : 0.0
camera_obstacle_perception_conf_dir : conf/perception/camera
# camera_obstacle_perception_conf_file : obstacle_fusion.pt
camera_obstacle_perception_conf_file : obstacle.pt
frame_capacity : 20
image_channel_num : 3
enable_undistortion : false
enable_visualization : false
output_final_obstacles : false
output_obstacles_channel_name : /perception/obstacles
camera_perception_viz_message_channel_name : /perception/inner/camera_viz_msg
prefused_channel_name : /perception/inner/PrefusedObjects
default_camera_pitch : 0.0
default_camera_height : 1.5
lane_calibration_working_sensor_name : onsemi_obstacle
calibrator_method : LaneLineCalibrator
calib_service_name : OnlineCalibrationService
run_calib_service : true
output_camera_debug_msg : true
camera_debug_channel_name : /perception/obstacles_camera_debug
ts_diff : 0.1
\ No newline at end of file
[FusionCameraDetection]
camera_names = onsemi_obstacle,onsemi_narrow
input_camera_channel_names = /sensor/camera/traffic/image_short,/sensor/camera/obstacle/image_narrow
timestamp_offset = 0.0
camera_obstacle_perception_conf_dir = conf/perception/camera
# camera_obstacle_perception_conf_file = obstacle_fusion.pt
camera_obstacle_perception_conf_file = obstacle.pt
frame_capacity = 20
image_channel_num = 3
enable_undistortion = false
enable_visualization = false
output_final_obstacles = false
output_obstacles_channel_name = /perception/obstacles
camera_perception_viz_message_channel_name = /perception/inner/camera_viz_msg
prefused_channel_name = /perception/inner/PrefusedObjects
default_camera_pitch = 0.0
default_camera_height = 1.5
lane_calibration_working_sensor_name = onsemi_obstacle
calibrator_method = LaneLineCalibrator
calib_service_name = OnlineCalibrationService
run_calib_service = true
output_camera_debug_msg = true
camera_debug_channel_name = /perception/obstacles_camera_debug
ts_diff = 0.1
[FusionCameraDetectionViz]
camera_names = onsemi_obstacle,onsemi_narrow
input_camera_channel_names = /sensor/camera/traffic/image_short,/sensor/camera/obstacle/image_narrow
timestamp_offset = 0.0
camera_obstacle_perception_conf_dir = conf/perception/camera
# camera_obstacle_perception_conf_file = obstacle_fusion.pt
camera_obstacle_perception_conf_file = obstacle.pt
frame_capacity = 20
image_channel_num = 3
enable_undistortion = true
enable_visualization = true
output_final_obstacles = false
output_obstacles_channel_name = /perception/obstacles
camera_perception_viz_message_channel_name = /perception/inner/camera_viz_msg
prefused_channel_name = /perception/inner/PrefusedObjects
default_camera_pitch = 0.0
default_camera_height = 1.5
lane_calibration_working_sensor_name = onsemi_obstacle
calibrator_method = LaneLineCalibrator
calib_service_name = OnlineCalibrationService
run_calib_service = true
output_camera_debug_msg = true
camera_debug_channel_name = /perception/obstacles_camera_debug
ts_diff = 0.1
\ No newline at end of file
camera_names: "front_6mm,front_12mm"
input_camera_channel_names : "/apollo/sensor/camera/front_6mm/image,/apollo/camera/obstacle/front_12mm/image"
input_camera_channel_names : "/apollo/sensor/camera/front_6mm/image,/apollo/sensor/camera/front_12mm/image"
timestamp_offset : 0.0
camera_obstacle_perception_conf_dir : "/apollo/modules/perception/production/conf/perception/camera"
camera_obstacle_perception_conf_file : "obstacle.pt"
......
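
Note: the block above is the new onboard configuration in protobuf text format; its fields map one-to-one onto the FusionCameraDetection message defined earlier in this diff. A sketch of loading such a file with the usual Cyber text-format helper; the config path below is illustrative only:

#include <iostream>

#include "cyber/common/file.h"
#include "modules/perception/onboard/proto/fusion_camera_detection_component.pb.h"

int main() {
  apollo::perception::onboard::FusionCameraDetection config;
  // GetProtoFromFile is the standard Cyber proto loader; the path is illustrative.
  if (!apollo::cyber::common::GetProtoFromFile(
          "/apollo/modules/perception/production/conf/perception/camera/"
          "fusion_camera_detection_component.pb.txt",
          &config)) {
    std::cerr << "failed to load FusionCameraDetection config" << std::endl;
    return 1;
  }
  std::cout << "camera_names: " << config.camera_names() << std::endl;
  return 0;
}
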
[DisplayOptions]
show_polygon = false
show_camera_objects_3d = true
show_map_roi_3d = true
show_map_road_boundary_3d = true
show_track_id_2d = false
show_track_id_3d = false
show_onsemi_obstacle = true
show_onsemi_narrow = true
show_track_color = true
show_camera_ego_lane = true
show_camera_ego_lane_only = true
model_configs {
name: "GLFusionVisualizer_camera_onboard"
version: "1.0.0"
# render modes
array_string_params {
name: "render_modes"
values: "render_camera MultiCamera3DViewport CameraProj2DViewport Camera2DViewport Lane2DViewport"
}
array_string_params {
name: "viewports_names"
values: "MultiCamera3DViewport"
values: "DummyViewport"
values: "Camera2DViewport"
values: "CameraProj2DViewport"
values: "Lane2DViewport"
}
array_string_params {
name: "lidar_ids"
values: "velodyne64"
#values: "ldlidar_1"
# values: "ldlidar_4"
}
array_string_params {
name: "radar_ids"
values: "radar_front"
# values: "radar_rear"
}
array_string_params {
name: "camera_ids"
values: "onsemi_obstacle"
values: "onsemi_narrow"
# values: "camera_left_forward"
# values: "camera_right_forward"
# values: "camera_rear"
# values: "camera_left_backward"
# values: "camera_right_backward"
}
array_string_params {
name: "ultrasonic_ids"
values: "ultrasonic_16"
}
string_params {
name: "main_sensor_name"
value: "onsemi_obstacle"
}
array_double_params {
name: "main_car_lwh"
values: 4.925
values: 1.864
values: 1.477
}
integer_params {
name: "max_data_buffer_size"
value: 30
}
double_params {
name: "max_buffer_interval"
value: 0.4
}
integer_params {
name: "full_window_viewport_idx"
value: -1
}
double_params {
# radius incremental of reference circle (in meters)
name: "ref_circle_radius_inc"
value: 40.0
# value: 20.0
}
string_params {
# path of display options' init config file
name: "display_opt_init_filepath"
value: "conf/perception/camera_onboard/modules/display_options.ini"
}
array_double_params {
# crop region of screenshots; [left, top, right, bottom]
# [0.0, 0.0, 0.5, 1.0] means capturing the left half
name: "crop_box_normalized"
values: 0.0
values: 0.0
values: 0.5
# values: 1.0
values: 1.0
}
integer_params {
# number of reference circle
name: "ref_circle_num"
value: 4
}
}
......@@ -5,7 +5,7 @@ detector_param {
root_dir : "/apollo/modules/perception/production/data/perception/camera/models/yolo_obstacle_detector"
config_file : "config.pt"
}
camera_name : "onsemi_obstacle"
camera_name : "front_6mm"
}
detector_param {
plugin_param{
......@@ -13,7 +13,7 @@ detector_param {
root_dir : "/apollo/modules/perception/production/data/perception/camera/models/yolo_obstacle_detector"
config_file : "config.pt"
}
camera_name : "onsemi_narrow"
camera_name : "front_12mm"
}
tracker_param {
......@@ -45,7 +45,7 @@ lane_param {
root_dir : "/apollo/modules/perception/production/data/perception/camera/models/lane_detector/"
config_file : "config.pt"
}
camera_name : "onsemi_obstacle"
camera_name : "front_6mm"
}
lane_postprocessor_param {
name : "DenselineLanePostprocessor"
......
[TrafficLightsPerceptionComponent]
tl_tf2_frame_id = world
tl_tf2_child_frame_id = perception_localization_100hz
tf2_timeout_second = 0.01
camera_names = onsemi_traffic,onsemi_narrow,onsemi_obstacle,onsemi_wide
camera_channel_names = /sensor/camera/traffic/image_long,/sensor/camera/obstacle/image_narrow,/sensor/camera/traffic/image_short,/sensor/camera/obstacle/image_wide
tl_image_timestamp_offset = 0.0
max_process_image_fps = 8
query_tf_interval_seconds = 0.3
valid_hdmap_interval = 1.5
image_sys_ts_diff_threshold = 0.5
sync_interval_seconds = 0.5
camera_traffic_light_perception_conf_dir = conf/perception/camera
camera_traffic_light_perception_conf_file = trafficlight.pt
default_image_border_size = 100
traffic_light_output_channel_name = /perception/traffic_light_status
simulation_channel_name = /perception/traffic_light_simulation
\ No newline at end of file
module_config {
module_library : "lib/libperception_camera_cyber_viz.so"
components {
comname : "FusionCameraDetectionViz"
comclass : "FusionCameraDetectionComponent"
confpath : "conf/perception/camera_onboard/camera_detection_component.cy.config"
sender : {
outchannelnames : ["/perception/inner/PrefusedObjects", "/perception/obstacles", "/perception/inner/camera_viz_msg", "/perception/obstacles_camera_debug"]
}
}
components {
comname : "CameraVisualization"
comclass : "CameraVisualizationComponent"
# confpath : "conf/perception/camera_detection_component.cy.config"
receiver : {
inchannelnames : ["/perception/inner/camera_viz_msg"]
inqosfilter : "PassThrough"
}
}
channels {
channelname : "/sensor/camera/traffic/image_short"
msgtype : "adu.common.sensor.Image"
}
channels {
channelname : "/sensor/camera/obstacle/image_narrow"
msgtype : "adu.common.sensor.Image"
}
channels {
channelname : "/perception/obstacles"
msgtype : "adu.common.perception.PerceptionObstacles"
}
channels {
channelname : "/perception/inner/PrefusedObjects"
msgtype : "SensorFrameMessage"
}
channels {
channelname : "/perception/inner/camera_viz_msg"
msgtype : "CameraPerceptionVizMessage"
}
channels {
channelname : "/perception/obstacles_camera_debug"
msgtype : "adu.common.perception.camera.CameraDebug"
}
}
#child_frame_id: onsemi_narrow
child_frame_id: front_12mm
transform:
rotation:
......
......@@ -4,7 +4,6 @@ header:
secs: 0
nsecs: 0
frame_id: velodyne128
#child_frame_id: onsemi_obstacle
child_frame_id: front_6mm
transform:
translation:
......
child_frame_id: onsemi_narrow
child_frame_id: front_12mm
transform:
rotation:
z: -0.4907472563510946
......
......@@ -4,7 +4,7 @@ header:
secs: 0
nsecs: 0
frame_id: velodyne64
child_frame_id: onsemi_obstacle
child_frame_id: front_6mm
transform:
rotation:
x: -0.5085689478608929
......
......@@ -3,7 +3,7 @@ header:
stamp:
secs: 0
nsecs: 0
frame_id: onsemi_obstacle
frame_id: front_6mm
height: 1080
width: 1920
distortion_model: plumb_bob
......
......@@ -2,7 +2,7 @@ header:
stamp:
secs: 0
nsecs: 0
frame_id: onsemi_obstacle
frame_id: front_6mm
seq: 0
child_frame_id: onsemi_traffic
transform:
......
......@@ -5,13 +5,13 @@ sensor_meta {
}
sensor_meta {
name: "onsemi_narrow"
name: "front_12mm"
type: MONOCULAR_CAMERA
orientation: FRONT
}
sensor_meta {
name: "onsemi_obstacle"
name: "front_6mm"
type: MONOCULAR_CAMERA
orientation: FRONT
}
......
......@@ -9,10 +9,10 @@ camera_valid_dist {
valid_dist: 150
}
camera_valid_dist {
camera_name: "onsemi_obstacle"
camera_name: "front_6mm"
valid_dist: 110
}
camera_valid_dist {
camera_name: "onsemi_narrow"
camera_name: "front_12mm"
valid_dist: 150
}
......@@ -19,14 +19,14 @@ lidar_params {
}
camera_params {
name: "onsemi_obstacle"
name: "front_6mm"
valid_dist: 110
reliability: 0.95
reliability_for_unknown: 0.2
}
camera_params {
name: "onsemi_narrow"
name: "front_12mm"
valid_dist: 150
reliability: 0.5
reliability_for_unknown: 0.2
......
......@@ -10,7 +10,14 @@
<module>
<name>perception</name>
<dag_conf>dag_streaming_perception.dag</dag_conf>
<dag_conf>/apollo/modules/perception/production/dag/dag_streaming_perception.dag</dag_conf>
<!-- if not set, use default process -->
<process_name>perception</process_name>
<version>1.0.0</version>
</module>
<module>
<name>perception_camera</name>
<dag_conf>/apollo/modules/perception/production/dag/dag_streaming_perception_camera.dag</dag_conf>
<!-- if not set, use default process -->
<process_name>perception</process_name>
<version>1.0.0</version>
......
<!--this file list the modules which will be loaded dynamicly and
their process name to be running in -->
<cyber>
<desc>cyber modules list config</desc>
<version>1.0.0</version>
<!-- sample module config, and the files should have relative path like
./bin/cyber_launch
./bin/mainboard
./conf/dag_streaming_0.conf -->
<module>
<name>perception_camera_viz</name>
<dag_conf>dag_streaming_perception_camera_viz.dag</dag_conf>
<!-- if not set, use default process -->
<process_name></process_name>
<version>1.0.0</version>
</module>
</cyber>