提交 1e739e8b 编写于 作者: L Liu Jiaming

docker_dev: merge upstream master

#! /usr/bin/env bash
# Cyber RT environment setup.
# Resolves the Apollo workspace root, then exports the search paths
# (PATH via add_to_path, LD_LIBRARY_PATH, PYTHONPATH) and runtime
# defaults used by the Cyber tools (cyber_recorder, cyber_monitor,
# cyber_launch, cyber_channel, cyber_node, cyber_service, visualizer).
#
# NOTE(review): this block previously contained duplicated, conflicting
# assignments left over from an upstream merge (hard-coded /apollo/...
# paths that were later overridden by ${APOLLO_ROOT_DIR}-relative ones,
# plus a shebang stranded mid-file). Only the effective values are kept.

if [ -z "${APOLLO_ROOT_DIR}" ]; then
    export APOLLO_ROOT_DIR="$(cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd -P)"
    # Provides shared helpers such as add_to_path used below.
    source "${APOLLO_ROOT_DIR}/scripts/apollo.bashrc"
fi

export CYBER_PATH="${APOLLO_ROOT_DIR}/cyber"

# Qt runtime used by the GUI tools (cyber_visualizer / cyber_monitor).
export QT5_PATH="/usr/local/qt5"
export LD_LIBRARY_PATH=${QT5_PATH}/lib:$LD_LIBRARY_PATH
export QT_QPA_PLATFORM_PLUGIN_PATH=${QT5_PATH}/plugins
add_to_path "${QT5_PATH}/bin"

# Compiled tool locations under the bazel output tree.
bazel_bin_path="${APOLLO_ROOT_DIR}/bazel-bin"
apollo_tool_path="${bazel_bin_path}/modules/tools"
visualizer_path="${apollo_tool_path}/visualizer"
cyber_bin_path="${bazel_bin_path}/cyber"
cyber_tool_path="${bazel_bin_path}/cyber/tools"
recorder_path="${cyber_tool_path}/cyber_recorder"
monitor_path="${cyber_tool_path}/cyber_monitor"
# Script-based tools live in the source tree, not in bazel-bin.
launch_path="${CYBER_PATH}/tools/cyber_launch"
channel_path="${CYBER_PATH}/tools/cyber_channel"
node_path="${CYBER_PATH}/tools/cyber_node"
service_path="${CYBER_PATH}/tools/cyber_service"
rosbag_to_record_path="${bazel_bin_path}/modules/data/tools/rosbag_to_record"

# TODO(all): place all these in one place and add_to_path
for entry in "${cyber_bin_path}" "${recorder_path}" "${monitor_path}" "${launch_path}" \
    "${channel_path}" "${node_path}" "${service_path}" \
    "${visualizer_path}" \
    "${rosbag_to_record_path}" ; do
    add_to_path "${entry}"
done

# Cyber python bindings.
PYTHON_LD_PATH="${bazel_bin_path}/cyber/py_wrapper"
export PYTHONPATH=${PYTHON_LD_PATH}:${CYBER_PATH}/python:$PYTHONPATH

# Cyber runtime defaults (DDS domain and bind address).
export CYBER_DOMAIN_ID=80
export CYBER_IP=127.0.0.1

# glog defaults: log to files under data/log, keep stderr quiet but
# colored, emit all severities.
export GLOG_log_dir="${APOLLO_ROOT_DIR}/data/log"
export GLOG_alsologtostderr=0
export GLOG_colorlogtostderr=1
export GLOG_minloglevel=0
......
......@@ -34,6 +34,7 @@ VERSION_X86_64="dev-x86_64-18.04-20200607_2302"
VERSION_AARCH64="dev-aarch64-20170927_1111"
VERSION_OPT=""
NO_PULL_IMAGE=""
USER_AGREE="no"
# Check whether user has agreed license agreement
function check_agreement() {
......@@ -51,13 +52,19 @@ function check_agreement() {
cat $AGREEMENT_FILE
tip="Type 'y' or 'Y' to agree to the license agreement above, or type any other key to exit"
echo $tip
read -n 1 user_agreed
if [ "$user_agreed" == "y" ] || [ "$user_agreed" == "Y" ]; then
if [ "$USER_AGREE" == "yes" ]; then
cp $AGREEMENT_FILE $agreement_record
echo "$tip" >> $agreement_record
echo "$user_agreed" >> $agreement_record
else
exit 1
read -n 1 user_agreed
if [ "$user_agreed" == "y" ] || [ "$user_agreed" == "Y" ]; then
cp $AGREEMENT_FILE $agreement_record
echo "$tip" >> $agreement_record
echo "$user_agreed" >> $agreement_record
else
exit 1
fi
fi
}
......@@ -106,6 +113,10 @@ if [ -e /proc/sys/kernel ]; then
echo "/apollo/data/core/core_%e.%p" | sudo tee /proc/sys/kernel/core_pattern > /dev/null
fi
if [ "$1" == "-y" ]; then
USER_AGREE="yes"
fi
source ${APOLLO_ROOT_DIR}/scripts/apollo_base.sh
check_agreement
......@@ -168,6 +179,8 @@ while [ $# -gt 0 ] ; do
NO_PULL_IMAGE="yes"
info "running without pulling docker image"
;;
-y)
;;
stop)
stop_containers
exit 0
......
......@@ -5,6 +5,12 @@
- Follow the tutorial to set up [docker environment](https://github.com/ApolloAuto/apollo/blob/master/docs/howto/how_to_build_and_release.md).
- Download localization data from [Apollo Data Open Platform](http://data.apollo.auto/?name=sensor%20data&data_key=multisensor&data_type=1&locale=en-us&lang=en)(US only).
The localization data is an experimental dataset used to verify the availability of localization. It contains a localization map (local_map/), vehicle params (params/), and sensor recording data (records/). The specific attributes are as follows:
duration: 5 mins
mileage: 3km
areas: city roads in Sunnyvale
weather: sunny day
## 2. Build Apollo
First check and make sure you are in development docker container before you proceed. Now you will need to build from the source.
......@@ -119,4 +125,4 @@ The meaning of each col in the table
- **std**: the standard deviation of the error
- **max**: the maximum value of the error
- **< xx**: percentage of frames whose error is smaller than the indicated range
- **con_frame()**: the maximum number of consecutive frames that satisfy the conditions in parentheses
\ No newline at end of file
- **con_frame()**: the maximum number of consecutive frames that satisfy the conditions in parentheses
......@@ -7,6 +7,12 @@
- 按照[教程](https://github.com/ApolloAuto/apollo/blob/master/README_cn.md)设置Docker环境
-[Apollo数据平台](http://data.apollo.auto/?name=sensor%20data&data_key=multisensor&data_type=1&locale=en-us&lang=en)下载多传感器融合定位demo数据包(仅限美国地区),使用其中*apollo3.5*文件夹下的数据。
此定位数据为实验性质的demo数据,用于验证定位模块的可用性。数据主要包含定位地图(local_map/), 车辆参数(params/), 传感器数据(records/)。具体属性如下:
时长:5分钟
里程:3km
场景:Sunnyvale 城市道路
天气:晴天
## 2. 编译apollo工程
### 2.1 构建docker容器
......@@ -148,4 +154,4 @@ python /apollo/scripts/record_bag.py --stop
- max: 误差的最大值
- <30cm: 距离误差少于30cm的帧所占的百分比
- <1.0d: 角度误差小于1.0d的帧所占的百分比
- con_frame(): 满足括号内条件的最大连续帧数
\ No newline at end of file
- con_frame(): 满足括号内条件的最大连续帧数
......@@ -5,6 +5,12 @@
- Follow the tutorial to set up [docker environment](https://github.com/ApolloAuto/apollo/blob/master/docs/howto/how_to_build_and_release.md) and [build Apollo](https://github.com/ApolloAuto/apollo/blob/master/docs/howto/how_to_launch_Apollo.md).
- Download localization data from [Apollo Data Open Platform](http://data.apollo.auto/?name=sensor%20data&data_key=multisensor&data_type=1&locale=en-us&lang=en)(US only)
The localization data is an experimental dataset used to verify the availability of localization. It contains a localization map (ndt_map/), vehicle params (params/), and sensor recording data (records/). The specific attributes are as follows:
duration: 5 mins
mileage: 3km
areas: city roads in Sunnyvale
weather: sunny day
## 2. Configuring Parameters
Assume that the path to download localization data from is DATA_PATH.
### 2.1. Configure Sensor Extrinsics
......@@ -132,4 +138,4 @@ The meaning of each col in the table
- **std**: the standard deviation of the error
- **max**: the maximum value of the error
- **< xx**: percentage of frames whose error is smaller than the indicated range
- **con_frame()**: the maximum number of consecutive frames that satisfy the conditions in parentheses
\ No newline at end of file
- **con_frame()**: the maximum number of consecutive frames that satisfy the conditions in parentheses
......@@ -7,6 +7,12 @@
- 按照[教程](https://github.com/ApolloAuto/apollo/blob/master/README_cn.md)设置Docker环境并搭建Apollo工程
-[Apollo数据平台](http://data.apollo.auto/?name=sensor%20data&data_key=multisensor&data_type=1&locale=en-us&lang=en)下载定位数据(仅限美国地区)
此定位数据为实验性质的demo数据,用于验证定位模块的可用性。数据主要包含定位地图(ndt_map/), 车辆参数(params/), 传感器数据(records/)。具体属性如下:
时长:5分钟
里程:3km
场景:Sunnyvale 城市道路
天气:晴天
## 2. 配置定位模块
为了使定位模块正确运行,需要对地图路径和传感器外参进行配置。假设下载的定位数据的所在路径为DATA_PATH。
......@@ -156,4 +162,4 @@ NDT模块的统计结果只有一组,即定位输出`/apollo/localization/pose
- max: 误差的最大值
- <30cm: 距离误差少于30cm的帧所占的百分比
- <1.0d: 角度误差小于1.0d的帧所占的百分比
- con_frame(): 满足括号内条件的最大连续帧数
\ No newline at end of file
- con_frame(): 满足括号内条件的最大连续帧数
......@@ -211,7 +211,7 @@ budaoshi@in_dev_docker:/apollo/modules/tools/sensor_calibration$ python extract_
#### 1. 上传预处理后的数据至BOS
**注意:**必须使用开通过权限的 bucket,确认`Bucket名称``所属地域`和提交商务注册时的Bucket名称和所属区域保持一致。
**注意:** 必须使用开通过权限的 bucket,确认`Bucket名称``所属地域`和提交商务注册时的Bucket名称和所属区域保持一致。
在BOS bucket中新建目录sensor_calibration,作为后续云标定服务读取数据的`Input Data Path`,把前面预处理生成的数据拷贝至该目录。目录结构如下:
......@@ -246,7 +246,7 @@ budaoshi@in_dev_docker:/apollo/modules/tools/sensor_calibration$ python extract_
- 默认使用1个6mm、1个12mm摄像头,用户如需要使用第二个6mm摄像头时,需要自行指定文件名,并进行相应修改。
## NEXT
现在,您已经完成摄像头感知设备标定,接下来可以开始[基于摄像头的封闭园区自动驾驶搭建--感知适配](Perception_Configuration_cn.md)
现在,您已经完成摄像头感知设备标定, 接下来可以开始[封闭园区自动驾驶搭建--虚拟车道线制作](../Lidar_Based_Auto_Driving/Virtual_Lane_Generation_cn.md)(如果在适配基于激光雷达的自动驾驶过程中已经制作了虚拟车道线文件,则可以跳过此步骤,进行[基于摄像头的封闭园区自动驾驶搭建--感知适配](Perception_Configuration_cn.md))
## 常见问题
#### 1. 提交后,显示`UNAUTHORIZED`
![lidar_calibration_unauthorized](images/lidar_calibration_unauthorized.png)
......
......@@ -51,7 +51,7 @@
- 直接用数据线将设备连接在IPC的USB3.0接口。接口顺序无固定顺序,用户可自行指定。
![camera_integration_background](images/camera_integration_background.jpeg)
![camera_integration_background](images/camera_integration_background.PNG)
![camera_integration_line](images/camera_integration_line.jpeg)
......
......@@ -115,5 +115,5 @@
#### 1. 自动驾驶过程中车轮来回摆动,有画龙现象
这种情况一般是车辆动力学标定没有做好,建议重新进行动力学标定
#### 2. 车辆换道、停止等效果不理想
- 在保证感知、定位稳定的前提下,可以尝试修改planning配置文件中的相关参数进行调整,详情参考[封闭园区自动驾驶搭建--规划适配](Planning_Configuration_cn.md)
- 在保证感知、定位稳定的前提下,可以尝试修改planning配置文件中的相关参数进行调整,详情参考[封闭园区自动驾驶搭建--规划适配](../Lidar_Based_Auto_Driving/Planning_Configuration_cn.md)
......@@ -193,7 +193,7 @@ budaoshi@in_dev_docker:/apollo/modules/tools/sensor_calibration$ python extract_
#### 1. 上传预处理后的数据至BOS
**注意:**必须使用开通过权限的 bucket,确认`Bucket名称``所属地域`和提交商务注册时的Bucket名称和所属区域保持一致。
**注意:** 必须使用开通过权限的 bucket,确认`Bucket名称``所属地域`和提交商务注册时的Bucket名称和所属区域保持一致。
在BOS bucket中新建目录sensor_calibration,作为后续云标定服务读取数据的`Input Data Path`,把前面预处理生成的数据拷贝至该目录。目录结构如下:
......@@ -229,7 +229,7 @@ budaoshi@in_dev_docker:/apollo/modules/tools/sensor_calibration$ python extract_
确认邮件得到的外参文件合理后,将邮件发送的外参文件的`rotation``translation`的值替换掉`modules/calibration/data/dev_kit/velodyne_params/velodyne16_novatel_extrinsics.yaml`中对应的`rotation``translation`值。注意不要修改`frame_id`、不要直接替换文件。
## NEXT
现在,您已经完成激光雷达感知设备标定,接下来可以开始[基于激光雷达的封闭园区自动驾驶搭建--感知适配](Perception_Configuration_cn.md)
现在,您已经完成激光雷达感知设备标定,接下来可以开始[封闭园区自动驾驶搭建--虚拟车道线制作](Virtual_Lane_Generation_cn.md)
## 常见问题
#### 1. 提交后,显示`UNAUTHORIZED`,如下图所示
......
# 基于激光雷达的封闭园区自动驾驶搭建--感知设备集成
- [基于激光雷达的封闭园区自动驾驶搭建--感知设备集成](#基于激光雷达的封闭园区自动驾驶搭建--感知设备集成)
- [概览](#概览)
- [前提条件](#前提条件)
- [激光雷达安装与数据验证](#激光雷达安装与数据验证)
- [激光雷达的安装固定](#激光雷达的安装固定)
- [激光雷达与车辆的接线](#激光雷达与车辆的接线)
- [1.线缆端口及定义](#1线缆端口及定义)
- [2.确保整车断电](#2确保整车断电)
- [3.将端口1(8pin公头)与激光雷达本体上的8pin母头连接](#3将端口18pin公头与激光雷达本体上的8pin母头连接)
- [4.将激光雷达电源接口(端口4、端口5)接入车上的12V接线盒](#4将激光雷达电源接口端口4端口5接入车上的12v接线盒)
- [5.将端口2(授时)与IMU的授时端口相连](#5将端口2授时与imu的授时端口相连)
- [6.将端口3(网线接口)通过网线与工控机相连](#6将端口3网线接口通过网线与工控机相连)
- [7.再次确认上述安装步骤](#7再次确认上述安装步骤)
- [激光雷达的配置及启动](#激光雷达的配置及启动)
- [1.修改工控机IP地址](#1修改工控机ip地址)
- [2.修改`modules/transform/conf/static_transform_conf.pb.txt`文件](#2修改modulestransformconfstatic_transform_confpbtxt文件)
- [激光雷达数据的验证](#激光雷达数据的验证)
- [毫米波雷达安装与数据验证(根据自身需求配置)](#毫米波雷达安装与数据验证根据自身需求配置)
- [毫米波雷达接口及线序](#毫米波雷达接口及线序)
- [毫米波雷达的安装固定](#毫米波雷达的安装固定)
- [毫米波雷达的配置及启动](#毫米波雷达的配置及启动)
- [毫米波雷达数据的验证](#毫米波雷达数据的验证)
- [NEXT](#next)
- [常见问题](#常见问题)
- [1. 无法打开Lidar配置网页](#1-无法打开lidar配置网页)
## 概览
该用户手册旨在帮助用户完成激光雷达感知设备的安装、集成及数据验证。
## 前提条件
- 完成了[循迹搭建--车辆循迹演示](../Waypoint_Following/Start_Waypoint_Following_cn.md)
## 主要步骤
- 激光雷达安装配置与数据验证
- 毫米波雷达安装配置与数据验证
## 激光雷达安装与数据验证
- 激光雷达型号:80-VLP-16(velodyne 16线激光雷达)
......@@ -105,91 +127,106 @@ budaoshi@in_dev_docker:/apollo$ cyber_launch start modules/drivers/velodyne/laun
- 打开新的终端,并使用`bash docker/scripts/dev_into.sh`命令进入docker环境,在新终端中输入`cyber_monitor`命令,查看是否有`/apollo/sensor/lidar16/PointCloud2``/apollo/sensor/lidar16/Scan``/apollo/sensor/lidar16/compensator/PointCloud2`三个channel,并使用上下方向键选择channel,使用右方向键查看channel详细数据,数据无异常则说明激光雷达适配成功
![lidar_integration_cyber_monitor](images/lidar_integration_cyber_monitor.png)
![lidar_integration_cyber_monitor](images/lidar_integration_cyber_monitor.png)
![lidar_integration_channel](images/lidar_integration_channel.png)
## 毫米波雷达安装与数据验证
![lidar_integration_channel](images/lidar_integration_channel.png)
## 毫米波雷达安装与数据验证(根据自身需求配置)
**注意:** 在目前的感知方案中,没有融合毫米波雷达数据,对于没有二次开发需求的用户可以不配置毫米波;对于有二次开发需求的用户,可按照本章节内容配置毫米波雷达
- 毫米波雷达型号:continental AS 408-21
- continental AS 408-21简介:ARS408-21 是大陆 40X 毫米波雷达传感器系列中最新推出的高端产品,可以适用于不同的应用场景。ARS 408-21 很好地处理了测量性能与高安全性之间的矛盾:可实时检测目标的距离,并根据当前车速判断是否存在碰撞风险;可靠性高,具有自动故障检测功能,可识别传感器问题并自动输出故障码;鲁棒、轻量化设计,通过使用相对简单的雷达测量技术,以及在汽车行业的深度研发和批量生产基础,可以保证产品的鲁棒性和轻量化性能。
![lidar_integration_radar_look](images/lidar_integration_radar_look.jpeg)
![lidar_integration_radar_look](images/lidar_integration_radar_look.jpeg)
### 毫米波雷达接口及线序
- continental AS 408-21传感器采用12V直流供电,使用CAN通信接口。使用时,通过如下图所示的连接线缆将传感器CAN通信接口与Apollo的CAN1口连接,电源接口接入12V直流电源(车辆上提供12V电源接线盒),注意正负极。
![lidar_integration_radar_line](images/lidar_integration_radar_line.png)
![lidar_integration_radar_line](images/lidar_integration_radar_line.png)
- 传感器接口及定义如下图所示:其中,端口1接12V直流电源;端口8接GND;端口4接CAN_L;端口7接CAN_H。
![lidar_integration_radar_picture](images/lidar_integration_radar_picture.png)
![lidar_integration_radar_picture](images/lidar_integration_radar_picture.png)
![lidar_integration_radar_pin](images/lidar_integration_radar_pin.png)
![lidar_integration_radar_pin](images/lidar_integration_radar_pin.png)
- 毫米波雷达CAN接口与工控机的CAN1接口连接,如下图所示:
![lidar_integration_radar_connection](images/lidar_integration_radar_connection.jpeg)
![lidar_integration_radar_connection](images/lidar_integration_radar_connection.jpeg)
### 毫米波雷达的安装固定
- 传感器应安装在车前方中心处,当人正向面对车辆正前方时,传感器的正面朝向人,传感器的连接口朝向人的右手边,如下图所示:
![lidar_integration_radar_installation_look](images/lidar_integration_radar_installation_look.jpeg)
![lidar_integration_radar_installation_look](images/lidar_integration_radar_installation_look.jpeg)
![lidar_integration_radar_installation_position](images/lidar_integration_radar_installation_position.png)
![lidar_integration_radar_installation_position](images/lidar_integration_radar_installation_position.png)
- 毫米波雷达要牢靠固定在车身上,连接到毫米波雷达的接头要牢靠接插。离地面高0.5米,不能向下倾斜,向上仰0-2度以内,高度误差±0.2米,俯仰角误差0-2度(向上仰小于2度,不能向下倾斜),翻滚角误差±2度(radar左右两侧的平齐程度),航向角误差±2度(radar是否正对前方)。
### 毫米波雷达的配置及启动
- 传感器参数的配置:该传感器配置文件位于`/apollo/modules/drivers/radar/conti_radar/conf/`目录下,需要根据实际情况进行配置。
- 由于使用的是socket can, 并且只安装了前向毫米波雷达,所以在radar_front_conf.pb.txt和contiar_conf.pb.txt中,我们需要如下配置:
```
can_card_parameter {
brand:SOCKET_CAN_RAW
type: PCI_CARD
channel_id: CHANNEL_ID_ONE
}
```
另外,我们还需要在配置文件modules/drivers/radar/contiradar/dag/conti_radar.dag中删除后向毫米波雷达(rear components)的相关配置,否则,由于后向毫米波雷达使用的can 2,而我们的socket can没有can 2将会导致canbus无法正常启动。
- 正确启动Apollo及DreamView,选择车辆型号及运行模式,并打开`radar`模块开关,如下图所示:
- 由于只使用了前向毫米波,需要在配置文件modules/drivers/radar/contiradar/dag/conti_radar.dag中删除后向毫米波雷达(rear components)的相关配置,否则,由于后向毫米波雷达使用的can 2,而我们的socket can没有can 2将会导致canbus无法正常启动。修改后的`conti_radar.dag`文件内容如下:
![lidar_integration_radar_dreamview](images/lidar_integration_radar_dreamview.png)
```
module_config {
module_library : "/apollo/bazel-bin/modules/drivers/radar/conti_radar/libconti_radar.so"
components {
class_name : "ContiRadarCanbusComponent"
config {
name: "conti_radar_front"
config_file_path: "/apollo/modules/drivers/radar/conti_radar/conf/radar_front_conf.pb.txt"
}
}
}
```
- 输入`cyber_monitor`命令,应显示`/apollo/sensor/conti_radar`话题,如下图所示:
- 进入can卡目录启动can卡
```
cd ~/SocketCan/
bash start.sh
```
> **图片是ros操作命令,非Cyber**
- 正确启动Apollo及DreamView,选择车辆型号及运行模式,并打开`radar`模块开关,如下图所示:
![lidar_integration_radar_rostopic1](images/lidar_integration_radar_rostopic1.png)
![lidar_integration_radar_dreamview](images/lidar_integration_radar_dreamview.png)
- 按键盘上下键选择该话题并按回车键,显示该话题,如下图所示:
- 使用`cyber_monitor`工具,查看`/apollo/sensor/radar/front`数据
![lidar_integration_radar_rostopic2](images/lidar_integration_radar_rostopic2.png)
![lidar_integration_radar_rostopic2](images/lidar_integration_radar_rostopic2.png)
![lidar_integration_radar_rostopic3](images/lidar_integration_radar_rostopic3.png)
![lidar_integration_radar_rostopic4](images/lidar_integration_radar_rostopic4.png)
- 主要参数的含义如下表所示:
|参数 | 含义 |
|---|---|
|`longitude_dist` | 距目标的纵向距离 |
| `longitude_dist` | 距目标的纵向距离 |
| `lateral_dist` | 距目标的横向距离 |
|`longitude_vel` | 目标的纵向速度 |
| `lateral_vel` | 目标的横向速度 |
| `longitude_vel` | 目标的纵向速度 |
| `lateral_vel` | 目标的横向速度 |
### 毫米波雷达数据的验证
- 纵向距离(`longitude_dist`)/横向距离(`lateral_dist`)的验证:该传感器默认使用`长距离`模式,检测距离为0.2~250m,在车辆正前方检测距离范围内,分别放置障碍物,查看`/apollo/sensor/conti_radar`话题中的`longitute_dist``lateral_dist`数据是否正常(单位m)。下图中红色部分为长距离模式下传感器检测范围,误差在+/-0.4m内为正常。
![lidar_integration_radar_verify_picture](images/lidar_integration_radar_verify_picture.png)
![lidar_integration_radar_verify_picture](images/lidar_integration_radar_verify_picture.png)
- 纵向速度(`longitude_vel`)/横向速度(`lateral_vel`)的验证:使目标物体以固定速度在检测范围内运动,检测数据是否在误差允许的范围内。
- 该传感器各项参数的测量范围及分辨率如下图所示:
![lidar_integration_radar_verify_excel](images/lidar_integration_radar_verify_excel.png)
![lidar_integration_radar_verify_excel](images/lidar_integration_radar_verify_excel.png)
## NEXT
现在,您已经完成激光雷达感知设备集成,接下来可以开始[基于激光雷达的封闭园区自动驾驶搭建--感知设备标定](Sensor_Calibration_cn.md)
## 常见问题
#### 1. 无法打开Lidar配置网页
一般情况下,只要Lidar的IP与工控机上连接Lidar的网口IP处于相同网段,就可以在浏览器中打开配置网页。如果确认本地IP已经正确修改后仍不能打开配置页面,请确认Lidar的IP地址是否被修改,可以下载`wireshark`来查看Lidar IP
......@@ -10,6 +10,7 @@
- [1. 上传数据至BOS](#1-上传数据至bos)
- [2. 提交虚拟车道线生成任务](#2-提交虚拟车道线生成任务)
- [3. 获取虚拟车道线并添加到apollo中](#3-获取虚拟车道线并添加到apollo中)
- [NEXT](#next)
## 前提条件
......@@ -70,3 +71,5 @@
![virtual_lane_tree2](images/virtual_lane_tree2.png)
将上图中的2020-01-16-08-08-42整个文件夹拷贝到/apollo/modules/map/data/下,重命名为自己的地图名字(例如gongyuan),重启DreamView即可在地图下拉框中看到自己刚添加的地图。
## NEXT
现在,您已经完成虚拟车道线制作,根据您使用的是基于Lidar的感知方案还是基于Camera的感知方案,接下来可以开始[基于激光雷达的封闭园区自动驾驶搭建--感知适配](Perception_Configuration_cn.md)[基于摄像头的封闭园区自动驾驶搭建--感知适配](../Camera_Based_Auto_Driving/Perception_Configuration_cn.md)
\ No newline at end of file
......@@ -11,21 +11,18 @@
|序号 | 待修改文件 | 修改内容 |
|---|---|---|
| 1 | `modules/common/data/global_flagfile.txt` | 添加`--half_vehicle_width=0.43` |
| 2 | `modules/perception/production/launch/dev_kit_perception_camera.launch` |重命名为`perception_camera.launch ` 并替换原`perception_camera.launch`文件 |
| 3 | `modules/perception/production/conf/perception/camera/fusion_camera_detection_component.pb.txt` | 文件中`output_obstacles_channel_name`对应的内容修改为 `/apollo/perception/smartereyeobstacles` |
| 4 | `/apollo/modules/perception/production/conf/perception/camera/fusion_camera_detection_component.pb.txt` | 文件中第二行`input_camera_channel_names : "/apollo/sensor/camera/front_6mm/image,/apollo/sensor/camera/front_12mm/image"`修改为`input_camera_channel_names : "/apollo/sensor/smartereye/image"` |
## 感知开环验证及测试
把车辆开到户外,手动控制车辆,看感知是否有数据。
1. 进入docker环境,用gpu编译项目,编译项目,启动DreamView。
1. 进入docker环境,编译apollo,启动DreamView。
```
cd /apollo
bash docker/scripts/dev_start.sh
bash docker/scripts/dev_into.sh
bash apollo.sh build_opt_gpu
bash apollo.sh build_opt
bash scripts/bootstrap.sh
```
......@@ -44,12 +41,7 @@
|`/tf`| 确保能正常输出数据 |
|`/tf_static` | 确保能正常输出数据 |
4. 使用如下命令启动perception模块,使用`cyber_monitor`查看`/apollo/perception/smartereyeobstacles`是否正常输出,并在dreamview上查看障碍物信息:
```
budaoshi@in_dev_docker:/apollo$ cyber_launch start modules/perception/production/launch/perception_camera.launch
```
查看车前方运动的人或者自行车(自行车上要有人),在DreamView上查看障碍物颜色以及位置速度信息(自行车青蓝色,行人黄色,车辆绿色),如下图所示:
4.`DreamView``Module Controller`界面点击`ThirdPartyPerception`按钮,使用`cyber_monitor`查看`/apollo/perception/obstacles`是否正常输出,并在`DreamView`上查看障碍物信息。查看车前方运动的人或者自行车(自行车上要有人),在DreamView上查看障碍物颜色以及位置速度信息(自行车青蓝色,行人黄色,车辆绿色),如下图所示:
![camera_adaption_dreamview_vehicle](images/camera_adaption_dreamview_vehicle.png)
......@@ -59,4 +51,4 @@ budaoshi@in_dev_docker:/apollo$ cyber_launch start modules/perception/production
![camera_adaption_dreamview_obstacle2](images/camera_adaption_dreamview_obstacle2.png)
如果在dreamview上能看到障碍物并且`/apollo/perception/smartereyeobstacles`有障碍物信息,则开环测试通过。
\ No newline at end of file
如果在dreamview上能看到障碍物并且`/apollo/perception/obstacles`有障碍物信息,则开环测试通过。
\ No newline at end of file
......@@ -12,15 +12,15 @@
## 启动流程
1. 进入docker环境,用gpu编译项目,编译项目,启动DreamView。
1. 进入docker环境,编译apollo,启动DreamView。
cd /apollo
bash docker/scripts/dev_start.sh
bash docker/scripts/dev_into.sh
bash apollo.sh build_opt_gpu
bash apollo.sh build_opt
bash scripts/bootstrap.sh
2. 在浏览器中打开(http://localhost:8888), 选择`Dev_Kit`并选择相应高精地图,在Module Controller标签页启动GPS、Localization、Camera、Transform模块。
2. 在浏览器中打开(http://localhost:8888), 选择`Dev_Kit`并选择相应高精地图,在Module Controller标签页启动Canbus、GPS、Localization、Camera、Transform模块。
![camera_demonstration_dreamview](images/camera_demonstration_dreamview.jpeg)
......@@ -34,7 +34,7 @@
|`/tf`|确保能正常输出数据|
|`/tf_static`|确保能正常输出数据|
4. 使用`cyber_launch start modules/perception/production/launch/perception_camera.launch`命令启动摄像头感知,使用`cyber_monitor`查看`/apollo/perception/obstacles`是否正常输出,并在DreamView上查看障碍物信息:查看车前方10米处运动的人或者自行车(自行车上要有人),在DreamView上查看障碍物颜色以及位置速度信息(自行车青蓝色,行人黄色,车辆绿色),如下图所示:
4. `DreamView``Module Controller`界面点击`ThirdPartyPerception`按钮启动双目相机感知,使用`cyber_monitor`查看`/apollo/perception/obstacles`是否正常输出,并在DreamView上查看障碍物信息:查看车前方10米处运动的人或者自行车(自行车上要有人),在DreamView上查看障碍物颜色以及位置速度信息(自行车青蓝色,行人黄色,车辆绿色),如下图所示:
![camera_demonstration_dreamview_obstacle](images/camera_demonstration_dreamview_obstacle.png)
......@@ -44,7 +44,7 @@
![camera_demonstration_perception_obstacle2](images/camera_demonstration_perception_obstacle2.png)
确保在Dreamview上能看到障碍物并且`/apollo/perception/smartereyeobstacles`有障碍物信息。
确保在Dreamview上能看到障碍物并且`/apollo/perception/obstacles`有障碍物信息。
5. 在Module Controller标签页启动Planning、Prediction、Routing、Control模块确保这些模块能够正常启动。
......
# 循迹搭建--车辆动力学云标定
车辆云标定是通过采集车辆底盘油门踏板量、刹车踏板量、车辆速度、加速度,通过机器学习的方式,生成相应车辆的踏板标定表,生成的标定表数据量大,精度高,提高车辆的控制精度。车辆踏板标定系统主要包括三个部分:
- [循迹搭建--车辆动力学云标定](#循迹搭建--车辆动力学云标定)
- [概览](#概览)
- [前提条件](#前提条件)
- [主要步骤](#主要步骤)
- [百度云对象存储BOS注册](#百度云对象存储bos注册)
- [开通云服务账号](#开通云服务账号)
- [修改配置文件](#修改配置文件)
- [标定数据采集](#标定数据采集)
- [准备](#准备)
- [开始采集](#开始采集)
- [标定任务提交](#标定任务提交)
- [文件夹结构要求](#文件夹结构要求)
- [1. 准备百度云对象存储BOS服务](#1-准备百度云对象存储bos服务)
- [2. 和Apollo开发套件商务联系开通云标定服务账号](#2-和apollo开发套件商务联系开通云标定服务账号)
- [3. 上传预处理后的数据至BOS](#3-上传预处理后的数据至bos)
- [4. 提交标定任务据至BOS](#4-提交标定任务据至bos)
- [5. 获取标定结果及标定表](#5-获取标定结果及标定表)
- [6. 将标定表写入控制配置文件中](#6-将标定表写入控制配置文件中)
- [结果显示](#结果显示)
- [NEXT](#next)
- [常见问题](#常见问题)
## 概览
车辆云标定是通过采集车辆底盘油门踏板量、刹车踏板量、车辆速度、加速度作为车辆纵向动力模型的输入,通过机器学习的方式,生成相应车辆的踏板标定表,生成的标定表数据量大,精度高,提高车辆的控制精度。车辆踏板标定系统主要包括三个部分:
-`DreamView`数据采集监视器内采集标定数据;
......@@ -8,6 +33,10 @@
- 生成用于性能评估的可视化标定结果。
`DreamView`中,提供了一个踏板标定数据采集的监视器,用于监视采集标定数据的过程。通过配置文件设定标定条件(case),每个数据帧代表一种标定条件,每次采集数据时,每符合标定条件一次,则进度条增加一点,直至进度条增加满为止。
![vehicle_calibration_online_mainboard1](images/vehicle_calibration_online_mainboard1.png)
## 前提条件
- [循迹搭建--车辆集成](Vehicle_Integration_cn.md)
......@@ -26,13 +55,13 @@
- 可视化结果分析
## 前言
## 百度云对象存储BOS注册
注册方法参考[百度云对象存储BOS注册与基本使用向导](../../Apollo_Fuel/Apply_BOS_Account_cn.md)
`DreamView`中,提供了一个踏板标定数据采集的监视器,用于监视采集标定数据的过程。通过配置文件设定标定条件(case),每个数据帧代表一种标定条件,已采集的数据帧数量显示为进度条。
## 开通云服务账号
请与商务部门联系开通云服务账号,需要提供上一步骤中注册的`Bucket名称``所属地域`和用户邮箱信息
![vehicle_calibration_online_mainboard1](images/vehicle_calibration_online_mainboard1.png)
## 修改标定配置文件
## 修改配置文件
在开始标定前,需要根据实际车辆的参数配置,进行车辆标定文件的配置。目前车辆踏板标定默认支持的是MKZ乘用车,而对于教学小车,我们需要根据其最大速度、速度、油门、刹车踏板的限制,针对性的修改标定文件,以适配小车的踏板标定。需要修改标定配置文件,在目录`apollo/modules/dreamview/conf/data_collection_table.pb.txt`,设计小车的标定条件如下:
......@@ -191,13 +220,13 @@
### 开始采集
准备所需channel:
车辆标定,需要录制包含车辆底盘信息、定位信息的数据作为输入,所需要的channel如下表所示:
| 模块 | channel名称 |
|---|---|
| Canbus | /apollo/canbus/chassis |
| Localization | /apollo/localization/pose |
车辆标定,需要采集包含车辆底盘、定位信息的数据作为输入,所需要的channel如下表所示:
| 模块 | channel名称 | 检查项目 |
|---|---|---|
| Canbus | `/apollo/canbus/chassis` | 确保能正常输出数据 |
| GPS | `/apollo/sensor/gnss/best_pose` | 确保能正常输出数据,`sol_type:` 选项显示为`NARROW_INT` |
| Localization | `/apollo/localization/pose` | 确保能正常输出数据 |
为获取上述channel,在命令提示行内启动`canbus`模块、`GPS`模块、`localization`模块:
```
......@@ -207,15 +236,15 @@
```
并通过cyber_monitor检查上述channel数据反馈正常。
`DreamView`界面点击左侧边栏,选择`Module Controller`,可以看到`Canbus`开关已经打开,这时开始点击`Recorder`开关,**(请务必切记先打开<Recorder>开关,再进行标定数据采集操作,实际情况常常会因未打开<Recorder>开关造成数据未记录保存,影响效率)**,这时Apollo系统就开始录制记录`canbus``localization` 数据了。
`DreamView`界面点击左侧边栏,选择`Module Controller`,可以看到`Canbus`开关已经打开,这时开始点击`Recorder`开关,**(请务必切记先打开<Recorder>开关,再进行标定数据采集操作,实际情况常常会因未打开<Recorder>开关造成数据未记录保存,影响效率)**,这时Apollo系统就开始录制记录车辆标定数据了。
![vehicle_calibration_online_dreamview2](images/vehicle_calibration_online_dreamview2.png)
这时,我们使用车辆遥控器,根据右侧数据采集监视器,采集相应标定条件的车辆数据,直到采集监视器内各标定条件进度条(**蓝色**)全部集满后,再次点击`Recorder`开关,关闭数据记录,结束车辆标定
这时,我们使用遥控器遥控车辆,根据右侧数据采集监视器,采集相应标定条件的车辆数据,直到采集监视器内各标定条件进度条(**蓝色**)全部集满后,点击关闭`Recorder`开关,关闭数据记录,结束车辆标定数据采集
![vehicle_calibration_online_mainboard2](images/vehicle_calibration_online_mainboard2.png)
所有采集的标定数据都保存在`apollo/data/bag/`目录下,找到以`年-月-日-时-分-秒_s`命名的文件夹,根据时间找到属于车辆标定采集的数据。我们可以分批采集数据,即每一次可以采集一部分数据,这样数据会记录在不同的`年-月-日-时-分-秒_s`文件夹,也可以一次全部采集,这样所有的数据都记录在同一个文件夹内,通常完成上述标定条件的车辆标定需要20分钟左右。
所有采集的标定数据都保存在`apollo/data/bag/`目录下,找到以`年-月-日-时-分-秒_s`命名的文件夹,根据时间找到属于车辆标定采集的数据。我们可以分批采集数据,即每一次可以采集一部分数据,这样数据会记录在不同的`年-月-日-时-分-秒_s`文件夹,也可以一次全部采集,这样所有的数据都记录在同一个文件夹内,通常完成上述标定条件的车辆标定数据采集需要20分钟左右。
## 标定任务提交
......@@ -225,6 +254,8 @@
在使用云标定服务前,需要准备[百度云对象存储BOS服务](https://cloud.baidu.com/product/bos.html)用于存放标定预处理后的数据。
**注意:** 必须使用开通过权限的 bucket,确认`Bucket名称``所属地域`和提交商务注册时的`Bucket名称``所属区域`保持一致。
#### 2. 和Apollo开发套件商务联系开通云标定服务账号
#### 3. 上传预处理后的数据至BOS
......@@ -235,7 +266,7 @@
![vehicle_calibration_online_structure](images/vehicle_calibration_online_structure.png)
2)`Origin Folder`建议为**BOS的根目录**,确保`task001`目录是在BOS根目录下创建的
2)`Origin Folder`确保是**BOS的根目录**,确保`task001`目录是在BOS根目录下创建的(根目录:登录BOS存储服务器后首先看到的目录即为根目录,一般是`Bucket name`目录)
3)`task001、task002...`代表一次车辆标定任务文件夹,即每次标定任务提交时,会训练一个`task文件夹`内的车辆数据;`task文件夹`上一层(`Origin Folder`)可以是BOS根目录或其他目录;
......@@ -244,7 +275,7 @@
4)`Records`文件内导入采集的车辆标定数据,车辆的录制数据在`apollo/data/bag/`目录下;
**需要注意:**`Records`文件夹下需要有2层子文件夹,第一层为大时间(年月日)文件夹,结构需要与上图保持一致;
5) `Configuration File`即为`vehicle_param.pb.txt`(车辆配置文件),该文件在`apollo/modules/calition/data/dev_kit`文件夹内,将该文件夹下的`vehicle_param.pb.txt`拷贝至BOS对应的车辆标定文件夹下,如上图`Vehicle2`内;
5) `Configuration File`即为`vehicle_param.pb.txt`(车辆配置文件),该文件在`apollo/modules/calibration/data/dev_kit`文件夹内,将该文件夹下的`vehicle_param.pb.txt`拷贝至BOS对应的车辆标定文件夹下,如上图`Vehicle2`文件夹内;
6)总结上述文件夹结构:
......@@ -265,6 +296,8 @@
- 云标定任务完成后,将在注册的邮箱(请与商务联系)中收到一封标定结果邮件。如果标定任务成功,将包含标定表及标定结果图片。
#### 6. 将标定表写入控制配置文件中
- 生成的标定表以 `车型_calibration_table.pb.txt`命名,将标定表内的标定数据为calibration_table字段,把全部数据拷贝替换至`apollo/modules/calibration/data/dev_kit/control_conf.pb.txt`下对应的`lon_controller_conf`字段下面的`calibration_table`段内。
注:云标定数据片段示例(截取了前4段)
......@@ -303,3 +336,29 @@
![vehicle_calibration_online_brake](images/vehicle_calibration_online_brake.png)
## NEXT
车辆云标定能够提供精度较高的车辆踏板标定表,对于自动驾驶的控制精度有显著提升。当然,使用`D-KIT`文件夹内默认的标定表数据,也可以进行循迹等自动驾驶控制。
现在,您已经完成车辆动力学标定,接下来可以开始[车辆循迹演示](Start_Waypoint_Following_cn.md)
## 常见问题
#### 1. 提交后,显示`UNAUTHORIZED`,如下图所示
![vehicle_calibration_unauthorized](images/vehicle_calibration_unauthorized.png)
出现该问题,一般是`Partner ID`输入有误。
#### 2.在进行车辆标定时,收到如下报错邮件,数据完整性检查出现错误
![vehicle_calibration_online_error1](images/vehicle_calibration_online_error1.png)
排查方法:
1. 查看`文件夹结构`是否正确;
2. 查看是否在车辆文件夹内添加车辆配置文件(`vehicle_param.pb.txt`
3. 查看录制数据包内是否存在`gps/localization`数据:
- 通过`cyber_recorder info xxxxxx.record.xxxxx`
检查采集数据内是否存在下面3个`channel`的数据:
- `/apollo/canbus/chassis`
- `/apollo/sensor/gnss/best_pose`
- `/apollo/localization/pose`
- 下图为正确的数据包示意
![vehicle_calibration_online_record_check](images/vehicle_calibration_online_record_check.png)
循迹搭建--车辆集成
===================
## 目录
- [概览](#概览)
- [工控机安装](#工控机安装)
- [导航设备安装](#导航设备安装)
# 循迹搭建--车辆集成
- [循迹搭建--车辆集成](#循迹搭建--车辆集成)
- [概览](#概览)
- [前提条件](#前提条件)
- [工控机安装](#工控机安装)
- [导航设备安装](#导航设备安装)
- [安装主机和天线](#安装主机和天线)
- [安装需要的组件](#安装需要的组件)
- [挂载](#挂载)
- [配线](#配线)
- [量取杆臂值](#量取杆臂值)
- [NEXT](#next)
## 概览
......@@ -13,6 +17,11 @@
![waypoint_following.png](images/waypoint_following.png)
## 前提条件
- 完成了[循迹搭建--Apollo系统安装](Apollo_Installation_cn.md)
## 工控机安装
工控机的安装包括工控机主机的安装、显示器的安装、CAN线的安装、路由器的安装和无线键盘的安装,分别介绍如下:
......@@ -76,7 +85,10 @@
- 当主机和天线处在正确位置时量取天线到主机的距离。主机的中点和天线的中点标记在设备的外部。
- 距离被测量并记录为X轴偏移、Y轴偏移和Z轴偏移。坐标轴由主机的位置确定。偏移量的误差应该被控制在一厘米以内以获取高精度的定位信息。杆臂是指后天线(Primary)的几何中心位置相对于主机几何中心在直角坐标系内x,y,z三方向的位置差。通过如下指令进行补偿设置:`$cmd,set,leverarm,gnss,x_offset,y_offset,z_offset*ff`。x_offset:X方向的杆臂误差,单位为米,以此类推。注:上述坐标XYZ为设备坐标轴配置后的实际坐标,一般应与载体坐标系一致,注意补偿的是后天线(Primary)杆臂值。当进行导航模式配置后必须对设备进行重新上电启动。举例如下:
![图片](images/gps_vehicle_map1.png)
![图片](images/gps_vehicle_map2.png)
![图片](images/gps_vehicle_map1.png)
![图片](images/gps_vehicle_map2.png)
如上图所示:开发套件M2主机放置与车辆后轴中间附近(关于车辆坐标系说明请参考[Apollo车辆坐标系说明](../../coordination_cn.md)),M2的Y轴正向与车辆前进方向一致,前后天线分别安装至车辆前后中间位置,后天线与M2主机在X轴的原点位置重合,则X轴偏移x_offset的值为0;后天线在M2主机Y轴的负向0.1m处,则Y轴偏移y_offset的值为-0.1;后天线在M2主机Z轴的正向0.6m处,则Z轴偏移z_offset的值为0.6,则杆臂值为(0,-0.1,0.6)。
## NEXT
现在,您已经完成车辆集成,接下来可以开始[循迹搭建--定位模块配置](Localization_Configuration_cn.md)
# Continental ARS408-21 front radar driver configuration (ascii protobuf).
# NOTE(review): values mirror the wiring described in the surrounding docs
# (front radar on a raw SOCKET CAN interface, channel 1) — confirm field
# semantics against modules/drivers/radar/conti_radar/proto before editing.
can_conf {
# CAN interface the radar is attached to.
can_card_parameter {
brand:SOCKET_CAN_RAW
type: PCI_CARD
channel_id: CHANNEL_ID_ONE
}
enable_debug_mode: false
enable_receiver_log: false
}
radar_conf {
# The *_valid flags select which of the settings below are actually
# pushed to the sensor; fields with a false flag keep the sensor default.
max_distance_valid: true
sensor_id_valid: false
radar_power_valid: false
output_type_valid: true
send_quality_valid: true
send_ext_info_valid: true
sort_index_valid: false
store_in_nvm_valid: true
ctrl_relay_valid: false
rcs_threshold_valid: true
# Long-range mode: detections up to 250 m (see sensor docs above).
max_distance: 250
sensor_id: 0
# Report tracked objects rather than raw clusters.
output_type: OUTPUT_TYPE_OBJECTS
radar_power: 0
ctrl_relay: 0
send_ext_info: true
send_quality: true
sort_index: 0
store_in_nvm: 1
rcs_threshold: RCS_THRESHOLD_STANDARD
# unit: nanoseconds, 20ms for 50Hz
input_send_interval: 20000000
}
# Channel the driver publishes front-radar detections on.
radar_channel: "/apollo/sensor/radar/front"
# Static transform (extrinsic calibration) of the front radar frame
# relative to the velodyne128 lidar frame.
# NOTE(review): indentation was lost in a merge/paste, which made the
# mapping invalid YAML (flush-left duplicate x/y/z keys). Nesting has
# been restored; all values are unchanged — verify against the original
# calibration file before use.
header:
  seq: 0
  stamp:
    secs: 0
    nsecs: 0
  frame_id: velodyne128
transform:
  # Identity rotation: radar assumed axis-aligned with the lidar frame.
  rotation:
    x: 0.0
    y: 0.0
    z: 0.0
    w: 1.0
  # Translation in meters, expressed in the velodyne128 frame.
  translation:
    x: 0.77
    y: 0.0
    z: -1.20
child_frame_id: radar_front
......@@ -3151,7 +3151,7 @@ debug@2.6.9, debug@^2.2.0, debug@^2.3.3, debug@^2.6.8, debug@^2.6.9:
dependencies:
ms "2.0.0"
debug@^3.0.0, debug@^3.1.0, debug@^3.1.1, debug@^3.2.5, debug@^3.2.6:
debug@^3.0.0, debug@^3.1.0, debug@^3.1.1, debug@^3.2.5:
version "3.2.6"
resolved "https://registry.yarnpkg.com/debug/-/debug-3.2.6.tgz#e83d17de16d8a7efb7717edbe5fb10135eee629b"
integrity sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==
......@@ -3359,7 +3359,7 @@ detect-indent@^4.0.0:
dependencies:
repeating "^2.0.0"
detect-libc@^1.0.2, detect-libc@^1.0.3:
detect-libc@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/detect-libc/-/detect-libc-1.0.3.tgz#fa137c4bd698edf55cd5cd02ac559f91a4c4ba9b"
integrity sha1-+hN8S9aY7fVc1c0CrFWfkaTEups=
......@@ -5074,7 +5074,7 @@ https-browserify@^1.0.0:
resolved "https://registry.yarnpkg.com/https-browserify/-/https-browserify-1.0.0.tgz#ec06c10e0a34c0f2faf199f7fd7fc78fffd03c73"
integrity sha1-7AbBDgo0wPL68Zn3/X/Hj//QPHM=
iconv-lite@0.4.24, iconv-lite@^0.4.17, iconv-lite@^0.4.4, iconv-lite@~0.4.13:
iconv-lite@0.4.24, iconv-lite@^0.4.17, iconv-lite@~0.4.13:
version "0.4.24"
resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b"
integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==
......@@ -5103,13 +5103,6 @@ iferr@^0.1.5:
resolved "https://registry.yarnpkg.com/iferr/-/iferr-0.1.5.tgz#c60eed69e6d8fdb6b3104a1fcbca1c192dc5b501"
integrity sha1-xg7taebY/bazEEofy8ocGS3FtQE=
ignore-walk@^3.0.1:
version "3.0.3"
resolved "https://registry.yarnpkg.com/ignore-walk/-/ignore-walk-3.0.3.tgz#017e2447184bfeade7c238e4aefdd1e8f95b1e37"
integrity sha512-m7o6xuOaT1aqheYHKf8W6J5pYH85ZI9w077erOzLje3JsB1gkafkAhHHY19dqjulgIZHFm32Cp5uNZgcQqdJKw==
dependencies:
minimatch "^3.0.4"
ignore@^3.3.3, ignore@^3.3.5:
version "3.3.10"
resolved "https://registry.yarnpkg.com/ignore/-/ignore-3.3.10.tgz#0a97fb876986e8081c631160f8f9f389157f0043"
......@@ -6578,15 +6571,6 @@ natural-compare@^1.4.0:
resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7"
integrity sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=
needle@^2.2.1:
version "2.4.1"
resolved "https://registry.yarnpkg.com/needle/-/needle-2.4.1.tgz#14af48732463d7475696f937626b1b993247a56a"
integrity sha512-x/gi6ijr4B7fwl6WYL9FwlCvRQKGlUNvnceho8wxkwXqN8jvVmmmATTmZPRRG7b/yC1eode26C2HO9jl78Du9g==
dependencies:
debug "^3.2.6"
iconv-lite "^0.4.4"
sax "^1.2.4"
negotiator@0.6.2:
version "0.6.2"
resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.2.tgz#feacf7ccf525a77ae9634436a64883ffeca346fb"
......@@ -6676,22 +6660,6 @@ node-libs-browser@^2.0.0:
util "^0.11.0"
vm-browserify "^1.0.1"
node-pre-gyp@*:
version "0.14.0"
resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.14.0.tgz#9a0596533b877289bcad4e143982ca3d904ddc83"
integrity sha512-+CvDC7ZttU/sSt9rFjix/P05iS43qHCOOGzcr3Ry99bXG7VX953+vFyEuph/tfqoYu8dttBkE86JSKBO2OzcxA==
dependencies:
detect-libc "^1.0.2"
mkdirp "^0.5.1"
needle "^2.2.1"
nopt "^4.0.1"
npm-packlist "^1.1.6"
npmlog "^4.0.2"
rc "^1.2.7"
rimraf "^2.6.1"
semver "^5.3.0"
tar "^4.4.2"
node-sass@^4.5.3:
version "4.13.1"
resolved "https://registry.yarnpkg.com/node-sass/-/node-sass-4.13.1.tgz#9db5689696bb2eec2c32b98bfea4c7a2e992d0a3"
......@@ -6727,14 +6695,6 @@ noop-logger@^0.1.1:
dependencies:
abbrev "1"
nopt@^4.0.1:
version "4.0.3"
resolved "https://registry.yarnpkg.com/nopt/-/nopt-4.0.3.tgz#a375cad9d02fd921278d954c2254d5aa57e15e48"
integrity sha512-CvaGwVMztSMJLOeXPrez7fyfObdZqNUK1cPAEzLHrTybIua9pMdmmPR5YwtfNftIOMv3DPUhFaxsZMNTQO20Kg==
dependencies:
abbrev "1"
osenv "^0.1.4"
normalize-package-data@^2.3.2, normalize-package-data@^2.3.4:
version "2.5.0"
resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8"
......@@ -6781,13 +6741,6 @@ normalize-url@^1.4.0:
query-string "^4.1.0"
sort-keys "^1.0.0"
npm-bundled@^1.0.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.1.1.tgz#1edd570865a94cdb1bc8220775e29466c9fb234b"
integrity sha512-gqkfgGePhTpAEgUsGEgcq1rqPXA+tv/aVBlgEzfXwA1yiUJF7xtEt3CtVwOjNYQOVknDk0F20w58Fnm3EtG0fA==
dependencies:
npm-normalize-package-bin "^1.0.1"
npm-conf@^1.1.0:
version "1.1.3"
resolved "https://registry.yarnpkg.com/npm-conf/-/npm-conf-1.1.3.tgz#256cc47bd0e218c259c4e9550bf413bc2192aff9"
......@@ -6796,20 +6749,6 @@ npm-conf@^1.1.0:
config-chain "^1.1.11"
pify "^3.0.0"
npm-normalize-package-bin@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz#6e79a41f23fd235c0623218228da7d9c23b8f6e2"
integrity sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA==
npm-packlist@^1.1.6:
version "1.4.8"
resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.4.8.tgz#56ee6cc135b9f98ad3d51c1c95da22bbb9b2ef3e"
integrity sha512-5+AZgwru5IevF5ZdnFglB5wNlHG1AOOuw28WhUq8/8emhBmLv6jX5by4WJCh7lW0uSYZYS6DXqIsyZVIXRZU9A==
dependencies:
ignore-walk "^3.0.1"
npm-bundled "^1.0.1"
npm-normalize-package-bin "^1.0.1"
npm-run-path@^2.0.0:
version "2.0.2"
resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f"
......@@ -6817,7 +6756,7 @@ npm-run-path@^2.0.0:
dependencies:
path-key "^2.0.0"
"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0, npmlog@^4.0.1, npmlog@^4.0.2, npmlog@^4.1.2:
"npmlog@0 || 1 || 2 || 3 || 4", npmlog@^4.0.0, npmlog@^4.0.1, npmlog@^4.1.2:
version "4.1.2"
resolved "https://registry.yarnpkg.com/npmlog/-/npmlog-4.1.2.tgz#08a7f2a8bf734604779a9efa4ad5cc717abb954b"
integrity sha512-2uUqazuKlTaSI/dC8AzicUck7+IrEaOnN/e0jd3Xtt1KcGpwx30v50mL7oPyr/h9bL3E4aZccVwpwP+5W9Vjkg==
......@@ -7059,7 +6998,7 @@ os-tmpdir@^1.0.0, os-tmpdir@^1.0.1, os-tmpdir@~1.0.2:
resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274"
integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=
osenv@0, osenv@^0.1.4:
osenv@0:
version "0.1.5"
resolved "https://registry.yarnpkg.com/osenv/-/osenv-0.1.5.tgz#85cdfafaeb28e8677f416e287592b5f3f49ea410"
integrity sha512-0CWcCECdMVc2Rw3U5w9ZjqX6ga6ubk1xDVKxtBQPK7wis/0F2r9T6k4ydGYhecl7YUBxBVxhL5oisPsNxAPe2g==
......@@ -8597,7 +8536,7 @@ sass-loader@^6.0.5:
neo-async "^2.5.0"
pify "^3.0.0"
sax@>=0.6.0, sax@^1.2.4, sax@~1.2.1, sax@~1.2.4:
sax@>=0.6.0, sax@~1.2.1, sax@~1.2.4:
version "1.2.4"
resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9"
integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==
......@@ -9455,7 +9394,7 @@ tar@^2.0.0:
fstream "^1.0.12"
inherits "2"
tar@^4.4.2, tar@^4.4.8:
tar@^4.4.8:
version "4.4.13"
resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.13.tgz#43b364bc52888d555298637b10d60790254ab525"
integrity sha512-w2VwSrBoHa5BsSyH+KxEqeQBAllHhccyMFVHtGtdMpF4W7IRWfZjFiQceJPChOeTsSDVUpER2T8FA93pr0L+QA==
......@@ -10218,9 +10157,9 @@ websocket-driver@>=0.5.1:
websocket-extensions ">=0.1.1"
websocket-extensions@>=0.1.1:
version "0.1.3"
resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.3.tgz#5d2ff22977003ec687a4b87073dfbbac146ccf29"
integrity sha512-nqHUnMXmBzT0w570r2JpJxfiSD1IzoI+HGVdd3aZ0yNi3ngvQ4jv1dtHt5VGxfI2yj5yqImPhOK4vmIh2xMbGg==
version "0.1.4"
resolved "https://registry.yarnpkg.com/websocket-extensions/-/websocket-extensions-0.1.4.tgz#7f8473bc839dfd87608adb95d7eb075211578a42"
integrity sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==
whatwg-fetch@>=0.10.0:
version "3.0.0"
......
......@@ -41,7 +41,14 @@
## Generate Localization Map
Localization map is used for LiDAR-based localization, which is a grid-cell representation of the environment. Each cell stores the statistics of laser reflection intensity and altitude. The map is organized as a group of map nodes. For more information, please refer to `apollo/modules/localization/msf/local_map`.
We provide a script (`apollo/scripts/msf_local_map_creator.sh`) to generate localization map. You need to provide a group of point cloud frames (as .pcd file), corresponding poses file, and UTM zone id. The format of the poses file is `pcd_number timestamp x y z qx qy qz qw`.
We provide a script (`apollo/scripts/msf_local_map_creator.sh`) to generate localization map. Usage: `bash msf_local_map_creator.sh [pcd_folder] [pose_file] [zone_id] [map_folder]`.
`pcd_folder`: the folder containing all point cloud files (as .pcd files) used to create the map.
`pose_file`: a file containing the pose of every pcd file. Each line corresponds to one pcd file and has the format "pcd_number timestamp x y z qx qy qz qw", where "pcd_number" matches the pcd file name.
`zone_id`: UTM zone id.
`map_folder`: destination folder for map.
After running this script, you will get a localization map in `map_folder`, which contains a "config.xml" file and a "map" folder.
In this script, we first create a lossless map and then convert it to a lossy map, which drops some non-essential information and has a smaller size. The lossless map is also available.
## Visualization Tool
We provide a simple online visualization tool for debug multi-sensor fusion localization module. The parameter `use_visualize` in localization.conf is used to enable the online visualization tool.
......
......@@ -23,6 +23,7 @@ cc_library(
"//modules/planning/common:message_process",
"//modules/planning/proto:planning_cc_proto",
"//modules/prediction/proto:prediction_obstacle_cc_proto",
"//modules/storytelling/proto:story_cc_proto",
],
)
......
......@@ -239,6 +239,7 @@ cc_library(
"//modules/planning/proto:pad_msg_cc_proto",
"//modules/prediction/proto:prediction_obstacle_cc_proto",
"//modules/routing/proto:routing_cc_proto",
"//modules/storytelling/proto:story_cc_proto",
],
)
......@@ -498,6 +499,7 @@ cc_library(
"//modules/prediction/proto:prediction_obstacle_cc_proto",
"//modules/prediction/util:data_extraction",
"//modules/routing/proto:routing_cc_proto",
"//modules/storytelling/proto:story_cc_proto",
"//third_party:boost",
"@com_google_absl//absl/strings",
],
......
......@@ -18,6 +18,7 @@
#include "absl/strings/str_cat.h"
#include "cyber/common/file.h"
#include "modules/planning/common/planning_gflags.h"
namespace apollo {
......@@ -27,7 +28,9 @@ LearningData FeatureOutput::learning_data_;
std::size_t FeatureOutput::idx_learning_data_ = 0;
int FeatureOutput::learning_data_file_index_ = 0;
void FeatureOutput::Close() { Clear(); }
void FeatureOutput::Close() {
Clear();
}
void FeatureOutput::Clear() {
learning_data_.Clear();
......@@ -45,7 +48,7 @@ void FeatureOutput::InsertLearningDataFrame(
const LearningDataFrame& learning_data_frame) {
learning_data_.add_learning_data()->CopyFrom(learning_data_frame);
if (FLAGS_planning_offline_mode == 2) {
if (FLAGS_planning_learning_mode == 1) {
// write frames into a file
if (learning_data_.learning_data_size() >=
FLAGS_learning_data_frame_num_per_file) {
......@@ -63,18 +66,20 @@ LearningDataFrame* FeatureOutput::GetLatestLearningDataFrame() {
return size > 0 ? learning_data_.mutable_learning_data(size - 1) : nullptr;
}
void FeatureOutput::InsertPlanningResult() {}
void FeatureOutput::InsertPlanningResult() {
}
void FeatureOutput::WriteLearningData(const std::string& record_file) {
if (FLAGS_planning_offline_mode != 2) {
void FeatureOutput::WriteLearningData(
const std::string& record_file) {
if (FLAGS_planning_learning_mode != 1) {
return;
}
std::string src_file_name =
record_file.substr(record_file.find_last_of("/") + 1);
src_file_name = src_file_name.empty() ? "00000" : src_file_name;
const std::string dest_file =
absl::StrCat(FLAGS_planning_data_dir, "/", src_file_name, ".",
learning_data_file_index_, ".bin");
const std::string dest_file = absl::StrCat(
FLAGS_planning_data_dir, "/", src_file_name, ".",
learning_data_file_index_, ".bin");
cyber::common::SetProtoToBinaryFile(learning_data_, dest_file);
// cyber::common::SetProtoToASCIIFile(learning_data_, dest_file + ".txt");
learning_data_.Clear();
......@@ -83,7 +88,7 @@ void FeatureOutput::WriteLearningData(const std::string& record_file) {
void FeatureOutput::WriteRemainderiLearningData(
const std::string& record_file) {
if (FLAGS_planning_offline_mode != 2) {
if (FLAGS_planning_learning_mode != 1) {
return;
}
......
......@@ -490,7 +490,7 @@ void Frame::ReadTrafficLights() {
void Frame::ReadLearningDataFrame() {
learning_data_frame_.Clear();
if (FLAGS_planning_offline_mode != 1) {
if (FLAGS_planning_learning_mode != 2 && FLAGS_planning_learning_mode != 3) {
return;
}
auto learning_data_frame = FeatureOutput::GetLatestLearningDataFrame();
......
......@@ -25,6 +25,7 @@
#include "modules/planning/proto/pad_msg.pb.h"
#include "modules/prediction/proto/prediction_obstacle.pb.h"
#include "modules/routing/proto/routing.pb.h"
#include "modules/storytelling/proto/story.pb.h"
namespace apollo {
namespace planning {
......@@ -42,6 +43,7 @@ struct LocalView {
std::shared_ptr<routing::RoutingResponse> routing;
std::shared_ptr<relative_map::MapMsg> relative_map;
std::shared_ptr<PadMessage> pad_msg;
std::shared_ptr<storytelling::Stories> stories;
};
} // namespace planning
......
......@@ -37,6 +37,7 @@
#include "modules/planning/proto/planning_config.pb.h"
#include "modules/prediction/proto/prediction_obstacle.pb.h"
#include "modules/routing/proto/routing.pb.h"
#include "modules/storytelling/proto/story.pb.h"
namespace apollo {
namespace planning {
......@@ -58,6 +59,8 @@ class MessageProcess {
void OnRoutingResponse(
const apollo::routing::RoutingResponse& routing_response);
void OnStoryTelling(const apollo::storytelling::Stories& stories);
void OnTrafficLightDetection(
const apollo::perception::TrafficLightDetection& traffic_light_detection);
......@@ -100,6 +103,8 @@ class MessageProcess {
localizations,
LearningDataFrame* learning_data_frame);
void GeneratePlanningTag(LearningDataFrame* learning_data_frame);
void GenerateLearningDataFrame(LearningDataFrame* learning_data_frame);
private:
......@@ -117,7 +122,7 @@ class MessageProcess {
obstacle_history_map_;
ChassisFeature chassis_feature_;
std::string map_name_;
std::vector<OverlapFeature> overlaps_;
PlanningTag planning_tag_;
apollo::routing::RoutingResponse routing_response_;
double traffic_light_detection_message_timestamp_;
std::vector<TrafficLightFeature> traffic_lights_;
......
......@@ -536,11 +536,12 @@ DEFINE_string(planning_data_dir, "/apollo/modules/planning/data/",
DEFINE_string(planning_offline_bags, "",
"a list of source files or directories for offline mode. "
"The items need to be separated by colon ':'. ");
DEFINE_int32(planning_offline_mode, 0,
DEFINE_int32(planning_learning_mode, 0,
"0: no learning "
"1: online learning, no dump file "
"2: offline learning. read record files and dump learning_data "
" to <record file>.<n>.bin");
"1: offline learning. read record files and dump learning_data "
" to <record file>.<n>.bin "
"2: online learning(e2e) "
"3: online learning(hybrid)");
DEFINE_int32(learning_data_obstacle_history_time_sec, 3.0,
"time sec (second) of history trajectory points for a obstacle");
DEFINE_int32(learning_data_frame_num_per_file, 100,
......
......@@ -268,9 +268,9 @@ DECLARE_bool(use_front_axe_center_in_path_planning);
DECLARE_bool(use_road_boundary_from_map);
// learning related
DECLARE_int32(planning_learning_mode);
DECLARE_string(planning_data_dir);
DECLARE_string(planning_offline_bags);
DECLARE_int32(planning_offline_mode);
DECLARE_int32(learning_data_obstacle_history_time_sec);
DECLARE_int32(learning_data_frame_num_per_file);
DECLARE_string(planning_birdview_img_feature_renderer_config_file);
......@@ -30,7 +30,7 @@ namespace planning {
void TrajectoryEvaluator::WriteLog(const std::string& msg) {
AERROR << msg;
if (FLAGS_planning_offline_mode == 2) {
if (FLAGS_planning_learning_mode == 1) {
EvaluatorLogger::GetStream() << msg << std::endl;
}
}
......
......@@ -9,8 +9,8 @@
--enable_multi_thread_in_dp_st_graph
--use_osqp_optimizer_for_reference_line
# 0-no learning; 1-online learning; 3-offline: read records and dump learning data
--planning_offline_mode=0
# 0-no learning; 1-offline(read records and dump learning data); 2-online(e2e); 3-online(hybrid)
--planning_learning_mode=0
# --smoother_config_filename=/apollo/modules/planning/conf/spiral_smoother_config.pb.txt
# --smoother_config_filename=/apollo/modules/planning/conf/qp_spline_smoother_config.pb.txt
......
......@@ -8,6 +8,7 @@ topic_config {
relative_map_topic: "/apollo/relative_map"
routing_request_topic: "/apollo/routing_request"
routing_response_topic: "/apollo/routing_response"
story_telling_topic: "/apollo/storytelling"
traffic_light_detection_topic: "/apollo/perception/traffic_light"
}
standard_planning_config {
......
......@@ -12,7 +12,7 @@ stage_config: {
task_config: {
task_type: LEARNING_MODEL_INFERENCE_TASK
learning_model_inference_task_config {
model_file: "/apollo/modules/planning/data/model/test_model.pt"
model_file: "/apollo/modules/planning/data/model/test_model_conv_rnn.pt"
use_cuda: true
}
}
......
......@@ -45,7 +45,6 @@ bool BirdviewImgFeatureRenderer::Init(const PlanningSemanticMapConfig& config) {
FLAGS_map_dir.substr(FLAGS_map_dir.find_last_of("/") + 1);
if (map_name != "sunnyvale_with_two_offices" && map_name != "sunnyvale") {
AERROR << "Map other than sunnyvale_with_two_offices are not supported";
return false;
}
// TODO(Jinyun): add sunnyvale map or draw basemap online
if (map_name == "sunnyvale") {
......@@ -596,11 +595,24 @@ bool BirdviewImgFeatureRenderer::CropByPose(const double ego_x,
const double ego_heading,
const cv::Mat& base_map,
cv::Mat* img_feature) {
// use dimension of base_roadmap_img as it's assumed that all base maps have
// the same size
cv::Point2i ego_img_idx = GetPointImgIdx(ego_x - map_bottom_left_point_x_,
ego_y - map_bottom_left_point_y_, 0,
base_roadmap_img_.size[0]);
if (ego_img_idx.x < 0 || ego_img_idx.x + 1 > base_roadmap_img_.size[1] ||
ego_img_idx.y < 0 || ego_img_idx.y + 1 > base_roadmap_img_.size[0]) {
AERROR << "ego vehicle position out of bound of base map";
return false;
}
const int rough_radius = static_cast<int>(sqrt(
config_.height() * config_.height() + config_.width() * config_.width()));
if (ego_img_idx.x - rough_radius < 0 || ego_img_idx.y - rough_radius < 0) {
AERROR << "cropping out of bound of base map";
return false;
}
cv::Rect rough_rect(ego_img_idx.x - rough_radius,
ego_img_idx.y - rough_radius, 2 * rough_radius,
2 * rough_radius);
......
......@@ -78,7 +78,7 @@ TEST_F(ModelInferenceTest, trajectory_imitation) {
BirdviewImgFeatureRenderer::Instance()->Init(renderer_config);
std::unique_ptr<ModelInference> trajectory_imitation_inference =
std::unique_ptr<ModelInference>(new TrajectoryConvRnnInference(config));
std::unique_ptr<ModelInference>(new TrajectoryImitationInference(config));
ACHECK(trajectory_imitation_inference->Inference(&test_data_frame))
<< "Failed to inference trajectory_imitation_model";
......
......@@ -29,15 +29,13 @@
namespace apollo {
namespace planning {
TrajectoryConvRnnInference::TrajectoryConvRnnInference(
TrajectoryImitationInference::TrajectoryImitationInference(
const LearningModelInferenceTaskConfig& config)
: ModelInference(config), device_(torch::kCPU) {
LoadModel(config);
}
// TODO(Jinyun): evaluate whether use fake input in load model speed up the
// loading process
bool TrajectoryConvRnnInference::LoadModel(
bool TrajectoryImitationInference::LoadModel(
const LearningModelInferenceTaskConfig& config) {
if (config.use_cuda() && torch::cuda::is_available()) {
ADEBUG << "CUDA is available";
......@@ -45,10 +43,24 @@ bool TrajectoryConvRnnInference::LoadModel(
}
model_ = torch::jit::load(config.model_file(), device_);
torch::set_num_threads(1);
// run a fake inference at init time as first inference is relative slow
torch::Tensor input_feature_tensor = torch::zeros({1, 12, 200, 200});
torch::Tensor initial_point_tensor = torch::zeros({1, 1, 200, 200});
torch::Tensor initial_box_tensor = torch::zeros({1, 1, 200, 200});
std::vector<torch::jit::IValue> torch_inputs;
torch_inputs.push_back(c10::ivalue::Tuple::create(
{std::move(input_feature_tensor.to(device_)),
std::move(initial_point_tensor.to(device_)),
std::move(initial_box_tensor.to(device_))},
c10::TupleType::create(
std::vector<c10::TypePtr>(3, c10::TensorType::create()))));
auto torch_output_tensor =
model_.forward(torch_inputs).toTensor().to(torch::kCPU);
return true;
}
bool TrajectoryConvRnnInference::Inference(
bool TrajectoryImitationInference::Inference(
LearningDataFrame* learning_data_frame) {
const int past_points_size = learning_data_frame->adc_trajectory_point_size();
if (past_points_size == 0) {
......@@ -122,11 +134,8 @@ bool TrajectoryConvRnnInference::Inference(
<< prepration_diff.count() * 1000 << " ms.";
auto inference_start_time = std::chrono::system_clock::now();
at::Tensor torch_output_tensor = model_.forward(torch_inputs)
.toTuple()
->elements()[2]
.toTensor()
.to(torch::kCPU);
at::Tensor torch_output_tensor =
model_.forward(torch_inputs).toTensor().to(torch::kCPU);
auto inference_end_time = std::chrono::system_clock::now();
std::chrono::duration<double> inference_diff =
inference_end_time - inference_start_time;
......@@ -150,7 +159,8 @@ bool TrajectoryConvRnnInference::Inference(
const double dy = static_cast<double>(torch_output[0][i][1]);
const double dtheta = static_cast<double>(torch_output[0][i][2]);
const double v = static_cast<double>(torch_output[0][i][3]);
ADEBUG << "dx[" << dx << "], dy[" << dy << "], dtheta[" << dtheta << "], v["
<< v << "]";
const double time_sec = cur_time_sec + delta_t * (i + 1);
apollo::common::math::Vec2d offset(dx, dy);
apollo::common::math::Vec2d rotated_offset = offset.rotate(cur_heading);
......
......@@ -30,23 +30,23 @@
namespace apollo {
namespace planning {
class TrajectoryConvRnnInference : public ModelInference {
class TrajectoryImitationInference : public ModelInference {
public:
/**
* @brief Constructor
*/
explicit TrajectoryConvRnnInference(
explicit TrajectoryImitationInference(
const LearningModelInferenceTaskConfig& config);
/**
* @brief Destructor
*/
virtual ~TrajectoryConvRnnInference() = default;
virtual ~TrajectoryImitationInference() = default;
/**
* @brief Get the name of model inference
*/
std::string GetName() override { return "TRAJECTORY_CONV_RNN_INFERENCE"; };
std::string GetName() override { return "TRAJECTORY_IMITATION_INFERENCE"; };
/**
* @brief load a learned model
......
......@@ -113,7 +113,7 @@ Status OnLanePlanning::Init(const PlanningConfig& config) {
"planning is not initialized with config : " + config_.DebugString());
}
if (FLAGS_planning_offline_mode == 1) {
if (FLAGS_planning_learning_mode == 2 || FLAGS_planning_learning_mode == 3) {
PlanningSemanticMapConfig renderer_config;
ACHECK(apollo::cyber::common::GetProtoFromFile(
FLAGS_planning_birdview_img_feature_renderer_config_file,
......
......@@ -36,6 +36,7 @@ using apollo::perception::TrafficLightDetection;
using apollo::relative_map::MapMsg;
using apollo::routing::RoutingRequest;
using apollo::routing::RoutingResponse;
using apollo::storytelling::Stories;
bool PlanningComponent::Init() {
injector_ = std::make_shared<DependencyInjector>();
......@@ -50,7 +51,7 @@ bool PlanningComponent::Init() {
<< "failed to load planning config file "
<< ComponentBase::ConfigFilePath();
if (FLAGS_planning_offline_mode > 0) {
if (FLAGS_planning_learning_mode > 0) {
if (!message_process_.Init(config_)) {
AERROR << "failed to init MessageProcess";
return false;
......@@ -84,6 +85,14 @@ bool PlanningComponent::Init() {
pad_msg_.CopyFrom(*pad_msg);
});
story_telling_reader_ = node_->CreateReader<Stories>(
config_.topic_config().story_telling_topic(),
[this](const std::shared_ptr<Stories>& stories) {
ADEBUG << "Received story_telling data: run story_telling callback.";
std::lock_guard<std::mutex> lock(mutex_);
stories_.CopyFrom(*stories);
});
if (FLAGS_use_navigation_mode) {
relative_map_reader_ = node_->CreateReader<MapMsg>(
config_.topic_config().relative_map_topic(),
......@@ -135,17 +144,22 @@ bool PlanningComponent::Proc(
std::lock_guard<std::mutex> lock(mutex_);
local_view_.pad_msg = std::make_shared<PadMessage>(pad_msg_);
}
{
std::lock_guard<std::mutex> lock(mutex_);
local_view_.stories = std::make_shared<Stories>(stories_);
}
if (!CheckInput()) {
AERROR << "Input check failed";
return false;
}
if (FLAGS_planning_offline_mode == 1) {
if (FLAGS_planning_learning_mode == 2 || FLAGS_planning_learning_mode == 3) {
// data process for online training
message_process_.OnChassis(*local_view_.chassis);
message_process_.OnPrediction(*local_view_.prediction_obstacles);
message_process_.OnRoutingResponse(*local_view_.routing);
message_process_.OnStoryTelling(*local_view_.stories);
message_process_.OnTrafficLightDetection(*local_view_.traffic_light);
message_process_.OnLocalization(*local_view_.localization_estimate);
}
......
......@@ -33,6 +33,7 @@
#include "modules/planning/proto/planning_config.pb.h"
#include "modules/prediction/proto/prediction_obstacle.pb.h"
#include "modules/routing/proto/routing.pb.h"
#include "modules/storytelling/proto/story.pb.h"
namespace apollo {
namespace planning {
......@@ -64,6 +65,7 @@ class PlanningComponent final
std::shared_ptr<cyber::Reader<routing::RoutingResponse>> routing_reader_;
std::shared_ptr<cyber::Reader<planning::PadMessage>> pad_msg_reader_;
std::shared_ptr<cyber::Reader<relative_map::MapMsg>> relative_map_reader_;
std::shared_ptr<cyber::Reader<storytelling::Stories>> story_telling_reader_;
std::shared_ptr<cyber::Writer<ADCTrajectory>> planning_writer_;
std::shared_ptr<cyber::Writer<routing::RoutingRequest>> rerouting_writer_;
......@@ -73,6 +75,7 @@ class PlanningComponent final
routing::RoutingResponse routing_;
planning::PadMessage pad_msg_;
relative_map::MapMsg relative_map_;
storytelling::Stories stories_;
LocalView local_view_;
......
......@@ -355,7 +355,8 @@ message TopicConfig {
optional string relative_map_topic = 7;
optional string routing_request_topic = 8;
optional string routing_response_topic = 9;
optional string traffic_light_detection_topic = 10;
optional string story_telling_topic = 10;
optional string traffic_light_detection_topic = 11;
}
message PlanningConfig {
......
......@@ -90,25 +90,6 @@ void LaneFollowStage::RecordObstacleDebugInfo(
}
}
void LaneFollowStage::RecordDebugInfo(ReferenceLineInfo* reference_line_info,
const std::string& name,
const double time_diff_ms) {
if (!FLAGS_enable_record_debug) {
ADEBUG << "Skip record debug info";
return;
}
if (reference_line_info == nullptr) {
AERROR << "Reference line info is null.";
return;
}
auto ptr_latency_stats = reference_line_info->mutable_latency_stats();
auto ptr_stats = ptr_latency_stats->add_task_stats();
ptr_stats->set_name(name);
ptr_stats->set_time_ms(time_diff_ms);
}
Stage::StageStatus LaneFollowStage::Process(
const TrajectoryPoint& planning_start_point, Frame* frame) {
bool has_drivable_reference_line = false;
......@@ -180,22 +161,23 @@ Status LaneFollowStage::PlanOnReferenceLine(
<< reference_line_info->IsChangeLanePath();
auto ret = Status::OK();
for (auto* optimizer : task_list_) {
for (auto* task : task_list_) {
const double start_timestamp = Clock::NowInSeconds();
ret = optimizer->Execute(frame, reference_line_info);
if (!ret.ok()) {
AERROR << "Failed to run tasks[" << optimizer->Name()
<< "], Error message: " << ret.error_message();
break;
}
ret = task->Execute(frame, reference_line_info);
const double end_timestamp = Clock::NowInSeconds();
const double time_diff_ms = (end_timestamp - start_timestamp) * 1000;
ADEBUG << "after optimizer " << optimizer->Name() << ":"
ADEBUG << "after task[" << task->Name() << "]:"
<< reference_line_info->PathSpeedDebugString();
ADEBUG << optimizer->Name() << " time spend: " << time_diff_ms << " ms.";
ADEBUG << task->Name() << " time spend: " << time_diff_ms << " ms.";
RecordDebugInfo(reference_line_info, task->Name(), time_diff_ms);
RecordDebugInfo(reference_line_info, optimizer->Name(), time_diff_ms);
if (!ret.ok()) {
AERROR << "Failed to run tasks[" << task->Name()
<< "], Error message: " << ret.error_message();
break;
}
// TODO(SHU): disable reference line order changes for now
// updated reference_line_info, because it is changed in
......
......@@ -70,9 +70,6 @@ class LaneFollowStage : public Stage {
void RecordObstacleDebugInfo(ReferenceLineInfo* reference_line_info);
void RecordDebugInfo(ReferenceLineInfo* reference_line_info,
const std::string& name, const double time_diff_ms);
private:
ScenarioConfig config_;
std::unique_ptr<Stage> stage_;
......
......@@ -800,7 +800,7 @@ void ScenarioManager::ScenarioDispatch(const Frame& frame) {
ACHECK(!frame.reference_line_info().empty());
ScenarioConfig::ScenarioType scenario_type;
if (FLAGS_planning_offline_mode == 1) {
if (FLAGS_planning_learning_mode == 2) {
scenario_type = ScenarioDispatchLearning();
} else {
scenario_type = ScenarioDispatchNonLearning(frame);
......
......@@ -93,7 +93,17 @@ bool Stage::ExecuteTaskOnReferenceLine(
}
for (auto* task : task_list_) {
const double start_timestamp = Clock::NowInSeconds();
const auto ret = task->Execute(frame, &reference_line_info);
const double end_timestamp = Clock::NowInSeconds();
const double time_diff_ms = (end_timestamp - start_timestamp) * 1000;
ADEBUG << "after task[" << task->Name() << "]: "
<< reference_line_info.PathSpeedDebugString();
ADEBUG << task->Name() << " time spend: " << time_diff_ms << " ms.";
RecordDebugInfo(&reference_line_info, task->Name(), time_diff_ms);
if (!ret.ok()) {
AERROR << "Failed to run tasks[" << task->Name()
<< "], Error message: " << ret.error_message();
......@@ -135,7 +145,16 @@ bool Stage::ExecuteTaskOnReferenceLineForOnlineLearning(
auto& picked_reference_line_info =
frame->mutable_reference_line_info()->front();
for (auto* task : task_list_) {
const double start_timestamp = Clock::NowInSeconds();
const auto ret = task->Execute(frame, &picked_reference_line_info);
const double end_timestamp = Clock::NowInSeconds();
const double time_diff_ms = (end_timestamp - start_timestamp) * 1000;
ADEBUG << "task[" << task->Name()
<< "] time spent: " << time_diff_ms << " ms.";
RecordDebugInfo(&picked_reference_line_info, task->Name(), time_diff_ms);
if (!ret.ok()) {
AERROR << "Failed to run tasks[" << task->Name()
<< "], Error message: " << ret.error_message();
......@@ -198,6 +217,25 @@ Stage::StageStatus Stage::FinishScenario() {
return Stage::FINISHED;
}
void Stage::RecordDebugInfo(ReferenceLineInfo* reference_line_info,
const std::string& name,
const double time_diff_ms) {
if (!FLAGS_enable_record_debug) {
ADEBUG << "Skip record debug info";
return;
}
if (reference_line_info == nullptr) {
AERROR << "Reference line info is null.";
return;
}
auto ptr_latency_stats = reference_line_info->mutable_latency_stats();
auto ptr_stats = ptr_latency_stats->add_task_stats();
ptr_stats->set_name(name);
ptr_stats->set_time_ms(time_diff_ms);
}
} // namespace scenario
} // namespace planning
} // namespace apollo
......@@ -92,6 +92,9 @@ class Stage {
virtual Stage::StageStatus FinishScenario();
void RecordDebugInfo(ReferenceLineInfo* reference_line_info,
const std::string& name, const double time_diff_ms);
protected:
std::map<TaskConfig::TaskType, std::unique_ptr<Task>> tasks_;
std::vector<Task*> task_list_;
......
......@@ -26,6 +26,7 @@
#include "modules/common/configs/vehicle_config_helper.h"
#include "modules/common/math/box2d.h"
#include "modules/common/math/line_segment2d.h"
#include "modules/planning/common/path/discretized_path.h"
#include "modules/planning/common/path_boundary.h"
#include "modules/planning/common/planning_context.h"
#include "modules/planning/proto/planning_config.pb.h"
......@@ -34,6 +35,8 @@
namespace apollo {
namespace planning {
using common::ErrorCode;
using common::PathPoint;
using common::SLPoint;
using common::Status;
using common::TrajectoryPoint;
......@@ -47,7 +50,7 @@ PathReferenceDecider::PathReferenceDecider(const TaskConfig &config)
Status PathReferenceDecider::Execute(Frame *frame,
ReferenceLineInfo *reference_line_info) {
Task::Execute(frame, reference_line_info);
return Status::OK();
return Process(frame, reference_line_info);
}
Status PathReferenceDecider::Process(
......@@ -62,29 +65,61 @@ Status PathReferenceDecider::Process(
frame->learning_data_adc_future_trajectory_points();
ADEBUG << "There are " << path_reference.size() << " path points.";
// check if trajectory points are within path bounds
// if yes: use learning model output
// otherwise: use previous path planning method
if (IsValidPathReference(path_reference, path_boundaries)) {
// mark learning trajectory as path reference
frame->set_learning_trajectory_valid(true);
// get regular path bound
size_t regular_path_bound_idx = GetRegularPathBound(path_boundaries);
if (regular_path_bound_idx == path_boundaries.size()) {
const std::string msg = "No regular path boundary";
AERROR << msg;
return Status(ErrorCode::PLANNING_ERROR, msg);
}
// check if path reference is valid
// current, only check if path reference point is within path bounds
if (!IsValidPathReference(reference_line_info,
path_boundaries[regular_path_bound_idx],
path_reference)) {
AERROR << "Learning model output is not a validated path reference";
return Status::OK();
}
std::vector<PathPoint> evaluated_path_reference;
// evaluate path reference
EvaluatePathReference(&path_boundaries[regular_path_bound_idx],
path_reference, &evaluated_path_reference);
// mark learning trajectory as path reference
frame->set_learning_trajectory_valid(true);
return Status::OK();
}
size_t PathReferenceDecider::GetRegularPathBound(
const std::vector<PathBoundary> &path_bounds) const {
for (auto iter = begin(path_bounds); iter != end(path_bounds); ++iter) {
if (iter->label().find("regular") != std::string::npos) {
return distance(begin(path_bounds), iter);
}
}
return path_bounds.size();
}
// NOTE(review): this span appears to be a mangled diff merge — the old
// overload IsValidPathReference(path_reference, path_bounds) is cut off
// mid-loop, and the new overload's parameter list begins right after the
// `break;` below. Reconcile against the upstream commit before relying on it.
bool PathReferenceDecider::IsValidPathReference(
const std::vector<TrajectoryPoint> &path_reference,
const std::vector<PathBoundary> &path_bounds) {
// choose only regular path_bound
// NOTE(review): pointer is never initialized; if no "regular" boundary is
// found it stays indeterminate before any later use.
const PathBoundary *regular_path_bound;
// NOTE(review): `const auto path_bound` copies each element, so taking the
// address of the loop-local copy below dangles once the iteration ends.
for (const auto path_bound : path_bounds) {
if (path_bound.label() == "regular") {
regular_path_bound = &path_bound;
break;
// NOTE(review): the new overload's signature/body starts here (diff artifact).
const ReferenceLineInfo *reference_line_info,
const PathBoundary &regular_path_bound,
const std::vector<TrajectoryPoint> &path_reference) {
// Reject the learning-model output if any of its points falls outside the
// regular path bound.
for (auto path_referece_point : path_reference) {
const double cur_x = path_referece_point.path_point().x();
const double cur_y = path_referece_point.path_point().y();
// IsPointWithinPathBounds returns -1 when (x, y) is outside the bound.
if (-1 == IsPointWithinPathBounds(reference_line_info, regular_path_bound,
cur_x, cur_y)) {
return false;
}
}
// NOTE(review): in the new overload `regular_path_bound` is a reference, so
// `->` would not compile — another diff-merge artifact.
ADEBUG << regular_path_bound->label();
return true;
}
bool PathReferenceDecider::IsADCBoxAlongPathReferenceWithinPathBounds(
const std::vector<TrajectoryPoint> &path_reference,
const PathBoundary *regular_path_bound) {
/* check adc box has overlap with each path line segment*/
// loop over output trajectory points
// check if path reference point is valid or not
// 1. line segment formed by two adjacent boundary point
......@@ -99,17 +134,23 @@ bool PathReferenceDecider::IsValidPathReference(
common::VehicleConfigHelper::Instance()->GetBoundingBox(path_point);
vehicle_boxes.emplace_back(vehicle_box);
}
// check intersection of linesegments and adc boxes
// left & right path bounds
for (auto segmented_path_bound : segmented_path_bounds) {
// line segment for each bound
for (auto line_segment : segmented_path_bound) {
// check if all vehicle boxes along learning model outputhas overlap with
// ADC box
// check if all vehicle boxes along learning model outputhas
// overlap with ADC box
// TODO(Shu): early stop when vehicle box is far away.
for (auto vehicle_box : vehicle_boxes) {
if (vehicle_box.HasOverlap(line_segment)) {
ADEBUG << std::setprecision(9) << "Vehicle box:["
<< vehicle_box.center_x() << "," << vehicle_box.center_y()
<< "]"
<< "Violate path bound at [" << line_segment.start().x() << ","
<< line_segment.start().y() << "];"
<< "[" << line_segment.end().x() << ","
<< line_segment.end().y() << "]";
return false;
}
}
......@@ -175,5 +216,60 @@ void PathReferenceDecider::PathBoundToLineSegments(
path_bound_segments.emplace_back(cur_right_bound_segments);
}
// Checks whether (x, y), converted to Frenet coordinates on the current
// reference line, lies within the lateral corridor described by path_bound.
// Returns the index of the boundary sample at/after the point's station on
// success, or -1 when the point is outside longitudinally or laterally.
int PathReferenceDecider::IsPointWithinPathBounds(
    const ReferenceLineInfo *reference_line_info,
    const PathBoundary &path_bound, const double x, const double y) {
  const double kPathBoundsDeciderResolution = 0.5;
  common::SLPoint point_sl;
  reference_line_info->reference_line().XYToSL({x, y}, &point_sl);
  const double start_s = path_bound.start_s();
  const double delta_s = path_bound.delta_s();
  const int path_bound_size = static_cast<int>(path_bound.boundary().size());
  const double end_s = start_s + delta_s * (path_bound_size - 1);
  // Allow a small tolerance (two resolution steps) before the first sample.
  if (point_sl.s() > end_s ||
      point_sl.s() < start_s - kPathBoundsDeciderResolution * 2) {
    ADEBUG << "Longitudinally outside the boundary.";
    return -1;
  }
  // Locate the first boundary sample at or beyond the point's station.
  int idx_after = 0;
  while (idx_after < path_bound_size &&
         start_s + idx_after * delta_s < point_sl.s()) {
    ++idx_after;
  }
  // Clamp against floating-point rounding that could push idx_after one past
  // the last sample even though point_sl.s() <= end_s held above.
  if (idx_after > path_bound_size - 1) {
    idx_after = path_bound_size - 1;
  }
  ADEBUG << "The idx_after = " << idx_after;
  ADEBUG << "The point is at: " << point_sl.l();
  // BUGFIX: when point_sl.s() <= start_s (permitted by the tolerance above),
  // idx_after is 0 and the original `idx_after - 1` indexed boundary().at(-1),
  // which wraps to a huge size_t and throws std::out_of_range. Clamp to 0.
  const int idx_before = (idx_after > 0) ? idx_after - 1 : 0;
  // Inside only if the lateral offset fits within both neighboring samples
  // (tuple element 0 is the right/lower bound, element 1 the left/upper).
  if (std::get<0>(path_bound.boundary().at(idx_before)) <= point_sl.l() &&
      std::get<1>(path_bound.boundary().at(idx_before)) >= point_sl.l() &&
      std::get<0>(path_bound.boundary().at(idx_after)) <= point_sl.l() &&
      std::get<1>(path_bound.boundary().at(idx_after)) >= point_sl.l()) {
    return idx_after;
  }
  ADEBUG << "Laterally outside the boundary.";
  return -1;
}
void PathReferenceDecider::EvaluatePathReference(
const PathBoundary *path_bound,
const std::vector<TrajectoryPoint> &path_reference,
std::vector<PathPoint> *evaluated_path_reference) {
const double delta_s = path_bound->delta_s();
const double path_reference_end_s = path_reference.back().path_point().s();
DiscretizedPath discrete_path_reference;
for (auto trajectory_point : path_reference) {
discrete_path_reference.emplace_back(trajectory_point.path_point());
}
for (size_t idx = 0; idx < path_bound->boundary().size(); ++idx) {
// relative s
double cur_s = static_cast<double>(idx) * delta_s;
if (cur_s > path_reference_end_s) {
break;
}
evaluated_path_reference->emplace_back(
discrete_path_reference.Evaluate(cur_s));
}
}
} // namespace planning
} // namespace apollo
......@@ -49,8 +49,10 @@ class PathReferenceDecider : public Task {
* @return false
*/
bool IsValidPathReference(
const std::vector<common::TrajectoryPoint> &path_reference,
const std::vector<PathBoundary> &path_bound);
const ReferenceLineInfo *reference_line_info,
const PathBoundary &path_bound,
const std::vector<common::TrajectoryPoint> &path_reference);
/**
* @brief convert discrete path bounds to line segments
*
......@@ -59,6 +61,54 @@ class PathReferenceDecider : public Task {
const PathBoundary *path_bound,
std::vector<std::vector<common::math::LineSegment2d>>
&path_bound_segments);
/**
* @brief convert path points from evenly dt to evenly ds distribution
*
* @param path_bound
* @param path_reference
* @param evaluated_path_reference
*/
void EvaluatePathReference(
const PathBoundary *path_bound,
const std::vector<common::TrajectoryPoint> &path_reference,
std::vector<common::PathPoint> *evaluated_path_reference);
/**
* @brief check if a point (x,y) is within path bounds
*
* @param reference_line_info
* @param path_bound
* @param x
* @param y
* @return int
*/
int IsPointWithinPathBounds(const ReferenceLineInfo *reference_line_info,
const PathBoundary &path_bound, const double x,
const double y);
/**
* @brief Get the Regular Path Bound object
*
* @param path_bounds
* @return size_t
*/
size_t GetRegularPathBound(
const std::vector<PathBoundary> &path_bounds) const;
/**
* @brief check is ADC box along path reference is within path bounds
* more accurate method.
*
* @param path_reference
* @param regular_path_bound
* @return true
* @return false
*/
bool IsADCBoxAlongPathReferenceWithinPathBounds(
const std::vector<common::TrajectoryPoint> &path_reference,
const PathBoundary *regular_path_bound);
};
} // namespace planning
......
......@@ -39,6 +39,9 @@ using apollo::common::TrajectoryPoint;
// Constructs the learning-model inference task. Validates that the task
// config carries a learning_model_inference_task_config, then builds the
// trajectory-imitation inference engine from it.
LearningModelInferenceTask::LearningModelInferenceTask(const TaskConfig& config)
: Task(config) {
ACHECK(config.has_learning_model_inference_task_config());
// The task owns the inference engine for its whole lifetime.
trajectory_imitation_inference_ =
std::make_unique<TrajectoryImitationInference>(
config.learning_model_inference_task_config());
}
Status LearningModelInferenceTask::Execute(
......@@ -86,32 +89,28 @@ Status LearningModelInferenceTask::Process(Frame* frame) {
TrajectoryEvaluator trajectory_evaluator;
// evaluate adc trajectory
trajectory_evaluator.EvaluateADCTrajectory(
start_point_timestamp_sec,
config.trajectory_delta_t(),
&learning_data_frame);
trajectory_evaluator.EvaluateADCTrajectory(start_point_timestamp_sec,
config.trajectory_delta_t(),
&learning_data_frame);
// for (const auto& t : learning_data_frame.adc_trajectory_point()) {
// AERROR << "AFTER: " << t.timestamp_sec();
// }
// evaluate obstacle trajectory
trajectory_evaluator.EvaluateObstacleTrajectory(
start_point_timestamp_sec,
config.trajectory_delta_t(),
&learning_data_frame);
trajectory_evaluator.EvaluateObstacleTrajectory(start_point_timestamp_sec,
config.trajectory_delta_t(),
&learning_data_frame);
// evaluate obstacle prediction trajectory
trajectory_evaluator.EvaluateObstaclePredictionTrajectory(
start_point_timestamp_sec,
config.trajectory_delta_t(),
start_point_timestamp_sec, config.trajectory_delta_t(),
&learning_data_frame);
TrajectoryConvRnnInference trajectory_conv_rnn_inference(config);
if (!trajectory_conv_rnn_inference.Inference(&learning_data_frame)) {
const std::string msg =
absl::StrCat("TrajectoryConvRnnInference Inference failed. frame_num[",
learning_data_frame.frame_num(), "]");
if (!trajectory_imitation_inference_->Inference(&learning_data_frame)) {
const std::string msg = absl::StrCat(
"TrajectoryImitationInference Inference failed. frame_num[",
learning_data_frame.frame_num(), "]");
AERROR << msg;
return Status(ErrorCode::PLANNING_ERROR, msg);
}
......@@ -139,7 +138,7 @@ Status LearningModelInferenceTask::Process(Frame* frame) {
constexpr double kADCFutureTrajectoryDeltaTime = 0.02;
std::vector<TrajectoryPointFeature> future_trajectory;
for (const auto& tp :
learning_data_frame.output().adc_future_trajectory_point()) {
learning_data_frame.output().adc_future_trajectory_point()) {
future_trajectory.push_back(tp);
}
......@@ -157,10 +156,8 @@ Status LearningModelInferenceTask::Process(Frame* frame) {
std::vector<TrajectoryPointFeature> evaluated_future_trajectory;
trajectory_evaluator.EvaluateADCFutureTrajectory(
learning_data_frame.frame_num(),
future_trajectory,
start_point_timestamp_sec,
kADCFutureTrajectoryDeltaTime,
learning_data_frame.frame_num(), future_trajectory,
start_point_timestamp_sec, kADCFutureTrajectoryDeltaTime,
&evaluated_future_trajectory);
// convert to common::TrajectoryPoint
......
......@@ -20,10 +20,12 @@
#pragma once
#include <memory>
#include <utility>
#include <vector>
#include "modules/planning/common/trajectory_evaluator.h"
#include "modules/planning/learning_based/model_inference/trajectory_imitation_inference.h"
#include "modules/planning/tasks/task.h"
namespace apollo {
......@@ -40,8 +42,10 @@ class LearningModelInferenceTask : public Task {
apollo::common::Status Process(Frame *frame);
void ConvertADCFutureTrajectory(
const std::vector<TrajectoryPointFeature>& trajectory,
std::vector<common::TrajectoryPoint>* adc_future_trajectory);
const std::vector<TrajectoryPointFeature> &trajectory,
std::vector<common::TrajectoryPoint> *adc_future_trajectory);
std::unique_ptr<TrajectoryImitationInference> trajectory_imitation_inference_;
};
} // namespace planning
......
......@@ -30,7 +30,11 @@ class LearningModelInferenceTaskTest : public ::testing::Test {
public:
// Prepares the task config shared by all test cases in this fixture:
// selects the learning-model inference task and points it at the
// conv-RNN test model with CUDA enabled.
virtual void SetUp() {
  config_.set_task_type(TaskConfig::LEARNING_MODEL_INFERENCE_TASK);
  auto* task_config = config_.mutable_learning_model_inference_task_config();
  task_config->set_model_file(
      "/apollo/modules/planning/data/model/test_model_conv_rnn.pt");
  task_config->set_use_cuda(true);
}
virtual void TearDown() {}
......
model_file: "/apollo/modules/planning/data/model/test_model.pt"
model_file: "/apollo/modules/planning/data/model/test_model_conv_rnn.pt"
use_cuda: true
......@@ -333,6 +333,10 @@ void SemanticMap::DrawHistory(const ObstacleHistory& history,
if (feature.id() == FLAGS_ego_vehicle_id) {
DrawRect(feature, decay_color, base_x, base_y, img);
} else {
if (feature.polygon_point_size() == 0) {
AERROR << "No polygon points in feature, please check!";
continue;
}
DrawPoly(feature, decay_color, base_x, base_y, img);
}
}
......
......@@ -153,12 +153,14 @@ void CloseToJunctionTeller::GetOverlaps(const ADCTrajectory& adc_trajectory) {
const double s_start = adc_trajectory.trajectory_point(0).path_point().s();
junction_id_.clear();
junction_distance_ = -1;
clear_area_id_.clear();
clear_area_distance_ = -1;
crosswalk_id_.clear();
crosswalk_distance_ = -1;
junction_id_.clear();
junction_distance_ = -1;
pnc_junction_id_.clear();
pnc_junction_distance_ = -1;
signal_id_.clear();
signal_distance_ = -1;
stop_sign_id_.clear();
......@@ -171,31 +173,6 @@ void CloseToJunctionTeller::GetOverlaps(const ADCTrajectory& adc_trajectory) {
break;
}
// junction
if (junction_id_.empty() || junction_distance_ < 0) {
std::string junction_id;
std::string pnc_junction_id;
const double junction = GetJunction(path_point, &junction_id);
const double pnc_junction = GetPNCJunction(path_point, &pnc_junction_id);
if (pnc_junction) {
// in PNC_JUNCTION (including overlapping with JUNCTION)
junction_id_ = pnc_junction_id;
junction_type_ = CloseToJunction::PNC_JUNCTION;
junction_distance_ = path_point.s() - s_start;
overlapping_junction_id = junction ? junction_id : "";
} else if (junction) {
// in JUNCTION only
if (junction_id != overlapping_junction_id) {
// not in JUNCTION overlapping with a PNC_JUNCTION
junction_id_ = junction_id;
junction_type_ = CloseToJunction::JUNCTION;
junction_distance_ = path_point.s() - s_start;
}
} else {
overlapping_junction_id.clear();
}
}
// clear_area
if (clear_area_id_.empty() || clear_area_distance_ < 0) {
std::string clear_area_id;
......@@ -214,6 +191,24 @@ void CloseToJunctionTeller::GetOverlaps(const ADCTrajectory& adc_trajectory) {
}
}
// junction
if (junction_id_.empty() || junction_distance_ < 0) {
std::string junction_id;
if (GetJunction(path_point, &junction_id)) {
junction_id_ = junction_id;
junction_distance_ = path_point.s() - s_start;
}
}
// pnc_junction
if (pnc_junction_id_.empty() || pnc_junction_distance_ < 0) {
std::string pnc_junction_id;
if (GetPNCJunction(path_point, &pnc_junction_id)) {
pnc_junction_id_ = pnc_junction_id;
pnc_junction_distance_ = path_point.s() - s_start;
}
}
// signal
if (signal_id_.empty() || signal_distance_ < 0) {
std::string signal_id;
......@@ -288,14 +283,21 @@ void CloseToJunctionTeller::Update(Stories* stories) {
}
// CloseToJunction
if (!junction_id_.empty() && junction_distance_ >= 0) {
if ((!junction_id_.empty() && junction_distance_ >= 0) ||
(!pnc_junction_id_.empty() && pnc_junction_distance_ >=0)) {
if (!stories->has_close_to_junction()) {
AINFO << "Enter CloseToJunction story";
}
auto* story = stories->mutable_close_to_junction();
story->set_id(junction_id_);
story->set_type(junction_type_);
story->set_distance(junction_distance_);
if (!pnc_junction_id_.empty() && pnc_junction_distance_ >=0) {
story->set_id(pnc_junction_id_);
story->set_type(CloseToJunction::PNC_JUNCTION);
story->set_distance(pnc_junction_distance_);
} else {
story->set_id(junction_id_);
story->set_type(CloseToJunction::JUNCTION);
story->set_distance(junction_distance_);
}
} else if (stories->has_close_to_junction()) {
AINFO << "Exit CloseToJunction story";
stories->clear_close_to_junction();
......
......@@ -38,13 +38,14 @@ class CloseToJunctionTeller : public BaseTeller {
void GetOverlaps(const apollo::planning::ADCTrajectory& adc_trajectory);
private:
std::string junction_id_;
CloseToJunction::JunctionType junction_type_;
double junction_distance_;
std::string clear_area_id_;
double clear_area_distance_;
std::string crosswalk_id_;
double crosswalk_distance_;
std::string junction_id_;
double junction_distance_;
std::string pnc_junction_id_;
double pnc_junction_distance_;
std::string signal_id_;
double signal_distance_;
std::string stop_sign_id_;
......
......@@ -28,6 +28,6 @@ sudo mkdir -p ${TARGET_DIR}
/apollo/bazel-bin/modules/planning/pipeline/evaluate_trajectory \
--flagfile=/apollo/modules/planning/conf/planning.conf \
--planning_offline_mode=2 \
--planning_learning_mode=1 \
--planning_source_dirs=${SRC_DIR} \
--planning_data_dir=${TARGET_DIR} \
......@@ -35,6 +35,6 @@ fi
/apollo/bazel-bin/modules/planning/pipeline/record_to_learning_data \
--flagfile=/apollo/modules/planning/conf/planning.conf \
--map_dir=/apollo/modules/map/data/${MAP_DIR} \
--planning_offline_mode=2 \
--planning_learning_mode=1 \
--planning_offline_bags=${SRC_DIR} \
--planning_data_dir=${TARGET_DIR}
......@@ -45,7 +45,7 @@ function set_lib_path() {
export LD_LIBRARY_PATH=/usr/local/adolc/lib64:$LD_LIBRARY_PATH
if [ -e /usr/local/cuda/ ];then
export PATH=/usr/local/cuda/bin:$PATH
add_to_path "/usr/local/cuda/bin"
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH
export C_INCLUDE_PATH=/usr/local/cuda/include:$C_INCLUDE_PATH
export CPLUS_INCLUDE_PATH=/usr/local/cuda/include:$CPLUS_INCLUDE_PATH
......@@ -65,7 +65,7 @@ function set_lib_path() {
# Set teleop paths
export PYTHONPATH="${APOLLO_ROOT_DIR}/modules/teleop/common:${PYTHONPATH}"
export PATH="${APOLLO_ROOT_DIR}/modules/teleop/common/scripts:${PATH}"
add_to_path "/apollo/modules/teleop/common/scripts"
}
function create_data_dir() {
......
# Verify an NVIDIA GPU is present, then download and run the CUDA 8.0
# runfile installer.
lspci | grep -i nvidia
wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda_8.0.61_375.26_linux-run
# Fixed: the installer filename must match the file downloaded above
# (was "cuda_88.0.61..."), and the flag is --silent (was "--slient").
# NOTE(review): --tmpdir=/dev is an unusual extraction scratch directory —
# confirm it is intentional (e.g. RAM-backed tmp) rather than /tmp.
sudo sh cuda_8.0.61_375.26_linux-run --silent --tmpdir=/dev
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册