Commit d77b9001 -- Author: Xiangquan Xiao, Committer: Jiangtao Hu

Robot: Fix float assignment warnings. (#2288)

Parent d6f08899
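Every hunk in this commit applies the same one-character fix: a double literal assigned to a float object gets an 'f' suffix, so the compiler no longer reports an implicit double-to-float narrowing conversion. A minimal sketch of the warning and the fix is below; the file name and the -Wfloat-conversion flag are illustrative assumptions about how such warnings typically surface, not details taken from the commit.

// narrowing_warning_sketch.cc -- hypothetical minimal repro, not part of Apollo.
// Compile with, e.g.:  g++ -Wfloat-conversion -c narrowing_warning_sketch.cc
int main() {
  float before = 0.0;   // 0.0 has type double: the assignment narrows double -> float and warns
  float after = 0.0f;   // the 'f' suffix makes the literal float, so no conversion occurs
  return static_cast<int>(before + after);
}

Note that where a double constant such as M_PI still appears in an expression, the arithmetic itself stays in double precision; the suffix only changes the type of the literal it is attached to.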
......@@ -186,7 +186,7 @@ inline void L2Norm(int feat_dim, float *feat_data) {
return;
}
// feature normalization
float l2norm = 0.0;
float l2norm = 0.0f;
for (int i = 0; i < feat_dim; ++i) {
l2norm += feat_data[i] * feat_data[i];
}
......
......@@ -24,7 +24,7 @@ void init_sin_cos_rot_table(float* sin_rot_table, float* cos_rot_table,
uint16_t rotation, float rotation_resolution) {
for (uint16_t i = 0; i < rotation; ++i) {
// float rotation = angles::from_degrees(rotation_resolution * i);
float rotation = rotation_resolution * i * M_PI / 180.0;
float rotation = rotation_resolution * i * M_PI / 180.0f;
cos_rot_table[i] = cosf(rotation);
sin_rot_table[i] = sinf(rotation);
}
......
......@@ -73,7 +73,7 @@ void Velodyne128Parser::Order(std::shared_ptr<PointCloud> cloud) {
void Velodyne128Parser::Unpack(const VelodynePacket& pkt,
std::shared_ptr<PointCloud> pc) {
float azimuth_diff, azimuth_corrected_f;
float last_azimuth_diff = 0;
float last_azimuth_diff = 0.0f;
uint16_t azimuth, azimuth_next, azimuth_corrected;
// float x_coord, y_coord, z_coord;
// const raw_packet_t *raw = (const raw_packet_t *)&pkt.data[0];
......
......@@ -67,9 +67,9 @@ uint64_t Velodyne16Parser::GetTimestamp(double base_time, float time_offset,
*/
void Velodyne16Parser::Unpack(const VelodynePacket& pkt,
std::shared_ptr<PointCloud> pc) {
float azimuth_diff = 0.0;
float last_azimuth_diff = 0.0;
float azimuth_corrected_f = 0.0;
float azimuth_diff = 0.0f;
float last_azimuth_diff = 0.0f;
float azimuth_corrected_f = 0.0f;
int azimuth_corrected = 0.0;
// const RawPacket* raw = (const RawPacket*)&pkt.data[0];
......
......@@ -340,7 +340,7 @@ class VoxelGridCovariance : public pcl::VoxelGrid<PointT> {
// Get the distance value
const uint8_t* pt_data =
reinterpret_cast<const uint8_t*>(&input_->points[cp]);
float distance_value = 0;
float distance_value = 0.0f;
memcpy(&distance_value, pt_data + fields[distance_idx].offset,
sizeof(float));
......
......@@ -253,7 +253,7 @@ void LocalizationLidar::RefineAltitudeFromMap(Eigen::Affine3d *pose) {
pre_vehicle_ground_height_ = lidar_pose.translation()(2) - height_diff;
}
float vehicle_ground_alt = 0.0;
float vehicle_ground_alt = 0.0f;
unsigned int x = 0;
unsigned int y = 0;
if (node->GetCoordinate(lidar_trans, &x, &y)) {
......
......@@ -116,9 +116,9 @@ class LossyMapMatrix2D : public BaseMapMatrix {
const int var_range_ = 1023; // 65535;
const int var_ratio_ = 4; // 256;
// const unsigned int _alt_range = 1023;//65535;
const float alt_ground_interval_ = 0.04;
const float alt_ground_interval_ = 0.04f;
const uint16_t ground_void_flag_ = 0xffff;
const float alt_avg_interval_ = 0.04;
const float alt_avg_interval_ = 0.04f;
const int count_range_ = 2; // 30;
mutable float alt_avg_min_;
mutable float alt_avg_max_;
......
......@@ -174,11 +174,11 @@ int main(int argc, char** argv) {
for (; itr != buf.end(); ++itr, ++index) {
// int single_alt = 0;
// int double_alt = 0;
// float delta_alt_max = 0.0;
// float delta_alt_min = 100.0;
// float delta_alt_max = 0.0f;
// float delta_alt_min = 100.0f;
// int delta_alt_minus_num = 0;
// float alt_max = 0.0;
// float alt_min = 100.0;
// float alt_max = 0.0f;
// float alt_min = 100.0f;
LosslessMapNode* lossless_node =
static_cast<LosslessMapNode*>(lossless_map.GetMapNodeSafe(*itr));
......@@ -203,8 +203,8 @@ int main(int argc, char** argv) {
unsigned int count = lossless_node->GetCount(row, col);
// Read altitude
float altitude_ground = 0.0;
float altitude_avg = 0.0;
float altitude_ground = 0.0f;
float altitude_avg = 0.0f;
bool is_ground_useful = false;
std::vector<float> layer_alts;
std::vector<unsigned int> layer_counts;
......
......@@ -87,7 +87,7 @@ struct alignas(16) Object {
// @brief if the velocity estimation is converged, true by default
bool velocity_converged = true;
// @brief velocity confidence, required
float velocity_confidence = 1.0;
float velocity_confidence = 1.0f;
// @brief acceleration of the object, required
Eigen::Vector3f acceleration = Eigen::Vector3f(0, 0, 0);
// @brief covariance matrix of the acceleration uncertainty, required
......
......@@ -20,12 +20,12 @@ namespace perception {
namespace base {
struct CarLight {
float brake_visible = 0;
float brake_switch_on = 0;
float left_turn_visible = 0;
float left_turn_switch_on = 0;
float right_turn_visible = 0;
float right_turn_switch_on = 0;
float brake_visible = 0.0f;
float brake_switch_on = 0.0f;
float left_turn_visible = 0.0f;
float left_turn_switch_on = 0.0f;
float right_turn_visible = 0.0f;
float right_turn_switch_on = 0.0f;
void Reset() {
brake_visible = 0;
......
......@@ -118,7 +118,7 @@ bool ResizeCPU(const base::Blob<uint8_t> &src_blob,
const int y2_read = std::min(y2, height - 1);
int src_reg = 0;
for (int c = 0; c < channel; c++) {
float out = 0;
float out = 0.0f;
int idx11 = (y1_read * stepwidth + x1_read) * channel;
src_reg = src[idx11 + c];
......
......@@ -182,7 +182,7 @@ void DenselineLanePostprocessor::CalLaneMap(
score_channel[2] = output_data[channel2_pos + x];
score_channel[3] = output_data[channel3_pos + x];
// utilize softmax to get the probability
float sum_score = 0;
float sum_score = 0.0f;
for (int i = 0; i < 4; i++) {
score_channel[i] = static_cast<float>(exp(score_channel[i]));
sum_score += score_channel[i];
......@@ -717,13 +717,13 @@ void DenselineLanePostprocessor::AddImageLaneline(
return;
}
// check the validity of laneline
float sum_dist = 0;
float avg_dist = 0;
float sum_dist = 0.0f;
float avg_dist = 0.0f;
int count = 0;
for (int i = 0; i < image_point_set_size; i++) {
float x_pos = img_pos_vec[i](0, 0);
float y_pos = img_pos_vec[i](1, 0);
float x_poly = 0;
float x_poly = 0.0f;
PolyEval(y_pos, max_poly_order, img_coeff, &x_poly);
float dist = static_cast<float>(fabs(x_poly - x_pos));
sum_dist += dist;
......@@ -762,7 +762,7 @@ void DenselineLanePostprocessor::PolyFitCameraLaneline(
// z: longitudinal direction
// x: latitudinal direction
float x_start = camera_point_set[0].z;
float x_end = 0;
float x_end = 0.0f;
Eigen::Matrix<float, max_poly_order + 1, 1> camera_coeff;
std::vector<Eigen::Matrix<float, 2, 1> > camera_pos_vec;
for (int i = 0; i < static_cast<int>(camera_point_set.size()); i++) {
......
......@@ -98,10 +98,10 @@ struct YoloBlobs {
std::shared_ptr<base::Blob<float>> expand_blob;
};
struct MinDims {
float min_2d_height = 0;
float min_3d_height = 0;
float min_3d_length = 0;
float min_3d_width = 0;
float min_2d_height = 0.0f;
float min_3d_height = 0.0f;
float min_3d_length = 0.0f;
float min_3d_width = 0.0f;
};
__host__ __device__
......
......@@ -34,7 +34,7 @@ bool CosineSimilar::Calc(CameraFrame *frame1,
->camera_supplement.object_feature.size();
for (auto &object1 : frame1->detected_objects) {
for (auto &object2 : frame2->detected_objects) {
float s = 0;
float s = 0.0f;
for (size_t k = 0; k < dim; ++k) {
s += object1->camera_supplement.object_feature[k]
* object2->camera_supplement.object_feature[k];
......
......@@ -148,8 +148,8 @@ void ObstacleReference::CorrectSize(CameraFrame *frame) {
if (Contain(object_template_manager_->TypeRefinedByTemplate(),
obj->sub_type)) {
float min_template_volume = 0;
float max_template_volume = 0;
float min_template_volume = 0.0f;
float max_template_volume = 0.0f;
auto min_tmplt = kMinTemplateHWL.at(obj->sub_type);
auto max_tmplt = kMaxTemplateHWL.at(obj->sub_type);
min_template_volume = min_tmplt[0] * min_tmplt[1] * min_tmplt[2];
......
......@@ -31,9 +31,9 @@ namespace perception {
namespace camera {
struct Reference {
float area = 0;
float k = 0;
float ymax = 0;
float area = 0.0f;
float k = 0.0f;
float ymax = 0.0f;
};
class ObstacleReference {
......
......@@ -97,7 +97,7 @@ bool OMTObstacleTracker::CombineDuplicateTargets() {
continue;
}
int count = 0;
float score = 0;
float score = 0.0f;
int index1 = 0;
int index2 = 0;
while (index1 < targets_[i].Size() && index2 < targets_[j].Size()) {
......@@ -262,7 +262,7 @@ float OMTObstacleTracker::ScoreShape(const Target &target,
float OMTObstacleTracker::ScoreAppearance(const Target &target,
TrackObjectPtr track_obj) {
float energy = 0;
float energy = 0.0f;
int count = 0;
auto sensor_name = track_obj->indicator.sensor_name;
for (int i = target.Size() - 1; i >= 0; --i) {
......
......@@ -39,7 +39,7 @@ class CropBox : public IGetBox {
const base::TrafficLightPtr &light, base::RectI *crop_box);
private:
float crop_scale_ = 2.5;
float crop_scale_ = 2.5f;
int min_crop_size_ = 270;
};
......
......@@ -64,8 +64,8 @@ TEST(OnlineCalibrationServiceTest, online_calibration_service_test) {
OnlineCalibrationService *online_calib_service
= dynamic_cast<OnlineCalibrationService *>(calibration_service);
if (online_calib_service != nullptr) {
float camera_ground_height_query = 0.0;
float camera_pitch_angle_query = 0.0;
float camera_ground_height_query = 0.0f;
float camera_pitch_angle_query = 0.0f;
EXPECT_FALSE(online_calib_service->BuildIndex());
EXPECT_FALSE(online_calib_service->QueryCameraToGroundHeightAndPitchAngle(
&camera_ground_height_query,
......
......@@ -70,7 +70,7 @@ TEST(CommonFunctions, poly_eval_test) {
Eigen::Matrix<float, max_poly_order + 1, 1> coeff;
int order = max_poly_order + 2;
float x = 1.0f;
float y = 0;
float y = 0.0f;
coeff(3, 0) = 1.0f;
coeff(2, 0) = 1.0f;
coeff(1, 0) = 1.0f;
......@@ -81,7 +81,7 @@ TEST(CommonFunctions, poly_eval_test) {
Eigen::Matrix<float, max_poly_order + 1, 1> coeff;
int order = max_poly_order;
float x = 1.0f;
float y = 0;
float y = 0.0f;
coeff(3, 0) = 1.0f;
coeff(2, 0) = 1.0f;
coeff(1, 0) = 1.0f;
......
......@@ -130,7 +130,7 @@ TEST(YoloCameraDetectorTest, box_test) {
ASSERT_EQ(box3.xmin, 1);
ASSERT_EQ(box3.xmax, 10);
float size = 0;
float size = 0.0f;
init_box(&box1);
box1.xmax = 0;
size = get_bbox_size(box1);
......
......@@ -92,7 +92,7 @@ TEST(EKFTest, ekf_test) {
// Eigen::Vector3d x;
// Eigen::Vector3d z; // observation: x, y, theta
// float speed = 3.0f;
// float theta = 0.5;
// float theta = 0.5f;
// ekf.Init();
// ekf.measure_noise_ *= 5;
// ekf.Predict(1.f);
......
......@@ -73,12 +73,12 @@ int read_detections(const std::string &path, const int &feature_dim,
frame->frame_id = frame_num;
frame->track_feature_blob.reset(new base::Blob<float>);
(frame->track_feature_blob)->Reshape({det_count, feature_size});
float x = 0;
float y = 0;
float width = 0;
float height = 0;
float feature = 0.0;
float score = 0.0;
float x = 0.0f;
float y = 0.0f;
float width = 0.0f;
float height = 0.0f;
float feature = 0.0f;
float score = 0.0f;
frame->detected_objects.clear();
for (int i = 0; i < det_count; ++i) {
fin >> x >> y >> width >> height >> score;
......@@ -307,10 +307,10 @@ TEST(FusionObstacleTrackerTest, FusionObstacleTracker_test) {
while (fin_gt >> image_name >> det_count) {
std::vector<base::CameraObjectSupplement> bboxs;
base::CameraObjectSupplement bbox;
float x = 0;
float y = 0;
float width = 0;
float height = 0;
float x = 0.0f;
float y = 0.0f;
float width = 0.0f;
float height = 0.0f;
int id = 0;
for (int i = 0; i < det_count; ++i) {
fin_gt >> x >> y >> width >> height >> id;
......@@ -386,7 +386,7 @@ TEST(FusionObstacleTrackerTest, FusionObstacleTracker_test) {
<< frame.tracked_objects.size();
for (auto &bbox_gt : det_gts[frame_num]) {
int id = -1;
float max_iou = 0;
float max_iou = 0.0f;
Eigen::Matrix<double, 3, 1> center0, size0;
center0[0] = bbox_gt.box.Center().x;
size0[0] = bbox_gt.box.xmax - bbox_gt.box.xmin;
......
......@@ -101,13 +101,13 @@ bool LoadFromKitti(const std::string &kitti_path, CameraFrame *frame) {
while (!feof(fp)) {
base::ObjectPtr obj = nullptr;
obj.reset(new base::Object);
float trash = 0.0;
float score = 0.0;
float trash = 0.0f;
float score = 0.0f;
char type[255];
float x1 = 0.0;
float y1 = 0.0;
float x2 = 0.0;
float y2 = 0.0;
float x1 = 0.0f;
float y1 = 0.0f;
float x2 = 0.0f;
float y2 = 0.0f;
memset(type, 0, sizeof(type));
int ret = 0;
......
......@@ -146,7 +146,7 @@ int work() {
AINFO << camera_names[i] << " height: " << camera_ground_height;
name_camera_ground_height_map[camera_names[i]] = camera_ground_height;
Eigen::Matrix3d project_matrix;
float pitch_diff = 0.0;
float pitch_diff = 0.0f;
if (FLAGS_base_camera_name == camera_names[i]) {
project_matrix = Eigen::Matrix3d::Identity();
pitch_diff = 0;
......
......@@ -43,8 +43,8 @@ TEST_F(GatedHungarianMatcherTest, test_Match_Minimize_badcase1) {
SecureMat<float>* global_costs = optimizer_->mutable_global_costs();
global_costs->Reserve(1000, 1000);
float bound_value = 10;
float cost_thresh = 2.5;
float bound_value = 10.0f;
float cost_thresh = 2.5f;
GatedHungarianMatcher<float>::OptimizeFlag opt_flag =
GatedHungarianMatcher<float>::OptimizeFlag::OPTMIN;
std::vector<std::pair<size_t, size_t>> assignments;
......@@ -112,8 +112,8 @@ TEST_F(GatedHungarianMatcherTest, test_Match_Minimize_badcase_2) {
SecureMat<float>* global_costs = optimizer_->mutable_global_costs();
global_costs->Reserve(1000, 1000);
float bound_value = 10;
float cost_thresh = 2.5;
float bound_value = 10.0f;
float cost_thresh = 2.5f;
GatedHungarianMatcher<float>::OptimizeFlag opt_flag =
GatedHungarianMatcher<float>::OptimizeFlag::OPTMIN;
std::vector<std::pair<size_t, size_t>> assignments;
......@@ -163,8 +163,8 @@ TEST_F(GatedHungarianMatcherTest, test_Match_Minimize) {
SecureMat<float>* global_costs = optimizer_->mutable_global_costs();
global_costs->Reserve(1000, 1000);
float cost_thresh = 1.0;
float bound_value = 2.0;
float cost_thresh = 1.0f;
float bound_value = 2.0f;
GatedHungarianMatcher<float>::OptimizeFlag opt_flag =
GatedHungarianMatcher<float>::OptimizeFlag::OPTMIN;
std::vector<std::pair<size_t, size_t>> assignments;
......@@ -414,8 +414,8 @@ TEST_F(GatedHungarianMatcherTest, test_Match_Maximize) {
SecureMat<float>* global_costs = optimizer_->mutable_global_costs();
global_costs->Reserve(1000, 1000);
float cost_thresh = 1.0;
float bound_value = 2.0;
float cost_thresh = 1.0f;
float bound_value = 2.0f;
GatedHungarianMatcher<float>::OptimizeFlag opt_flag =
GatedHungarianMatcher<float>::OptimizeFlag::OPTMAX;
std::vector<std::pair<size_t, size_t>> assignments;
......
......@@ -27,7 +27,7 @@ namespace common {
// @brief: graph edge definition
struct Edge {
float w = 0.0;
float w = 0.0f;
int a = 0;
int b = 0;
// @brief: edge comparison
......
......@@ -298,8 +298,8 @@ int PlaneFitGroundDetector::CompareZ(const float *point_cloud,
unsigned int nr_z_comp_fail_threshold =
IMin(param_.nr_z_comp_fail_threshold, (unsigned int)(nr_compares >> 1));
const float *ptr = nullptr;
float z = 0;
float delta_z = 0;
float z = 0.0f;
float delta_z = 0.0f;
std::vector<int>::const_iterator iter = indices.cbegin();
while (iter < indices.cend()) {
nr_contradi = 0;
......@@ -340,13 +340,13 @@ int PlaneFitGroundDetector::CompareZ(const float *point_cloud,
void PlaneFitGroundDetector::ComputeAdaptiveThreshold() {
unsigned int r = 0;
unsigned int c = 0;
float dr = 0;
float dc = 0;
float k = 0;
float b = 0;
float min_dist = 0;
float max_dist = 0;
float thre = 0;
float dr = 0.0f;
float dc = 0.0f;
float k = 0.0f;
float b = 0.0f;
float min_dist = 0.0f;
float max_dist = 0.0f;
float thre = 0.0f;
float grid_rad = static_cast<float>(param_.nr_grids_coarse - 1) / 2;
assert(pf_thresholds_ != nullptr);
for (r = 0; r < param_.nr_grids_coarse; ++r) {
......@@ -423,7 +423,7 @@ void PlaneFitGroundDetector::ComputeSignedGroundHeightLine(
const float *cptr = nullptr;
float dist[] = {0, 0, 0, 0, 0};
const float *plane[] = {nullptr, nullptr, nullptr, nullptr, nullptr};
float min_abs_dist = 0;
float min_abs_dist = 0.0f;
unsigned int nm1 = param_.nr_grids_coarse - 1;
assert(param_.nr_grids_coarse >= 2);
plane[0] = cn[0].IsValid() ? cn[0].params : nullptr;
......@@ -627,8 +627,8 @@ int PlaneFitGroundDetector::FitGrid(const float *point_cloud,
return (0);
}
GroundPlaneLiDAR plane;
float ptp_dist = 0;
float fit_cost = 0;
float ptp_dist = 0.0f;
float fit_cost = 0.0f;
float fit_cost_best = dist_thre;
int nr_inliers = 0;
int nr_inliers_best = -1;
......@@ -833,7 +833,7 @@ int PlaneFitGroundDetector::FitGridWithNeighbors(
int kNr_iter = param_.nr_ransac_iter_threshold +
static_cast<int>(neighbors.size());
GroundPlaneLiDAR hypothesis[kNr_iter];
float ptp_dist = 0;
float ptp_dist = 0.0f;
int best = -1;
int nr_inliers = 0;
int nr_inliers_best = -1;
......@@ -990,7 +990,7 @@ int PlaneFitGroundDetector::FitGridWithNeighbors(
float PlaneFitGroundDetector::CalculateAngleDist(
const GroundPlaneLiDAR &plane,
const std::vector<std::pair<int, int> > &neighbors) {
float angle_dist = 0;
float angle_dist = 0.0f;
int count = 0;
unsigned int j = 0;
int r_n = 0;
......
......@@ -27,9 +27,9 @@ namespace perception {
namespace common {
struct HoughLine {
float r = 0.0;
float theta = 0.0;
float length = 0.0;
float r = 0.0f;
float theta = 0.0f;
float length = 0.0f;
int vote_num = 0;
std::vector<int> pts;
};
......
......@@ -80,8 +80,8 @@ bool LoadBrownCameraIntrinsic(const std::string &yaml_file,
return false;
}
float camera_width = 0;
float camera_height = 0;
float camera_width = 0.0f;
float camera_height = 0.0f;
Eigen::VectorXf params(9 + 5);
try {
camera_width = node["width"].as<float>();
......
......@@ -118,7 +118,7 @@ TEST(PointCloudProcessingDownsamplingTest, downsampling_circular_org_all1) {
center_pt.x = 0.f;
center_pt.y = 0.f;
center_pt.z = 0.f;
for (float i = 1.f; i <= 128; i++) {
for (float i = 1.f; i <= 128.0f; i++) {
tmp_pt.x = i;
tmp_pt.y = i;
tmp_pt.z = i;
......
......@@ -86,7 +86,7 @@ float ObjectInCameraView(SensorObjectConstPtr sensor_object,
const Eigen::Affine3d& camera_sensor2world_pose,
double camera_ts, double camera_max_dist,
bool motion_compensation, bool all_in) {
float in_view_ratio = 0.0;
float in_view_ratio = 0.0f;
Eigen::Matrix4d world2sensor_pose =
camera_sensor2world_pose.matrix().inverse();
if (!world2sensor_pose.allFinite()) {
......
......@@ -42,7 +42,7 @@ double ComputePtsBoxLocationSimilarity(const ProjectionCachePtr& cache,
double x_std_dev = 0.4;
double y_std_dev = 0.5;
size_t check_augmented_iou_minimum_pts_num = 20;
float augmented_buffer = 25;
float augmented_buffer = 25.0f;
if (object->Empty()) {
ADEBUG << "cache object is empty!";
return min_p;
......@@ -280,9 +280,9 @@ double ComputeRadarCameraVelocitySimilarity(
float diff_velocity = (radar_velocity - camera_velocity).norm() / 2;
float diff_velocity_ratio =
diff_velocity / std::max(scalar_camera_velocity, scalar_radar_velocity);
const float velocity_std = 0.15;
const float max_velocity_p = 0.9;
const float th_velocity_p = 0.5;
const float velocity_std = 0.15f;
const float max_velocity_p = 0.9f;
const float th_velocity_p = 0.5f;
float velocity_score =
1 - ChiSquaredCdf1TableFun(diff_velocity_ratio * diff_velocity_ratio /
velocity_std / velocity_std);
......
......@@ -20,7 +20,7 @@ namespace perception {
namespace fusion {
bool PbfShapeFusion::s_use_camera_3d_ = true;
float PbfShapeFusion::s_camera_radar_time_diff_th_ = 0.3;
float PbfShapeFusion::s_camera_radar_time_diff_th_ = 0.3f;
bool PbfShapeFusion::Init() { return true; }
......
......@@ -94,7 +94,7 @@ TEST(ROIPoolRoundTest, test) {
int pooled_w = 7;
bool use_floor = false;
int feat_channel = 1;
float spatial_scale = 16.0;
float spatial_scale = 16.0f;
auto roi_layer = new apollo::perception::inference::ROIPoolingLayer<float>(
pooled_h, pooled_w, use_floor, spatial_scale, feat_channel);
std::vector<int> feat_shape{2, feat_channel, 30, 30};
......
......@@ -97,7 +97,7 @@ TEST(UtilTest, NormTest) {
norm.L2Norm(&data);
const float *result = data.cpu_data();
for (int i = 0; i < num; ++i) {
float sum = 0;
float sum = 0.0f;
for (int j = 0; j < dim; ++j) {
sum += (*result) * (*result);
++result;
......
......@@ -70,9 +70,9 @@ class FeatureDescriptor {
private:
void GetMinMaxCenter() {
float xsum = 0.0;
float ysum = 0.0;
float zsum = 0.0;
float xsum = 0.0f;
float ysum = 0.0f;
float zsum = 0.0f;
min_pt_.x = min_pt_.y = min_pt_.z = FLT_MAX;
max_pt_.x = max_pt_.y = max_pt_.z = -FLT_MAX;
......
......@@ -98,7 +98,7 @@ bool SpatioTemporalGroundDetector::Detect(const GroundDetectorOptions& options,
size_t num_points_all = 0;
int index = 0;
unsigned int nr_points_element = 3;
float z_distance = 0.0;
float z_distance = 0.0f;
cloud_center_(0) = frame->lidar2world_pose(0, 3);
cloud_center_(1) = frame->lidar2world_pose(1, 3);
......
......@@ -58,7 +58,7 @@ class SpatioTemporalGroundDetector : public BaseGroundDetector {
bool use_roi_ = true;
bool use_ground_service_ = false;
float ground_thres_ = 0.25;
float ground_thres_ = 0.25f;
size_t default_point_size_ = 320000;
Eigen::Vector3d cloud_center_ = Eigen::Vector3d(0.0, 0.0, 0.0);
GroundServiceContent ground_service_content_;
......
......@@ -125,8 +125,8 @@ TEST(SpatioTemporalGroundDetectorTest, test_spatio_temporal_ground_detector) {
// std::dynamic_pointer_cast<GroundService>(ground_service);
// Eigen::Vector3d world_point(0.0, 0.0, 0.0);
// float out_query = 0.0;
// float out_detected = 0.0;
// float out_query = 0.0f;
// float out_detected = 0.0f;
// for (size_t i = 0; i < 10; ++i) {
// const auto& index = frame_data->non_ground_indices.indices[i];
// const auto& pt = frame_data->world_cloud->at(index);
......
......@@ -32,8 +32,8 @@ namespace lidar {
struct BipartiteGraphMatcherInitOptions {};
struct BipartiteGraphMatcherOptions {
float cost_thresh = 4.0;
float bound_value = 100.0;
float cost_thresh = 4.0f;
float bound_value = 100.0f;
};
class BaseBipartiteGraphMatcher {
......
......@@ -67,12 +67,12 @@ class PointCloudPreprocessor {
// params
bool filter_naninf_points_ = true;
bool filter_nearby_box_points_ = true;
float box_forward_x_ = 0.0;
float box_backward_x_ = 0.0;
float box_forward_y_ = 0.0;
float box_backward_y_ = 0.0;
float box_forward_x_ = 0.0f;
float box_backward_x_ = 0.0f;
float box_forward_y_ = 0.0f;
float box_backward_y_ = 0.0f;
bool filter_high_z_points_ = true;
float z_threshold_ = 5.0;
float z_threshold_ = 5.0f;
static const float kPointInfThreshold;
}; // class PointCloudPreprocessor
......
......@@ -137,7 +137,7 @@ void FeatureGenerator::GenerateCPU(const base::PointFCloudPtr& pc_ptr,
}
const auto& pt = pc_ptr->at(i);
float pz = pt.z;
float pi = pt.intensity / 255.0;
float pi = pt.intensity / 255.0f;
if (max_height_data_[idx] < pz) {
max_height_data_[idx] = pz;
if (use_intensity_feature_) {
......
......@@ -87,9 +87,9 @@ class FeatureGenerator {
// feature param
int width_ = 0;
int height_ = 0;
float range_ = 0.0;
float min_height_ = 0.0;
float max_height_ = 0.0;
float range_ = 0.0f;
float min_height_ = 0.0f;
float max_height_ = 0.0f;
bool use_intensity_feature_ = false;
bool use_constant_feature_ = false;
......
......@@ -51,7 +51,7 @@ struct SppData {
size_t data_width = 0;
size_t data_height = 0;
size_t data_size = 0;
float data_range = 0;
float data_range = 0.0f;
void MakeReference(size_t width, size_t height, float range);
......
......@@ -109,7 +109,7 @@ float BboxSizeDistance(
Eigen::Vector3f old_bbox_size = last_object->output_size.cast<float>();
Eigen::Vector3f new_bbox_size = new_object->size.cast<float>();
float size_dist = 0.0;
float size_dist = 0.0f;
double dot_val_00 = fabs(old_bbox_dir(0) * new_bbox_dir(0) +
old_bbox_dir(1) * new_bbox_dir(1));
double dot_val_01 = fabs(old_bbox_dir(0) * new_bbox_dir(1) -
......@@ -177,7 +177,7 @@ float HistogramDistance(
return 100;
}
float histogram_dist = 0.0;
float histogram_dist = 0.0f;
for (size_t i = 0; i < old_object_shape_features.size(); ++i) {
histogram_dist += std::fabs(old_object_shape_features[i] -
new_object_shape_features[i]);
......
......@@ -111,7 +111,7 @@ struct TrackedObject {
int boostup_need_history_size = 0;
bool valid = false;
bool converged = true;
float convergence_confidence = 0.0;
float convergence_confidence = 0.0f;
double update_quality = 0.0;
Eigen::Vector3d selected_measured_velocity;
Eigen::Vector3d selected_measured_acceleration;
......
......@@ -56,7 +56,7 @@ void MlfMotionMeasurement::MeasurementSelection(
const TrackedObjectConstPtr& latest_object, TrackedObjectPtr new_object) {
// Select measured velocity among candidates according motion consistency
int corner_index = 0;
float corner_velocity_gain = 0;
float corner_velocity_gain = 0.0f;
std::vector<float> corner_velocity_gain_norms(4);
for (int i = 0; i < 4; ++i) {
corner_velocity_gain_norms[i] = (new_object->measured_corners_velocity[i] -
......
......@@ -65,7 +65,7 @@ bool SetCameraHeight(const std::string &sensor_name,
const std::string &params_dir, float default_camera_height,
float *camera_height) {
float base_h = default_camera_height;
float camera_offset = 0;
float camera_offset = 0.0f;
try {
YAML::Node lidar_height =
YAML::LoadFile(params_dir + "/" + "velodyne64_height.yaml");
......@@ -478,7 +478,7 @@ int FusionCameraDetectionComponent::InitCameraFrames() {
// Init camera height
for (const auto &camera_name : camera_names_) {
float height = 0.0;
float height = 0.0f;
SetCameraHeight(camera_name, FLAGS_obs_sensor_intrinsic_path,
default_camera_height_, &height);
camera_height_map_[camera_name] = height;
......
......@@ -51,7 +51,7 @@ class SegmentationComponent : public cyber::Component<drivers::PointCloud> {
static uint32_t s_seq_num_;
std::string sensor_name_;
bool enable_hdmap_ = true;
float lidar_query_tf_offset_ = 20.0;
float lidar_query_tf_offset_ = 20.0f;
std::string lidar2novatel_tf2_child_frame_id_;
std::string output_channel_name_;
base::SensorInfo sensor_info_;
......
......@@ -50,7 +50,7 @@ class ContiArsPreprocessor : public BasePreprocessor {
void CorrectTime(drivers::ContiRadar* corrected_obstacles);
int GetNextId();
float delay_time_ = 0.0;
float delay_time_ = 0.0f;
static int current_idx_;
static int local2global_[ORIGIN_CONTI_MAX_ID_NUM];
......
......@@ -234,7 +234,7 @@ bool ReferenceLineInfo::CheckChangeLane() const {
for (const auto* obstacle : path_decision_.obstacles().Items()) {
const auto& sl_boundary = obstacle->PerceptionSLBoundary();
constexpr float kLateralShift = 2.5;
constexpr float kLateralShift = 2.5f;
if (sl_boundary.start_l() < -kLateralShift ||
sl_boundary.end_l() > kLateralShift) {
continue;
......@@ -247,9 +247,9 @@ bool ReferenceLineInfo::CheckChangeLane() const {
return false;
}
constexpr float kSafeTime = 3.0;
constexpr float kForwardMinSafeDistance = 6.0;
constexpr float kBackwardMinSafeDistance = 8.0;
constexpr float kSafeTime = 3.0f;
constexpr float kForwardMinSafeDistance = 6.0f;
constexpr float kBackwardMinSafeDistance = 8.0f;
const float kForwardSafeDistance = std::max(
kForwardMinSafeDistance,
......
......@@ -42,7 +42,7 @@ class DistanceApproachProblemTest : public ::testing::Test {
int num_of_variables_ = 160;
int num_of_constraints_ = 200;
std::size_t horizon_ = 20;
float ts_ = 0.01;
float ts_ = 0.01f;
Eigen::MatrixXd ego_ = Eigen::MatrixXd::Ones(4, 1);
Eigen::MatrixXd x0_ = Eigen::MatrixXd::Ones(4, 1);
Eigen::MatrixXd xf_ = Eigen::MatrixXd::Ones(4, 1);
......
......@@ -48,7 +48,7 @@ class DualVariableWarmStartIPOPTInterfaceTest : public ::testing::Test {
protected:
std::size_t horizon_ = 5;
std::size_t obstacles_num_ = 10;
float ts_ = 0.01;
float ts_ = 0.01f;
Eigen::MatrixXd ego_ = Eigen::MatrixXd::Ones(4, 1);
Eigen::MatrixXd last_time_u_ = Eigen::MatrixXd::Zero(2, 1);
Eigen::MatrixXi obstacles_edges_num_;
......
......@@ -168,12 +168,12 @@ int RNNEvaluator::SetupObstacleFeature(
feature_values->clear();
feature_values->reserve(DIM_OBSTACLE_FEATURE);
float heading = 0.0;
float speed = 0.0;
float lane_l = 0.0;
float theta = 0.0;
float dist_lb = 1.0;
float dist_rb = 1.0;
float heading = 0.0f;
float speed = 0.0f;
float lane_l = 0.0f;
float theta = 0.0f;
float dist_lb = 1.0f;
float dist_rb = 1.0f;
if (obstacle->history_size() < 1) {
AWARN << "Size of feature less than 1!";
return -1;
......
......@@ -144,7 +144,7 @@ void Conv1d::Run(const std::vector<Eigen::MatrixXf>& inputs,
output->resize(output_num_row, output_num_col);
for (int i = 0; i < output_num_row; ++i) {
for (int j = 0; j < output_num_col; ++j) {
float output_i_j_unbiased = 0.0;
float output_i_j_unbiased = 0.0f;
for (int p = 0; p < inputs[0].rows(); ++p) {
for (int q = j * stride_; q < j * stride_ + kernel_size; ++q) {
output_i_j_unbiased +=
......@@ -233,7 +233,7 @@ void AvgPool1d::Run(const std::vector<Eigen::MatrixXf>& inputs,
for (int j = 0; j < output_num_col; ++j) {
CHECK_LE(input_index + kernel_size_, inputs[0].cols());
for (int i = 0; i < output_num_row; ++i) {
float output_i_j_sum = 0.0;
float output_i_j_sum = 0.0f;
for (int k = input_index; k < input_index + kernel_size_; ++k) {
output_i_j_sum += inputs[0](i, k);
}
......
......@@ -311,8 +311,8 @@ class BatchNormalization : public Layer {
Eigen::VectorXf sigma_;
Eigen::VectorXf gamma_;
Eigen::VectorXf beta_;
float epsilon_ = 0.0;
float momentum_ = 0.0;
float epsilon_ = 0.0f;
float momentum_ = 0.0f;
int axis_ = 0;
bool center_ = false;
bool scale_ = false;
......
......@@ -46,7 +46,7 @@ class BufferInterface {
*/
virtual apollo::transform::TransformStamped lookupTransform(
const std::string& target_frame, const std::string& source_frame,
const cyber::Time& time, const float timeout_second = 0.01) const = 0;
const cyber::Time& time, const float timeout_second = 0.01f) const = 0;
/** \brief Get the transform between two frames by frame ID assuming fixed
*frame.
......@@ -68,7 +68,7 @@ class BufferInterface {
const std::string& target_frame, const cyber::Time& target_time,
const std::string& source_frame, const cyber::Time& source_time,
const std::string& fixed_frame,
const float timeout_second = 0.01) const = 0;
const float timeout_second = 0.01f) const = 0;
/** \brief Test if a transform is possible
* \param target_frame The frame into which to transform
......