Commit 5b802769 authored by Liangliang Zhang, committed by Dong Li

Planning: (1) Fixed a serious bug when adding spline kernels. NOTICE: the standard form of a QP problem is to minimize 1/2 * x^T * H * x + x^T * G. (2) Added a unit test for active_set_qp_solver. (3) Fixed all failing tests. (#566)
Parent: faa8f37f
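As a worked example of that convention: a weighted squared cost w * (x - r)^2 expands to w * x^2 - 2 * w * r * x + const, so under the minimize 1/2 * x^T * H * x + x^T * G form it contributes H = 2 * w and G = -2 * w * r. That factor of 2 is what the spline kernel contributions below gain, and it is why the expected kernel values in the tests are doubled.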
@@ -45,4 +45,16 @@ cc_library(
],
)
cc_test(
name = "active_set_qp_solver_test",
size = "small",
srcs = [
"active_set_qp_solver_test.cc",
],
deps = [
":active_set_qp_solver",
"@gtest//:main",
],
)
cpplint()
@@ -77,7 +77,6 @@ bool ActiveSetQpSolver::Solve() {
for (int r = 0; r < kernel_matrix_.rows(); ++r) {
g_matrix[r] = offset_(r, 0);
for (int c = 0; c < kernel_matrix_.cols(); ++c) {
h_matrix[index++] = kernel_matrix_(r, c);
}
......
/******************************************************************************
* Copyright 2017 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
/**
* @file
**/
#include "modules/common/math/qp_solver/active_set_qp_solver.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
namespace apollo {
namespace common {
namespace math {
using Eigen::MatrixXd;
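// A minimal one-dimensional problem: H = [2.0], G = [-4.0], i.e. minimize x^2 - 4 * x,
// whose unconstrained minimum is x = 2.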
TEST(ActiveSetQpSolver, simple_problem_01) {
MatrixXd kernel_matrix = MatrixXd::Zero(1, 1);
kernel_matrix(0, 0) = 2.0;
MatrixXd offset = MatrixXd::Zero(1, 1);
offset(0, 0) = -4.0;
MatrixXd affine_inequality_matrix;
MatrixXd affine_inequality_boundary;
MatrixXd affine_equality_matrix;
MatrixXd affine_equality_boundary;
ActiveSetQpSolver solver(kernel_matrix, offset, affine_inequality_matrix,
affine_inequality_boundary, affine_equality_matrix,
affine_equality_boundary);
solver.Solve();
EXPECT_DOUBLE_EQ(solver.params()(0, 0), 2.0);
}
} // namespace math
} // namespace common
} // namespace apollo
@@ -49,7 +49,7 @@ Spline1dKernel::Spline1dKernel(const std::vector<double>& x_knots,
void Spline1dKernel::AddRegularization(const double regularized_param) {
Eigen::MatrixXd id_matrix =
Eigen::MatrixXd::Identity(kernel_matrix_.rows(), kernel_matrix_.cols());
- kernel_matrix_ += id_matrix * regularized_param;
+ kernel_matrix_ += 2.0 * id_matrix * regularized_param;
}
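// Note on the convention used in this file: the QP solver minimizes
// 1/2 * x^T * H * x + x^T * G, so a quadratic cost of the form w * x^T * M * x
// must contribute 2.0 * w * M to kernel_matrix_; the derivative and
// reference-line kernels added below are scaled by that factor of 2.0.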
bool Spline1dKernel::AddKernel(const Eigen::MatrixXd& kernel,
@@ -89,6 +89,7 @@ void Spline1dKernel::AddNthDerivativekernelMatrix(const uint32_t n,
const double weight) {
for (uint32_t i = 0; i + 1 < x_knots_.size(); ++i) {
Eigen::MatrixXd cur_kernel =
2 *
SplineSegKernel::instance()->NthDerivativeKernel(
n, spline_order_, x_knots_[i + 1] - x_knots_[i]) *
weight;
@@ -137,7 +138,7 @@ bool Spline1dKernel::AddReferenceLineKernelMatrix(
for (uint32_t r = 0; r < spline_order_; ++r) {
for (uint32_t c = 0; c < spline_order_; ++c) {
- ref_kernel(r, c) = power_x[r + c];
+ ref_kernel(r, c) = 2.0 * power_x[r + c];
}
}
......
@@ -51,6 +51,7 @@ TEST(Spline1dKernel, add_regularization) {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -88,6 +89,7 @@ TEST(Spline1dKernel, add_derivative_kernel_matrix_01) {
0, 1, 1.6, 2, 2.28571, 2.5,
0, 1, 1.66667, 2.14286, 2.5, 2.77778;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -132,6 +134,7 @@ TEST(Spline1dKernel, add_derivative_kernel_matrix_02) {
0, 0, 0, 0, 0, 0, 0, 1, 1.6, 2, 2.28571, 2.5, // NOLINT
0, 0, 0, 0, 0, 0, 0, 1, 1.66667, 2.14286, 2.5, 2.77778; // NOLINT
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -169,6 +172,7 @@ TEST(Spline1dKernel, add_derivative_kernel_matrix_03) {
0, 1, 1.6, 2, 2.28571, 2.5,
0, 1, 1.66667, 2.14286, 2.5, 2.77778;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -199,8 +203,6 @@ TEST(Spline1dKernel, add_derivative_kernel_matrix_04) {
EXPECT_EQ(kernel.kernel_matrix().rows(), spline_order * (x_knots.size() - 1));
Eigen::MatrixXd ref_kernel_matrix = Eigen::MatrixXd::Zero(4, 4);
- std::cout << kernel.kernel_matrix() << std::endl;
// clang-format off
ref_kernel_matrix <<
0, 0, 0, 0,
@@ -208,6 +210,7 @@ TEST(Spline1dKernel, add_derivative_kernel_matrix_04) {
0, 1, 1.33333, 1.5,
0, 1, 1.5, 1.8;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -246,6 +249,7 @@ TEST(Spline1dKernel, add_second_derivative_kernel_matrix_01) {
0, 0, 8, 18, 28.8, 40,
0, 0, 10, 24, 40, 57.1429;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -292,6 +296,7 @@ TEST(Spline1dKernel, add_second_derivative_kernel_matrix_02) {
0, 0, 0, 0, 0, 0, 0, 0, 8, 18, 28.8, 40,
0, 0, 0, 0, 0, 0, 0, 0, 10, 24, 40, 57.1429;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -330,6 +335,7 @@ TEST(Spline1dKernel, add_third_derivative_kernel_matrix_01) {
0, 0, 0, 72, 192, 360,
0, 0, 0, 120, 360, 720;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -368,6 +374,7 @@ TEST(Spline1dKernel, add_third_derivative_kernel_matrix_02) {
0, 0, 0, 72, 192, 360,
0, 0, 0, 120, 360, 720;
// clang-format on
ref_kernel_matrix *= 2.0;
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
@@ -404,7 +411,7 @@ TEST(Spline1dKernel, add_reference_line_kernel_01) {
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
if (i == 0 && j == 0) {
- EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 1.0);
+ EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 2.0);
} else {
EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 0.0);
}
@@ -428,7 +435,7 @@ TEST(Spline1dKernel, add_reference_line_kernel_02) {
for (int i = 0; i < kernel.kernel_matrix().rows(); ++i) {
for (int j = 0; j < kernel.kernel_matrix().cols(); ++j) {
if (i == 0 && j == 0) {
- EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 1.0);
+ EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 2.0);
} else {
EXPECT_DOUBLE_EQ(kernel.kernel_matrix()(i, j), 0.0);
}
@@ -460,8 +467,8 @@ TEST(Spline1dKernel, add_reference_line_kernel_03) {
}
Eigen::MatrixXd ref_kernel_matrix = Eigen::MatrixXd::Zero(6, 6);
- ref_kernel_matrix = res.transpose() * res;
- ref_kernel_matrix(0, 0) += 1.0;
+ ref_kernel_matrix = 2.0 * res.transpose() * res;
+ ref_kernel_matrix(0, 0) += 2.0;
Eigen::MatrixXd ref_offset = Eigen::MatrixXd::Zero(6, 1);
ref_offset = -2.0 * 2.0 * res.transpose();
@@ -493,7 +500,7 @@ TEST(Spline1dKernel, add_reference_line_kernel_04) {
}
Eigen::MatrixXd ref_kernel_matrix = Eigen::MatrixXd::Zero(6, 6);
- ref_kernel_matrix = res.transpose() * res;
+ ref_kernel_matrix = 2.0 * res.transpose() * res;
Eigen::MatrixXd ref_offset = Eigen::MatrixXd::Zero(6, 1);
ref_offset = -2.0 * 2.0 * res.transpose();
......