Commit 89391d1f authored by kechxu, committed by Jiangtao Hu

Prediction: implement Conv1d layer

Parent 533e42b1
......@@ -14,7 +14,7 @@ cc_library(
"//cyber",
"//modules/prediction/network:net_layer",
"//modules/prediction/network:net_model",
"//modules/prediction/proto:network_model_proto",
"//modules/prediction/proto:cruise_model_proto",
"@eigen//:eigen",
],
)
......
......@@ -21,6 +21,7 @@
#include "Eigen/Dense"
#include "cyber/common/macros.h"
#include "modules/prediction/proto/cruise_model.pb.h"
#include "modules/prediction/network/net_model.h"
namespace apollo {
......
......@@ -87,6 +87,60 @@ void Dense::Run(const std::vector<Eigen::MatrixXf>& inputs,
CHECK_EQ(output->cols(), units_);
}
bool Conv1d::Load(const LayerParameter& layer_pb) {
if (!Layer::Load(layer_pb)) {
AERROR << "Fail to Load LayerParameter!";
return false;
}
const Conv1dParameter& conv1d_pb = layer_pb.conv1d();
if (!conv1d_pb.has_kernel() ||
!LoadTensor(conv1d_pb.kernel(), &kernel_)) {
AERROR << "Fail to Load kernel!";
return false;
}
if (!conv1d_pb.has_bias() || !LoadTensor(conv1d_pb.bias(), &bias_)) {
AERROR << "Fail to Load bias!";
return false;
}
if (!conv1d_pb.has_use_bias()) {
AWARN << "use_bias is not set, defaulting to true.";
use_bias_ = true;
} else {
use_bias_ = conv1d_pb.use_bias();
}
for (int sz : conv1d_pb.shape()) {
shape_.push_back(sz);
}
if (conv1d_pb.has_stride()) {
stride_ = conv1d_pb.stride();
} else {
stride_ = 1;
}
return true;
}
void Conv1d::Run(const std::vector<Eigen::MatrixXf>& inputs,
Eigen::MatrixXf* output) {
CHECK_EQ(inputs.size(), 1);
CHECK_GT(kernel_.size(), 0);
CHECK_EQ(kernel_[0].rows(), inputs[0].rows());
int kernel_size = kernel_[0].cols();
int output_num_col = (inputs[0].cols() - kernel_size) / stride_ + 1;
int output_num_row = static_cast<int>(kernel_.size());
output->resize(output_num_row, output_num_col);
for (int i = 0; i < output_num_row; ++i) {
for (int j = 0; j + kernel_size <= inputs[0].cols(); j += stride_) {
float output_i_j = 0.0f;
for (int p = 0; p < inputs[0].rows(); ++p) {
for (int q = j; q < j + kernel_size; ++q) {
output_i_j += inputs[0](p, q) * kernel_[i](p, q - j);
}
}
(*output)(i, j / stride_) = output_i_j;
}
}
if (use_bias_) {
// Add the per-kernel bias to every output column.
output->colwise() += bias_;
}
}
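As a sanity check on the sliding-window arithmetic above, here is a standalone sketch (all sizes hypothetical: 2 input channels, 5 time steps, one 2x3 all-ones kernel, stride 1) that computes the same windowed sums with plain Eigen:
#include "Eigen/Dense"
int main() {
Eigen::MatrixXf input(2, 5);
input << 1, 2, 3, 4, 5,
6, 7, 8, 9, 10;
Eigen::MatrixXf kernel = Eigen::MatrixXf::Ones(2, 3);
const int stride = 1;
const int kernel_size = static_cast<int>(kernel.cols());
// (5 - 3) / 1 + 1 = 3 output columns, matching output_num_col above.
Eigen::VectorXf out((input.cols() - kernel_size) / stride + 1);
for (int j = 0; j + kernel_size <= input.cols(); j += stride) {
// Element-wise product of the kernel with the current window, summed.
out(j / stride) =
(input.block(0, j, 2, kernel_size).array() * kernel.array()).sum();
}
// out is [27, 33, 39]; e.g. 1+2+3+6+7+8 = 27 for the first window.
return 0;
}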
bool Activation::Load(const LayerParameter& layer_pb) {
if (!Layer::Load(layer_pb)) {
AERROR << "Fail to Load the layer parameters!";
......
......@@ -134,6 +134,90 @@ class Dense : public Layer {
std::function<float(float)> kactivation_;
};
/**
* @class Conv1d
* @brief Conv1d is the 1-d convolution network layer.
* Conv1d layer output is y = Conv(x, w) + b,
* where x is the input, w the kernel and b the bias.
*
* Parameters w and b can be loaded from a pb message. If bias is
* not used, b = 0.
*/
class Conv1d : public Layer {
public:
/**
* @brief Load the conv1d layer parameters from a pb message
* @param A pb message containing the parameters
* @return True if loaded successfully, otherwise False
*/
bool Load(const apollo::prediction::LayerParameter& layer_pb) override;
/**
* @brief Compute the layer output from inputs
* @param Inputs to a network layer
* @param Output of a network layer will be returned
*/
void Run(const std::vector<Eigen::MatrixXf>& inputs,
Eigen::MatrixXf* output) override;
private:
std::vector<int> shape_;
bool use_bias_;
std::vector<Eigen::MatrixXf> kernel_;
Eigen::VectorXf bias_;
int stride_;
};
/**
* @class MaxPool1d
* @brief MaxPool1d is the 1-d max pooling network layer.
*/
class MaxPool1d : public Layer {
public:
/**
* @brief Load the max pooling layer parameters from a pb message
* @param A pb message containing the parameters
* @return True if loaded successfully, otherwise False
*/
bool Load(const apollo::prediction::LayerParameter& layer_pb) override;
/**
* @brief Compute the layer output from inputs
* @param Inputs to a network layer
* @param Output of a network layer will be returned
*/
void Run(const std::vector<Eigen::MatrixXf>& inputs,
Eigen::MatrixXf* output) override;
private:
int kernel_size_;
};
/**
* @class AvgPool1d
* @brief AvgPool1d is the 1-d average pooling network layer.
*/
class AvgPool1d : public Layer {
public:
/**
* @brief Load the average pooling layer parameters from a pb message
* @param A pb message containing the parameters
* @return True if loaded successfully, otherwise False
*/
bool Load(const apollo::prediction::LayerParameter& layer_pb) override;
/**
* @brief Compute the layer output from inputs
* @param Inputs to a network layer
* @param Output of a network layer will be returned
*/
void Run(const std::vector<Eigen::MatrixXf>& inputs,
Eigen::MatrixXf* output) override;
private:
int kernel_size_;
};
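This commit only declares the two pooling layers; their Run bodies are not part of the diff. A minimal sketch of what MaxPool1d::Run could look like, assuming non-overlapping windows (stride equal to kernel_size_, the only member stored above):
void MaxPool1d::Run(const std::vector<Eigen::MatrixXf>& inputs,
Eigen::MatrixXf* output) {
CHECK_EQ(inputs.size(), 1);
// Hypothetical: stride is assumed equal to kernel_size_.
const int num_col = static_cast<int>(inputs[0].cols()) / kernel_size_;
output->resize(inputs[0].rows(), num_col);
for (int j = 0; j < num_col; ++j) {
// Row-wise max over each non-overlapping window of kernel_size_ columns.
output->col(j) =
inputs[0]
.block(0, j * kernel_size_, inputs[0].rows(), kernel_size_)
.rowwise()
.maxCoeff();
}
}
AvgPool1d::Run would be identical with .maxCoeff() replaced by .mean().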
/**
* @class Activation
* @brief Activation is an activation network layer.
......
......@@ -89,6 +89,16 @@ bool LoadTensor(const TensorParameter& tensor_pb, Eigen::VectorXf* vector) {
return true;
}
bool LoadTensor(const TensorParameter& tensor_pb,
std::vector<Eigen::MatrixXf>* tensor3d) {
if (tensor_pb.data_size() == 0 || tensor_pb.shape_size() != 3) {
AERROR << "Fail to load the necessary fields!";
return false;
}
// TODO(kechxu) implement
return true;
}
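A hedged sketch of how the TODO above could be filled in, assuming the data field is flattened row-major and shape holds {num_matrices, num_rows, num_cols}; the helper name LoadTensor3d is illustrative, not part of the commit:
bool LoadTensor3d(const TensorParameter& tensor_pb,
std::vector<Eigen::MatrixXf>* tensor3d) {
const int depth = tensor_pb.shape(0);
const int rows = tensor_pb.shape(1);
const int cols = tensor_pb.shape(2);
if (tensor_pb.data_size() != depth * rows * cols) {
AERROR << "Tensor data size does not match its shape!";
return false;
}
tensor3d->clear();
int index = 0;
for (int k = 0; k < depth; ++k) {
Eigen::MatrixXf matrix(rows, cols);
for (int i = 0; i < rows; ++i) {
for (int j = 0; j < cols; ++j) {
// Hypothetical row-major layout of the flattened data field.
matrix(i, j) = static_cast<float>(tensor_pb.data(index++));
}
}
tensor3d->push_back(matrix);
}
return true;
}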
} // namespace network
} // namespace prediction
} // namespace apollo
......@@ -23,6 +23,7 @@
#include <functional>
#include <string>
#include <vector>
#include "Eigen/Dense"
......@@ -93,6 +94,15 @@ bool LoadTensor(const TensorParameter& tensor_pb, Eigen::MatrixXf* matrix);
*/
bool LoadTensor(const TensorParameter& tensor_pb, Eigen::VectorXf* vector);
/**
* @brief load a 3-d tensor from a protobuf message
* @param protobuf message in the form of TensorParameter
* @param vector of Eigen::MatrixXf that will be returned
* @return True if the data is loaded successfully, otherwise False
*/
bool LoadTensor(const TensorParameter& tensor_pb,
std::vector<Eigen::MatrixXf>* tensor3d);
} // namespace network
} // namespace prediction
} // namespace apollo
......@@ -156,3 +156,20 @@ cc_proto_library(
":scenario_proto_lib",
],
)
cc_proto_library(
name = "cruise_model_proto",
deps = [
":cruise_model_proto_lib",
],
)
proto_library(
name = "cruise_model_proto_lib",
srcs = [
"cruise_model.proto",
],
deps = [
":network_model_proto_lib",
],
)
syntax = "proto2";
package apollo.prediction;
import "modules/prediction/proto/network_layers.proto";
// Intermediate building blocks:
message LaneFeatureConv {
optional Conv1dParameter conv1d_0 = 1;
optional ActivationParameter activation_1 = 2;
optional Conv1dParameter conv1d_2 = 3;
optional ActivationParameter activation_3 = 4;
optional Conv1dParameter conv1d_4 = 5;
}
message ObsFeatureFC {
optional DenseParameter linear_0 = 1;
optional ActivationParameter activation_1 = 2;
optional DenseParameter linear_3 = 3;
optional ActivationParameter activation_4 = 4;
}
message Classify {
optional DenseParameter linear_0 = 1;
optional ActivationParameter activation_1 = 2;
optional DenseParameter linear_3 = 3;
optional ActivationParameter activation_4 = 4;
optional DenseParameter linear_6 = 5;
optional ActivationParameter activation_7 = 6;
optional DenseParameter linear_9 = 7;
optional ActivationParameter activation_10 = 8;
}
message Regress {
optional DenseParameter linear_0 = 1;
optional ActivationParameter activation_1 = 2;
optional DenseParameter linear_3 = 3;
optional ActivationParameter activation_4 = 4;
optional DenseParameter linear_6 = 5;
optional ActivationParameter activation_7 = 6;
optional DenseParameter linear_9 = 7;
optional ActivationParameter activation_10 = 8;
}
// Final model
message CruiseModel {
optional LaneFeatureConv lane_feature_conv = 1;
optional MaxPool1d lane_feature_maxpool = 2;
optional AvgPool1d lane_feature_avgpool = 3;
optional ObsFeatureFC obs_feature_fc = 5;
optional Classify classify = 6;
optional Regress regress = 7;
}
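On the C++ side, an assembled model can be read back with the cyber proto file helper; a minimal sketch, assuming such a helper is available and using an illustrative file path:
#include "cyber/common/file.h"
#include "modules/prediction/proto/cruise_model.pb.h"
apollo::prediction::CruiseModel model;
// The path below is hypothetical.
if (!apollo::cyber::common::GetProtoFromFile(
"/apollo/modules/prediction/data/cruise_model.pb.txt", &model)) {
AERROR << "Failed to load the cruise model file.";
}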
......@@ -22,10 +22,21 @@ message DenseParameter {
}
message Conv1dParameter {
-optional int32 units = 1;
+repeated int32 shape = 1;
optional bool use_bias = 2;
-optional TensorParameter weights = 3;
+optional TensorParameter kernel = 3;
optional TensorParameter bias = 4;
optional int32 stride = 5;
}
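For illustration, a Conv1dParameter can be populated in code as below (all values are hypothetical; the kernel TensorParameter is assumed to carry a flattened 3-d tensor shaped {num_kernels, num_input_channels, kernel_width}, matching the 3-element shape check in LoadTensor):
apollo::prediction::Conv1dParameter conv_pb;
conv_pb.add_shape(1);  // one output channel (hypothetical)
conv_pb.set_use_bias(true);
conv_pb.set_stride(1);
apollo::prediction::TensorParameter* kernel = conv_pb.mutable_kernel();
kernel->add_shape(1);  // num kernels
kernel->add_shape(2);  // input channels
kernel->add_shape(3);  // kernel width
for (int i = 0; i < 1 * 2 * 3; ++i) {
kernel->add_data(1.0);  // all-ones kernel (hypothetical values)
}
apollo::prediction::TensorParameter* bias = conv_pb.mutable_bias();
bias->add_shape(1);
bias->add_data(0.0);
The message can then be attached to a LayerParameter via its conv1d field and handed to Conv1d::Load.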
message MaxPool1d {
optional int32 kernel_size = 1;
optional int32 stride = 2;
}
message AvgPool1d {
optional int32 kernel_size = 1;
optional int32 stride = 2;
}
message BatchNormalizationParameter {
......@@ -91,5 +102,7 @@ message LayerParameter {
FlattenParameter flatten = 9;
ConcatenateParameter concatenate = 10;
Conv1dParameter conv1d = 11;
MaxPool1d maxpool1d = 12;
AvgPool1d avgpool1d = 13;
}
}
......@@ -188,7 +188,7 @@ void FeatureExtractor::ExtractFrontJunctionFeatures(
}
JunctionInfoPtr junction = ego_trajectory_container->ADCJunction();
// TODO(all) change need_consider to false once map is fixed
-bool need_consider = true;
+bool need_consider = false;
if (junction == nullptr) {
return;
}
......