Apollo 10.0
自动驾驶开放平台
apollo::perception::camera::DarkSCNNLanePostprocessor类 参考

#include <darkSCNN_lane_postprocessor.h>

类 apollo::perception::camera::DarkSCNNLanePostprocessor 继承关系图:
apollo::perception::camera::DarkSCNNLanePostprocessor 的协作图:

Public 类型

template<class EigenType >
using EigenVector = apollo::common::EigenVector< EigenType >
 

Public 成员函数

 DarkSCNNLanePostprocessor ()
 
virtual ~DarkSCNNLanePostprocessor ()=default
 
bool Init (const LanePostprocessorInitOptions &options=LanePostprocessorInitOptions()) override
 
bool Process2D (const LanePostprocessorOptions &options, CameraFrame *frame) override
 
bool Process3D (const LanePostprocessorOptions &options, CameraFrame *frame) override
 
void SetIm2CarHomography (const Eigen::Matrix3d &homography_im2car) override
 
std::vector< std::vector< LanePointInfo > > GetLanelinePointSet ()
 
std::vector< LanePointInfo > GetAllInferLinePointSet ()
 
std::string Name () const override
 
- Public 成员函数 继承自 apollo::perception::camera::BaseLanePostprocessor
 BaseLanePostprocessor ()=default
 
virtual ~BaseLanePostprocessor ()=default
 
 DISALLOW_COPY_AND_ASSIGN (BaseLanePostprocessor)
 

详细描述

在文件 darkSCNN_lane_postprocessor.h 第 38 行定义.

成员类型定义说明

◆ EigenVector

构造及析构函数说明

◆ DarkSCNNLanePostprocessor()

apollo::perception::camera::DarkSCNNLanePostprocessor::DarkSCNNLanePostprocessor ( )
inline

◆ ~DarkSCNNLanePostprocessor()

virtual apollo::perception::camera::DarkSCNNLanePostprocessor::~DarkSCNNLanePostprocessor ( )
virtual default

成员函数说明

◆ GetAllInferLinePointSet()

std::vector< LanePointInfo > apollo::perception::camera::DarkSCNNLanePostprocessor::GetAllInferLinePointSet ( )

◆ GetLanelinePointSet()

std::vector< std::vector< LanePointInfo > > apollo::perception::camera::DarkSCNNLanePostprocessor::GetLanelinePointSet ( )

◆ Init()

bool apollo::perception::camera::DarkSCNNLanePostprocessor::Init ( const LanePostprocessorInitOptions & options = LanePostprocessorInitOptions())
override virtual

实现了 apollo::perception::camera::BaseLanePostprocessor.

在文件 darkSCNN_lane_postprocessor.cc 第 77 行定义.

78 {
79 // Read postprocessor parameter
80 std::string config_file =
81 GetConfigFile(options.config_path, options.config_file);
82 if (!cyber::common::GetProtoFromFile(config_file,
83 &lane_postprocessor_param_)) {
84 AERROR << "Failed to read config detect_param: " << config_file;
85 return false;
86 }
87 AINFO << "lane_postprocessor param: "
88 << lane_postprocessor_param_.DebugString();
89
90 input_offset_x_ = lane_postprocessor_param_.input_offset_x();
91 input_offset_y_ = lane_postprocessor_param_.input_offset_y();
92 lane_map_width_ = lane_postprocessor_param_.resize_width();
93 lane_map_height_ = lane_postprocessor_param_.resize_height();
94 DCHECK_LT(lane_map_width_, kMaxValidSize);
95 DCHECK_GT(lane_map_width_, 0);
96 DCHECK_LT(lane_map_height_, kMaxValidSize);
97 DCHECK_GT(lane_map_height_, 0);
98
99 roi_height_ = lane_postprocessor_param_.roi_height();
100 roi_start_ = lane_postprocessor_param_.roi_start();
101 roi_width_ = lane_postprocessor_param_.roi_width();
102
103 lane_type_num_ = static_cast<int>(spatialLUTind.size());
104 AINFO << "lane_type_num_: " << lane_type_num_;
105 return true;
106}
#define AERROR
Definition log.h:44
#define AINFO
Definition log.h:42
bool GetProtoFromFile(const std::string &file_name, google::protobuf::Message *message)
Parses the content of the file specified by the file_name as a representation of protobufs,...
Definition file.cc:132
std::map< base::LaneLinePositionType, int > spatialLUTind
std::string GetConfigFile(const std::string &config_path, const std::string &config_file)
Definition util.cc:80

◆ Name()

std::string apollo::perception::camera::DarkSCNNLanePostprocessor::Name ( ) const
override virtual

实现了 apollo::perception::camera::BaseLanePostprocessor.

在文件 darkSCNN_lane_postprocessor.cc 第 467 行定义.

467 {
468 return "DarkSCNNLanePostprocessor";
469}

◆ Process2D()

bool apollo::perception::camera::DarkSCNNLanePostprocessor::Process2D ( const LanePostprocessorOptions & options,
CameraFrame * frame 
)
override virtual

实现了 apollo::perception::camera::BaseLanePostprocessor.

在文件 darkSCNN_lane_postprocessor.cc 第 108 行定义.

109 {
110 ADEBUG << "Begin to Process2D.";
111 frame->lane_objects.clear();
112 auto start = std::chrono::high_resolution_clock::now();
113
114 cv::Mat lane_map(lane_map_height_, lane_map_width_, CV_32FC1);
115 memcpy(lane_map.data, frame->lane_detected_blob->cpu_data(),
116 lane_map_width_ * lane_map_height_ * sizeof(float));
117
118 // if (options.use_lane_history &&
119 // (!use_history_ || time_stamp_ > options.timestamp)) {
120 // InitLaneHistory();
121 // }
122
123 // 1. Sample points on lane_map and project them onto world coordinate
124
125 // TODO(techoe): Should be fixed
126 int y = static_cast<int>(lane_map.rows * 0.9 - 1);
127 // TODO(techoe): Should be fixed
128 int step_y = (y - 40) * (y - 40) / 6400 + 1;
129
130 xy_points.clear();
131 xy_points.resize(lane_type_num_);
132 uv_points.clear();
133 uv_points.resize(lane_type_num_);
134
135 while (y > 0) {
136 for (int x = 1; x < lane_map.cols - 1; ++x) {
137 int value = static_cast<int>(round(lane_map.at<float>(y, x)));
138 // lane on left
139
140 if ((value > 0 && value < 5) || value == 11) {
141 // right edge (inner) of the lane
142 if (value != static_cast<int>(round(lane_map.at<float>(y, x + 1)))) {
143 Eigen::Matrix<float, 3, 1> img_point(
144 static_cast<float>(x * roi_width_ / lane_map.cols),
145 static_cast<float>(y * roi_height_ / lane_map.rows + roi_start_),
146 1.0);
147 Eigen::Matrix<float, 3, 1> xy_p;
148 xy_p = trans_mat_ * img_point;
149 Eigen::Matrix<float, 2, 1> xy_point;
150 Eigen::Matrix<float, 2, 1> uv_point;
151 if (std::fabs(xy_p(2)) < 1e-6) continue;
152 xy_point << xy_p(0) / xy_p(2), xy_p(1) / xy_p(2);
153
154 // Filter out lane line points
155 if (xy_point(0) < 0.0 || // This condition is only for front camera
156 xy_point(0) > max_longitudinal_distance_ ||
157 std::abs(xy_point(1)) > 30.0) {
158 continue;
159 }
160 uv_point << static_cast<float>(x * roi_width_ / lane_map.cols),
161 static_cast<float>(y * roi_height_ / lane_map.rows + roi_start_);
162 if (xy_points[value].size() < minNumPoints_ || xy_point(0) < 50.0f ||
163 std::fabs(xy_point(1) - xy_points[value].back()(1)) < 1.0f) {
164 xy_points[value].push_back(xy_point);
165 uv_points[value].push_back(uv_point);
166 }
167 }
168 } else if (value >= 5 && value < lane_type_num_) {
169 // Left edge (inner) of the lane
170 if (value != static_cast<int>(round(lane_map.at<float>(y, x - 1)))) {
171 Eigen::Matrix<float, 3, 1> img_point(
172 static_cast<float>(x * roi_width_ / lane_map.cols),
173 static_cast<float>(y * roi_height_ / lane_map.rows + roi_start_),
174 1.0);
175 Eigen::Matrix<float, 3, 1> xy_p;
176 xy_p = trans_mat_ * img_point;
177 Eigen::Matrix<float, 2, 1> xy_point;
178 Eigen::Matrix<float, 2, 1> uv_point;
179 if (std::fabs(xy_p(2)) < 1e-6) continue;
180 xy_point << xy_p(0) / xy_p(2), xy_p(1) / xy_p(2);
181 // Filter out lane line points
182 if (xy_point(0) < 0.0 || // This condition is only for front camera
183 xy_point(0) > max_longitudinal_distance_ ||
184 std::abs(xy_point(1)) > 30.0) {
185 continue;
186 }
187 uv_point << static_cast<float>(x * roi_width_ / lane_map.cols),
188 static_cast<float>(y * roi_height_ / lane_map.rows + roi_start_);
189 if (xy_points[value].size() < minNumPoints_ || xy_point(0) < 50.0f ||
190 std::fabs(xy_point(1) - xy_points[value].back()(1)) < 1.0f) {
191 xy_points[value].push_back(xy_point);
192 uv_points[value].push_back(uv_point);
193 }
194 } else if (value >= lane_type_num_) {
195 AWARN << "Lane line value shouldn't be equal or more than: "
196 << lane_type_num_;
197 }
198 }
199 }
200 step_y = (y - 45) * (y - 45) / 6400 + 1;
201 y -= step_y;
202 }
203
204 auto elapsed_1 = std::chrono::high_resolution_clock::now() - start;
205 int64_t microseconds_1 =
206 std::chrono::duration_cast<std::chrono::microseconds>(elapsed_1).count();
207 time_1 += microseconds_1;
208
209 // 2. Remove outliers and Do a ransac fitting
210 EigenVector<Eigen::Matrix<float, 4, 1>> coeffs;
211 EigenVector<Eigen::Matrix<float, 4, 1>> img_coeffs;
212 EigenVector<Eigen::Matrix<float, 2, 1>> selected_xy_points;
213 coeffs.resize(lane_type_num_);
214 img_coeffs.resize(lane_type_num_);
215 for (int i = 1; i < lane_type_num_; ++i) {
216 coeffs[i] << 0, 0, 0, 0;
217 if (xy_points[i].size() < minNumPoints_) continue;
218 Eigen::Matrix<float, 4, 1> coeff;
219 // Solve linear system to estimate polynomial coefficients
220 if (RansacFitting<float>(xy_points[i], &selected_xy_points, &coeff, 200,
221 static_cast<int>(minNumPoints_), 0.1f)) {
222 coeffs[i] = coeff;
223
224 xy_points[i].clear();
225 xy_points[i] = selected_xy_points;
226 } else {
227 xy_points[i].clear();
228 }
229 }
230
231 auto elapsed_2 = std::chrono::high_resolution_clock::now() - start;
232 int64_t microseconds_2 =
233 std::chrono::duration_cast<std::chrono::microseconds>(elapsed_2).count();
234 time_2 += microseconds_2 - microseconds_1;
235
236 // 3. Write values into lane_objects
237 std::vector<float> c0s(lane_type_num_, 0);
238 for (int i = 1; i < lane_type_num_; ++i) {
239 if (xy_points[i].size() < minNumPoints_) continue;
240 c0s[i] = GetPolyValue(
241 static_cast<float>(coeffs[i](3)), static_cast<float>(coeffs[i](2)),
242 static_cast<float>(coeffs[i](1)), static_cast<float>(coeffs[i](0)),
243 static_cast<float>(3.0));
244 }
245 // [1] Determine lane spatial tag in special cases
246 if (xy_points[4].size() < minNumPoints_ &&
247 xy_points[5].size() >= minNumPoints_) {
248 std::swap(xy_points[4], xy_points[5]);
249 std::swap(uv_points[4], uv_points[5]);
250 std::swap(coeffs[4], coeffs[5]);
251 std::swap(c0s[4], c0s[5]);
252 } else if (xy_points[6].size() < minNumPoints_ &&
253 xy_points[5].size() >= minNumPoints_) {
254 std::swap(xy_points[6], xy_points[5]);
255 std::swap(uv_points[6], uv_points[5]);
256 std::swap(coeffs[6], coeffs[5]);
257 std::swap(c0s[6], c0s[5]);
258 }
259
260 if (xy_points[4].size() < minNumPoints_ &&
261 xy_points[11].size() >= minNumPoints_) {
262 // Use left lane boundary as the right most missing left lane,
263 bool use_boundary = true;
264 for (int k = 3; k >= 1; --k) {
265 if (xy_points[k].size() >= minNumPoints_) {
266 use_boundary = false;
267 break;
268 }
269 }
270 if (use_boundary) {
271 std::swap(xy_points[4], xy_points[11]);
272 std::swap(uv_points[4], uv_points[11]);
273 std::swap(coeffs[4], coeffs[11]);
274 std::swap(c0s[4], c0s[11]);
275 }
276 }
277
278 if (xy_points[6].size() < minNumPoints_ &&
279 xy_points[12].size() >= minNumPoints_) {
280 // Use right lane boundary as the left most missing right lane,
281 bool use_boundary = true;
282 for (int k = 7; k <= 9; ++k) {
283 if (xy_points[k].size() >= minNumPoints_) {
284 use_boundary = false;
285 break;
286 }
287 }
288 if (use_boundary) {
289 std::swap(xy_points[6], xy_points[12]);
290 std::swap(uv_points[6], uv_points[12]);
291 std::swap(coeffs[6], coeffs[12]);
292 std::swap(c0s[6], c0s[12]);
293 }
294 }
295
296 for (int i = 1; i < lane_type_num_; ++i) {
297 base::LaneLine cur_object;
298 if (xy_points[i].size() < minNumPoints_) {
299 continue;
300 }
301
302 // [2] Set spatial label
303 cur_object.pos_type = spatialLUT[i];
304
305 // [3] Determine which lines are valid according to the y value at x = 3
306 if ((i < 5 && c0s[i] < c0s[i + 1]) ||
307 (i > 5 && i < 10 && c0s[i] > c0s[i - 1])) {
308 continue;
309 }
310 if (i == 11 || i == 12) {
311 std::sort(c0s.begin(), c0s.begin() + 10);
312 if ((c0s[i] > c0s[0] && i == 12) || (c0s[i] < c0s[9] && i == 11)) {
313 continue;
314 }
315 }
316 // [4] Write values
317 cur_object.curve_car_coord.x_start =
318 static_cast<float>(xy_points[i].front()(0));
319 cur_object.curve_car_coord.x_end =
320 static_cast<float>(xy_points[i].back()(0));
321 cur_object.curve_car_coord.a = static_cast<float>(coeffs[i](3));
322 cur_object.curve_car_coord.b = static_cast<float>(coeffs[i](2));
323 cur_object.curve_car_coord.c = static_cast<float>(coeffs[i](1));
324 cur_object.curve_car_coord.d = static_cast<float>(coeffs[i](0));
325 // if (cur_object.curve_car_coord.x_end -
326 // cur_object.curve_car_coord.x_start < 5) continue;
327 // cur_object.order = 2;
328 cur_object.curve_car_coord_point_set.clear();
329 for (size_t j = 0; j < xy_points[i].size(); ++j) {
330 base::Point2DF p_j;
331 p_j.x = static_cast<float>(xy_points[i][j](0));
332 p_j.y = static_cast<float>(xy_points[i][j](1));
333 cur_object.curve_car_coord_point_set.push_back(p_j);
334 }
335
336 cur_object.curve_image_point_set.clear();
337 for (size_t j = 0; j < uv_points[i].size(); ++j) {
338 base::Point2DF p_j;
339 p_j.x = static_cast<float>(uv_points[i][j](0));
340 p_j.y = static_cast<float>(uv_points[i][j](1));
341 cur_object.curve_image_point_set.push_back(p_j);
342 }
343
344 // cur_object.confidence.push_back(1);
345 cur_object.confidence = 1.0f;
346 frame->lane_objects.push_back(cur_object);
347 }
348
349 // Special case riding on a lane:
350 // 0: no center lane, 1: center lane as left, 2: center lane as right
351 int has_center_ = 0;
352 for (auto lane_ : frame->lane_objects) {
353 if (lane_.pos_type == base::LaneLinePositionType::EGO_CENTER) {
354 if (lane_.curve_car_coord.d >= 0) {
355 has_center_ = 1;
356 } else if (lane_.curve_car_coord.d < 0) {
357 has_center_ = 2;
358 }
359 break;
360 }
361 }
362 // Change labels for all lanes from one side
363 if (has_center_ == 1) {
364 for (auto& lane_ : frame->lane_objects) {
365 int spatial_id = spatialLUTind[lane_.pos_type];
366 if (spatial_id >= 1 && spatial_id <= 5) {
367 lane_.pos_type = spatialLUT[spatial_id - 1];
368 }
369 }
370 } else if (has_center_ == 2) {
371 for (auto& lane_ : frame->lane_objects) {
372 int spatial_id = spatialLUTind[lane_.pos_type];
373 if (spatial_id >= 5 && spatial_id <= 9) {
374 lane_.pos_type = spatialLUT[spatial_id + 1];
375 }
376 }
377 }
378
379 auto elapsed = std::chrono::high_resolution_clock::now() - start;
380 int64_t microseconds =
381 std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count();
382 // AINFO << "Time for writing: " << microseconds - microseconds_2 << " us";
383 time_3 += microseconds - microseconds_2;
384 ++time_num;
385
386 ADEBUG << "frame->lane_objects.size(): " << frame->lane_objects.size();
387
388 ADEBUG << "Avg sampling time: " << time_1 / time_num
389 << " Avg fitting time: " << time_2 / time_num
390 << " Avg writing time: " << time_3 / time_num;
391 ADEBUG << "darkSCNN lane_postprocess done!";
392 return true;
393}
#define ADEBUG
Definition log.h:41
#define AWARN
Definition log.h:43
@ EGO_CENTER
center lane marking of the ego lane, changing lane
Point2D< float > Point2DF
Definition point.h:87
T GetPolyValue(T a, T b, T c, T d, T x)
std::vector< base::LaneLinePositionType > spatialLUT({base::LaneLinePositionType::UNKNOWN, base::LaneLinePositionType::FOURTH_LEFT, base::LaneLinePositionType::THIRD_LEFT, base::LaneLinePositionType::ADJACENT_LEFT, base::LaneLinePositionType::EGO_LEFT, base::LaneLinePositionType::EGO_CENTER, base::LaneLinePositionType::EGO_RIGHT, base::LaneLinePositionType::ADJACENT_RIGHT, base::LaneLinePositionType::THIRD_RIGHT, base::LaneLinePositionType::FOURTH_RIGHT, base::LaneLinePositionType::OTHER, base::LaneLinePositionType::CURB_LEFT, base::LaneLinePositionType::CURB_RIGHT})

◆ Process3D()

bool apollo::perception::camera::DarkSCNNLanePostprocessor::Process3D ( const LanePostprocessorOptions & options,
CameraFrame * frame 
)
override virtual

实现了 apollo::perception::camera::BaseLanePostprocessor.

在文件 darkSCNN_lane_postprocessor.cc 第 396 行定义.

397 {
398 ConvertImagePoint2Camera(frame);
399 PolyFitCameraLaneline(frame);
400 return true;
401}

◆ SetIm2CarHomography()

void apollo::perception::camera::DarkSCNNLanePostprocessor::SetIm2CarHomography ( const Eigen::Matrix3d &  homography_im2car)
inline override virtual

重载 apollo::perception::camera::BaseLanePostprocessor .

在文件 darkSCNN_lane_postprocessor.h 第 65 行定义.

65 {
66 Eigen::Matrix3d im2car = homography_im2car;
67 trans_mat_ = im2car.cast<float>();
68 trans_mat_inv = trans_mat_.inverse();
69 }

该类的文档由以下文件生成: