Apollo 10.0
自动驾驶开放平台
apollo::perception::camera::DarkSCNNLaneDetector类 参考

#include <darkSCNN_lane_detector.h>

类 apollo::perception::camera::DarkSCNNLaneDetector 继承关系图:
apollo::perception::camera::DarkSCNNLaneDetector 的协作图:

Public 成员函数

 DarkSCNNLaneDetector ()
 
virtual ~DarkSCNNLaneDetector ()=default
 
bool Init (const LaneDetectorInitOptions &options=LaneDetectorInitOptions()) override
 
bool Detect (const LaneDetectorOptions &options, CameraFrame *frame) override
 
std::string Name () const override
 
- Public 成员函数 继承自 apollo::perception::camera::BaseLaneDetector
 BaseLaneDetector ()=default
 
virtual ~BaseLaneDetector ()=default
 
 DISALLOW_COPY_AND_ASSIGN (BaseLaneDetector)
 

详细描述

在文件 darkSCNN_lane_detector.h 的第 41 行定义.

构造及析构函数说明

◆ DarkSCNNLaneDetector()

apollo::perception::camera::DarkSCNNLaneDetector::DarkSCNNLaneDetector ( )
inline

在文件 darkSCNN_lane_detector.h 的第 43 行定义.

44 input_height_ = 0;
45 input_width_ = 0;
46 input_offset_y_ = 0;
47 input_offset_x_ = 0;
48 crop_height_ = 0;
49 crop_width_ = 0;
50 resize_height_ = 0;
51 resize_width_ = 0;
52 image_mean_[0] = 0;
53 image_mean_[1] = 0;
54 image_mean_[2] = 0;
55 confidence_threshold_lane_ = 0;
56 lane_output_height_ = 0;
57 lane_output_width_ = 0;
58 num_lanes_ = 0;
59 }

◆ ~DarkSCNNLaneDetector()

virtual apollo::perception::camera::DarkSCNNLaneDetector::~DarkSCNNLaneDetector ( )
virtual default

成员函数说明

◆ Detect()

bool apollo::perception::camera::DarkSCNNLaneDetector::Detect ( const LaneDetectorOptions & options,
CameraFrame * frame 
)
override virtual

实现了 apollo::perception::camera::BaseLaneDetector.

在文件 darkSCNN_lane_detector.cc 的第 165 行定义.

166 {
167 if (frame == nullptr) {
168 AINFO << "camera frame is empty.";
169 return false;
170 }
171
172 auto start = std::chrono::high_resolution_clock::now();
173 auto data_provider = frame->data_provider;
174 if (input_width_ != data_provider->src_width()) {
175 AERROR << "Input size is not correct: " << input_width_ << " vs "
176 << data_provider->src_width();
177 return false;
178 }
179 if (input_height_ != data_provider->src_height()) {
180 AERROR << "Input size is not correct: " << input_height_ << " vs "
181 << data_provider->src_height();
182 return false;
183 }
184
185 // use data provider to crop input image
186 if (!data_provider->GetImage(data_provider_image_option_, &image_src_)) {
187 return false;
188 }
189
190 // bottom 0 is data
191 auto input_blob = cnnadapter_lane_->get_blob(net_inputs_[0]);
192 auto blob_channel = input_blob->channels();
193 auto blob_height = input_blob->height();
194 auto blob_width = input_blob->width();
195 ADEBUG << "input_blob: " << blob_channel << " " << blob_height << " "
196 << blob_width << std::endl;
197
198 if (blob_height != resize_height_) {
199 AERROR << "height is not equal" << blob_height << " vs " << resize_height_;
200 return false;
201 }
202 if (blob_width != resize_width_) {
203 AERROR << "width is not equal" << blob_width << " vs " << resize_width_;
204 return false;
205 }
206 ADEBUG << "image_blob: " << image_src_.blob()->shape_string();
207 ADEBUG << "input_blob: " << input_blob->shape_string();
208 // resize the cropped image into network input blob
210 image_src_, input_blob, static_cast<int>(crop_width_), 0,
211 static_cast<float>(image_mean_[0]), static_cast<float>(image_mean_[1]),
212 static_cast<float>(image_mean_[2]), false, static_cast<float>(1.0));
213 ADEBUG << "resize gpu finish.";
214 cudaDeviceSynchronize();
215 cnnadapter_lane_->Infer();
216 ADEBUG << "infer finish.";
217
218 auto elapsed_1 = std::chrono::high_resolution_clock::now() - start;
219 int64_t microseconds_1 =
220 std::chrono::duration_cast<std::chrono::microseconds>(elapsed_1).count();
221 time_1 += microseconds_1;
222
223 // convert network output to color map
224 const auto seg_blob = cnnadapter_lane_->get_blob(net_outputs_[0]);
225 ADEBUG << "seg_blob: " << seg_blob->shape_string();
226 std::vector<cv::Mat> masks;
227 for (int i = 0; i < num_lanes_; ++i) {
228 cv::Mat tmp(lane_output_height_, lane_output_width_, CV_32FC1);
229 memcpy(tmp.data,
230 seg_blob->cpu_data() + lane_output_width_ * lane_output_height_ * i,
231 lane_output_width_ * lane_output_height_ * sizeof(float));
232 // cv::resize(tmp
233 // , tmp, cv::Size(lane_output_width_, lane_output_height_), 0,
234 // 0);
235 masks.push_back(tmp);
236 }
237 std::vector<int> cnt_pixels(13, 0);
238 cv::Mat mask_color(lane_output_height_, lane_output_width_, CV_32FC1);
239 mask_color.setTo(cv::Scalar(0));
240 for (int c = 0; c < num_lanes_; ++c) {
241 for (int h = 0; h < masks[c].rows; ++h) {
242 for (int w = 0; w < masks[c].cols; ++w) {
243 if (masks[c].at<float>(h, w) >= confidence_threshold_lane_) {
244 mask_color.at<float>(h, w) = static_cast<float>(c);
245 cnt_pixels[c]++;
246 }
247 }
248 }
249 }
250 memcpy(lane_blob_->mutable_cpu_data(),
251 reinterpret_cast<float *>(mask_color.data),
252 lane_output_width_ * lane_output_height_ * sizeof(float));
253 // Don't use this way to copy data, it will modify data
254 // lane_blob_->set_cpu_data((float*)mask_color.data);
255 frame->lane_detected_blob = lane_blob_;
256
257 // retrieve vanishing point network output
258 if (net_outputs_.size() > 1) {
259 const auto vpt_blob = cnnadapter_lane_->get_blob(net_outputs_[1]);
260 ADEBUG << "vpt_blob: " << vpt_blob->shape_string();
261 std::vector<float> v_point(2, 0);
262 std::copy(vpt_blob->cpu_data(), vpt_blob->cpu_data() + 2, v_point.begin());
263 // compute coordinate in net input image
264 v_point[0] = v_point[0] * vpt_std_[0] + vpt_mean_[0] +
265 (static_cast<float>(blob_width) / 2);
266 v_point[1] = v_point[1] * vpt_std_[1] + vpt_mean_[1] +
267 (static_cast<float>(blob_height) / 2);
268 // compute coordinate in original image
269 v_point[0] = v_point[0] / static_cast<float>(blob_width) *
270 static_cast<float>(crop_width_) +
271 static_cast<float>(input_offset_x_);
272 v_point[1] = v_point[1] / static_cast<float>(blob_height) *
273 static_cast<float>(crop_height_) +
274 static_cast<float>(input_offset_y_);
275
276 ADEBUG << "vanishing point: " << v_point[0] << " " << v_point[1];
277 if (v_point[0] > 0 && v_point[0] < static_cast<float>(input_width_) &&
278 v_point[1] > 0 && v_point[0] < static_cast<float>(input_height_)) {
279 frame->pred_vpt = v_point;
280 }
281 }
282
283 auto elapsed_2 = std::chrono::high_resolution_clock::now() - start;
284 int64_t microseconds_2 =
285 std::chrono::duration_cast<std::chrono::microseconds>(elapsed_2).count();
286 time_2 += microseconds_2 - microseconds_1;
287
288 time_num += 1;
289 ADEBUG << "Avg detection infer time: " << time_1 / time_num
290 << " Avg detection merge output time: " << time_2 / time_num;
291 ADEBUG << "Lane detection done!";
292 return true;
293}
std::shared_ptr< Blob< uint8_t > > blob()
Definition image_8u.h:133
#define ADEBUG
Definition log.h:41
#define AERROR
Definition log.h:44
#define AINFO
Definition log.h:42
bool ResizeGPU(const base::Image8U &src, std::shared_ptr< apollo::perception::base::Blob< float > > dst, int stepwidth, int start_axis)

◆ Init()

bool apollo::perception::camera::DarkSCNNLaneDetector::Init ( const LaneDetectorInitOptions & options = LaneDetectorInitOptions())
override virtual

实现了 apollo::perception::camera::BaseLaneDetector.

在文件 darkSCNN_lane_detector.cc 的第 34 行定义.

34 {
35 std::string config_file =
36 GetConfigFile(options.config_path, options.config_file);
37 if (!GetProtoFromFile(config_file, &darkscnn_param_)) {
38 AINFO << "load proto param failed!!!" << config_file;
39 return false;
40 }
41
42 AINFO << "darkSCNN param: " << darkscnn_param_.DebugString();
43
44 const auto model_param = darkscnn_param_.model_param();
45 std::string model_path = GetModelPath(model_param.model_name());
46 std::string proto_file = GetModelFile(model_path, model_param.proto_file());
47 std::string weight_file = GetModelFile(model_path, model_param.weight_file());
48 AINFO << " proto_file: " << proto_file;
49 AINFO << " weight_file: " << weight_file;
50
51 base_camera_model_ = options.base_camera_model;
52 if (base_camera_model_ == nullptr) {
53 AERROR << "options.intrinsic is nullptr!";
54 input_height_ = 1080;
55 input_width_ = 1920;
56 } else {
57 input_height_ = static_cast<uint16_t>(base_camera_model_->get_height());
58 input_width_ = static_cast<uint16_t>(base_camera_model_->get_width());
59 }
60 ACHECK(input_width_ > 0) << "input width should be more than 0";
61 ACHECK(input_height_ > 0) << "input height should be more than 0";
62
63 AINFO << "input_height: " << input_height_;
64 AINFO << "input_width: " << input_width_;
65
66 // compute image provider parameters
67 input_offset_y_ = static_cast<uint16_t>(model_param.input_offset_y());
68 input_offset_x_ = static_cast<uint16_t>(model_param.input_offset_x());
69 resize_height_ = static_cast<uint16_t>(model_param.resize_height());
70 resize_width_ = static_cast<uint16_t>(model_param.resize_width());
71 crop_height_ = static_cast<uint16_t>(model_param.crop_height());
72 crop_width_ = static_cast<uint16_t>(model_param.crop_width());
73 confidence_threshold_lane_ = model_param.confidence_threshold();
74
75 CHECK_LE(crop_height_, input_height_)
76 << "crop height larger than input height";
77 CHECK_LE(crop_width_, input_width_) << "crop width larger than input width";
78
79 if (model_param.is_bgr()) {
80 data_provider_image_option_.target_color = base::Color::BGR;
81 image_mean_[0] = model_param.mean_b();
82 image_mean_[1] = model_param.mean_g();
83 image_mean_[2] = model_param.mean_r();
84 } else {
85 data_provider_image_option_.target_color = base::Color::RGB;
86 image_mean_[0] = model_param.mean_r();
87 image_mean_[1] = model_param.mean_g();
88 image_mean_[2] = model_param.mean_b();
89 }
90 data_provider_image_option_.do_crop = true;
91 data_provider_image_option_.crop_roi.x = input_offset_x_;
92 data_provider_image_option_.crop_roi.y = input_offset_y_;
93 data_provider_image_option_.crop_roi.height = crop_height_;
94 data_provider_image_option_.crop_roi.width = crop_width_;
95
96 cudaDeviceProp prop;
97 AERROR << "huqilin_debug : darkscnn init test 1";
98 cudaGetDeviceProperties(&prop, options.gpu_id);
99 AINFO << "GPU: " << prop.name;
100 AERROR << "huqilin_debug : darkscnn init test 2";
101 const auto net_param = darkscnn_param_.net_param();
102 net_inputs_.push_back(net_param.input_blob());
103 net_outputs_.push_back(net_param.seg_blob());
104 if (model_param.model_type() == "CaffeNet" && net_param.has_vpt_blob() &&
105 net_param.vpt_blob().size() > 0) {
106 net_outputs_.push_back(net_param.vpt_blob());
107 }
108
109 for (auto name : net_inputs_) {
110 AINFO << "net input blobs: " << name;
111 }
112 for (auto name : net_outputs_) {
113 AINFO << "net output blobs: " << name;
114 }
115
116 // initialize caffe net
117 const auto &model_type = model_param.model_type();
118 AINFO << "model_type: " << model_type;
119 cnnadapter_lane_.reset(
120 inference::CreateInferenceByName(model_type, proto_file, weight_file,
121 net_outputs_, net_inputs_, model_path));
122 ACHECK(cnnadapter_lane_ != nullptr);
123
124 cnnadapter_lane_->set_gpu_id(options.gpu_id);
125 ACHECK(resize_width_ > 0) << "resize width should be more than 0";
126 ACHECK(resize_height_ > 0) << "resize height should be more than 0";
127 std::vector<int> shape = {1, 3, resize_height_, resize_width_};
128 std::map<std::string, std::vector<int>> input_reshape{
129 {net_inputs_[0], shape}};
130 AINFO << "input_reshape: " << input_reshape[net_inputs_[0]][0] << ", "
131 << input_reshape[net_inputs_[0]][1] << ", "
132 << input_reshape[net_inputs_[0]][2] << ", "
133 << input_reshape[net_inputs_[0]][3];
134 if (!cnnadapter_lane_->Init(input_reshape)) {
135 AINFO << "net init fail.";
136 return false;
137 }
138
139 for (auto &input_blob_name : net_inputs_) {
140 auto input_blob = cnnadapter_lane_->get_blob(input_blob_name);
141 AINFO << input_blob_name << ": " << input_blob->channels() << " "
142 << input_blob->height() << " " << input_blob->width();
143 }
144
145 auto output_blob = cnnadapter_lane_->get_blob(net_outputs_[0]);
146 AINFO << net_outputs_[0] << " : " << output_blob->channels() << " "
147 << output_blob->height() << " " << output_blob->width();
148 lane_output_height_ = output_blob->height();
149 lane_output_width_ = output_blob->width();
150 num_lanes_ = output_blob->channels();
151
152 if (net_outputs_.size() > 1) {
153 vpt_mean_.push_back(model_param.vpt_mean_dx());
154 vpt_mean_.push_back(model_param.vpt_mean_dy());
155 vpt_std_.push_back(model_param.vpt_std_dx());
156 vpt_std_.push_back(model_param.vpt_std_dy());
157 }
158
159 std::vector<int> lane_shape = {1, 1, lane_output_height_, lane_output_width_};
160 lane_blob_.reset(new base::Blob<float>(lane_shape));
161 AERROR << "huqilin_debug : darkscnn init test ok";
162 return true;
163}
#define ACHECK(cond)
Definition log.h:80
bool GetProtoFromFile(const std::string &file_name, google::protobuf::Message *message)
Parses the content of the file specified by the file_name as a representation of protobufs,...
Definition file.cc:132
Inference * CreateInferenceByName(const std::string &frame_work, const std::string &proto_file, const std::string &weight_file, const std::vector< std::string > &outputs, const std::vector< std::string > &inputs, const std::string &model_root)
std::string GetModelFile(const std::string &model_name, const std::string &file_name)
Get the model file path by model path and file name
Definition util.cc:55
std::string GetConfigFile(const std::string &config_path, const std::string &config_file)
Definition util.cc:80
std::string GetModelPath(const std::string &model_name)
Get the model path by model name, search from APOLLO_MODEL_PATH
Definition util.cc:44

◆ Name()

std::string apollo::perception::camera::DarkSCNNLaneDetector::Name ( ) const
override virtual

实现了 apollo::perception::camera::BaseLaneDetector.

在文件 darkSCNN_lane_detector.cc 的第 295 行定义.

295 {
296 return "DarkSCNNLaneDetector";
297}

该类的文档由以下文件生成: