Apollo 10.0
自动驾驶开放平台
apollo::perception::camera::Cipv类 参考

#include <cipv_camera.h>

类 apollo::perception::camera::Cipv 继承关系图:
apollo::perception::camera::Cipv 的协作图:

Public 成员函数

 Cipv ()=default
 
 ~Cipv ()=default
 
bool Init (const Eigen::Matrix3d &homography_im2car, const CipvInitOptions &options=CipvInitOptions()) override
 
bool Process (CameraFrame *frame, const CipvOptions &options, const Eigen::Affine3d &world2camera, const base::MotionBufferPtr &motion_buffer) override
 
std::string Name () const override
 
bool DetermineCipv (const std::vector< base::LaneLine > &lane_objects, const CipvOptions &options, const Eigen::Affine3d &world2camera, std::vector< std::shared_ptr< base::Object > > *objects)
 
bool CollectDrops (const base::MotionBufferPtr &motion_buffer, const Eigen::Affine3d &world2camera, std::vector< std::shared_ptr< base::Object > > *objects)
 
- Public 成员函数 继承自 apollo::perception::camera::BaseCipv
 BaseCipv ()=default
 
virtual ~BaseCipv ()=default
 

静态 Public 成员函数

static float VehicleDynamics (const uint32_t tick, const float yaw_rate, const float velocity, const float time_unit, float *x, float *y)
 
static float VehicleDynamics (const uint32_t tick, const float yaw_rate, const float velocity, const float time_unit, const float vehicle_half_width, float *center_x, float *ceneter_y, float *left_x, float *left_y, float *right_x, float *right_y)
 
static bool MakeVirtualEgoLaneFromYawRate (const float yaw_rate, const float velocity, const float offset_distance, LaneLineSimple *left_lane_line, LaneLineSimple *right_lane_line)
 

详细描述

在文件 cipv_camera.h 第 52 行定义.

构造及析构函数说明

◆ Cipv()

apollo::perception::camera::Cipv::Cipv ( )
default

◆ ~Cipv()

apollo::perception::camera::Cipv::~Cipv ( )
default

成员函数说明

◆ CollectDrops()

bool apollo::perception::camera::Cipv::CollectDrops ( const base::MotionBufferPtr motion_buffer,
const Eigen::Affine3d &  world2camera,
std::vector< std::shared_ptr< base::Object > > *  objects 
)

在文件 cipv_camera.cc959 行定义.

961 {
962 int motion_size = static_cast<int>(motion_buffer->size());
963 if (debug_level_ >= 2) {
964 AINFO << " motion_size: " << motion_size;
965 }
966 if (motion_size <= 0) {
967 ADEBUG << " motion_size: " << motion_size;
968 return false;
969 }
970 // std::map<int, std::vector<std::pair<float, float>>>
971 // tmp_object_trackjectories;
972 // std::swap(object_trackjectories_, tmp_object_trackjectories);
973
974 if (debug_level_ >= 2) {
975 AINFO << "object_trackjectories_.size(): " << object_trackjectories_.size();
976 }
977 for (auto obj : *objects) {
978 int cur_id = obj->track_id;
979 if (debug_level_ >= 2) {
980 AINFO << "target ID: " << cur_id;
981 }
982 // for (auto point : tmp_object_trackjectories[cur_id]) {
983 // object_trackjectories_[cur_id].emplace_back(point);
984 // }
985
986 // If it is the first object, set capacity.
987 if (object_trackjectories_[cur_id].empty()) {
988 object_trackjectories_[cur_id].set_capacity(kDropsHistorySize);
989 }
990
991 object_id_skip_count_[cur_id] = 0;
992
993 auto pos = world2camera * obj->center;
994 float center_x = static_cast<float>(pos(2));
995 float center_y = static_cast<float>(-pos(0));
996
997 object_trackjectories_[cur_id].push_back(
998 std::make_pair(center_x, center_y));
999
1000 if (debug_level_ >= 2) {
1001 AINFO << "object_trackjectories_[" << cur_id
1002 << " ].size(): " << object_trackjectories_[cur_id].size();
1003 }
1004
1005 Eigen::Matrix4f accum_motion_buffer =
1006 motion_buffer->at(motion_size - 1).motion;
1007 // Add drops
1008 for (std::size_t it = object_trackjectories_[cur_id].size() - 1, count = 0;
1009 it > 0; it--) {
1010 if (count >= kDropsHistorySize || count > motion_buffer->size()) {
1011 break;
1012 }
1013 if (static_cast<int>(it) + 1 > motion_size) {
1014 continue;
1015 }
1016 Eigen::VectorXf pt =
1017 Eigen::VectorXf::Zero((*motion_buffer)[0].motion.cols());
1018 pt(0) = object_trackjectories_[cur_id][it].first;
1019 pt(1) = object_trackjectories_[cur_id][it].second;
1020 pt(2) = 0.0f;
1021 pt(3) = 1.0f;
1022
1023 Eigen::Vector3d transformed_pt;
1024 accum_motion_buffer *= motion_buffer->at(motion_size - it - 1).motion;
1025 TranformPoint(pt, accum_motion_buffer, &transformed_pt);
1026 // TranformPoint(pt, (*motion_buffer)[motion_size - count - 1].motion,
1027 // &transformed_pt);
1028 if (debug_level_ >= 3) {
1029 AINFO << "(*motion_buffer)[" << motion_size - it - 1 << "].motion:";
1030 AINFO << motion_buffer->at(motion_size - it - 1).motion;
1031 AINFO << "accum_motion_buffer[" << motion_size - it - 1 << "] =";
1032 AINFO << accum_motion_buffer;
1033 AINFO << "target[" << obj->track_id << "][" << it << "]: ("
1034 << transformed_pt(0) << ", " << transformed_pt(1) << ")";
1035 }
1036 obj->drops[count] = transformed_pt;
1037 obj->drop_num = count++;
1038 }
1039 }
1040
1041 // Currently remove trajectory if they do not exist in the current frame
1042 // TODO(techoe): need to wait several frames
1043 for (const auto &each_object : object_trackjectories_) {
1044 int obj_id = each_object.first;
1045 bool b_found_id = false;
1046 for (auto obj : *objects) {
1047 int cur_id = obj->track_id;
1048 if (obj_id == cur_id) {
1049 b_found_id = true;
1050 break;
1051 }
1052 }
1053 // If object ID was not found erase it from map
1054 if (!b_found_id && object_trackjectories_[obj_id].size() > 0) {
1055 // object_id_skip_count_[obj_id].second++;
1056 object_id_skip_count_[obj_id]++;
1057 if (debug_level_ >= 2) {
1058 AINFO << "object_id_skip_count_[" << obj_id
1059 << " ]: " << object_id_skip_count_[obj_id];
1060 }
1061 if (object_id_skip_count_[obj_id] >= kMaxAllowedSkipObject) {
1062 if (debug_level_ >= 2) {
1063 AINFO << "Removed obsolete object " << obj_id;
1064 }
1065 object_trackjectories_.erase(obj_id);
1066 object_id_skip_count_.erase(obj_id);
1067 }
1068 }
1069 }
1070 if (debug_level_ >= 2) {
1071 for (auto obj : *objects) {
1072 int cur_id = obj->track_id;
1073 AINFO << "obj->track_id: " << cur_id;
1074 AINFO << "obj->drop_num: " << obj->drop_num;
1075 }
1076 }
1077 return true;
1078}
#define ADEBUG
Definition log.h:41
#define AINFO
Definition log.h:42

◆ DetermineCipv()

bool apollo::perception::camera::Cipv::DetermineCipv ( const std::vector< base::LaneLine > &  lane_objects,
const CipvOptions options,
const Eigen::Affine3d &  world2camera,
std::vector< std::shared_ptr< base::Object > > *  objects 
)

在文件 cipv_camera.cc853 行定义.

856 {
857 if (debug_level_ >= 3) {
858 AINFO << "Cipv Got SensorObjects with size of " << objects->size();
859 AINFO << "Cipv Got lane object with size of " << lane_objects.size();
860 }
861
862 // float yaw_rate = options.yaw_rate;
863 // float velocity = options.velocity;
864 int32_t cipv_index = -1;
865 // int32_t old_cipv_track_id = sensor_objects.cipv_track_id;
866 int32_t cipv_track_id = -1;
867 bool b_left_valid = false;
868 bool b_right_valid = false;
869 int32_t old_cipv_index = -1;
870 EgoLane egolane_image;
871 EgoLane egolane_ground;
872 EgoLane virtual_egolane_ground;
873 egolane_ground.left_line.line_point.clear();
874 egolane_ground.right_line.line_point.clear();
875 virtual_egolane_ground.left_line.line_point.clear();
876 virtual_egolane_ground.right_line.line_point.clear();
877
878 // Get ego lanes (in both image and ground coordinate)
879 GetEgoLane(lane_objects, &egolane_image, &egolane_ground, &b_left_valid,
880 &b_right_valid);
881 ElongateEgoLane(lane_objects, b_left_valid, b_right_valid, options.yaw_rate,
882 options.velocity, &egolane_image, &egolane_ground);
883
884 CreateVirtualEgoLane(options.yaw_rate, options.velocity,
885 &virtual_egolane_ground);
886
887 float min_distance = std::numeric_limits<float>::max();
888 float distance;
889 for (int32_t i = 0; i < static_cast<int32_t>(objects->size()); ++i) {
890 if (debug_level_ >= 2) {
891 AINFO << "objects[" << i << "]->track_id: " << (*objects)[i]->track_id;
892 }
893 if (IsObjectInTheLane((*objects)[i], egolane_image, egolane_ground,
894 world2camera, false, &distance) ||
895 IsObjectInTheLane((*objects)[i], egolane_image, virtual_egolane_ground,
896 world2camera, true, &distance)) {
897 if (cipv_index < 0 || distance < min_distance) {
898 cipv_index = i;
899 cipv_track_id = (*objects)[i]->track_id;
900 min_distance = distance;
901 }
902
903 if (debug_level_ >= 2) {
904 AINFO << "current cipv_index: " << cipv_index;
905 }
906 }
907 if ((*objects)[i]->track_id == old_cipv_track_id_) {
908 old_cipv_index = cipv_index;
909 }
910 }
911 if (debug_level_ >= 1) {
912 AINFO << "old_cipv_index: " << old_cipv_index;
913 AINFO << "old_cipv_track_id_: " << old_cipv_track_id_;
914 }
915 if (cipv_index >= 0) {
916 if (old_cipv_index >= 0 && old_cipv_index != cipv_index &&
917 old_cipv_index < static_cast<int32_t>(objects->size())) {
918 (*objects)[old_cipv_index]->b_cipv = false;
919 }
920 (*objects)[cipv_index]->b_cipv = true;
921 old_cipv_track_id_ = cipv_track_id;
922 // sensor_objects.cipv_index = cipv_index;
923 // sensor_objects.cipv_track_id = cipv_track_id;
924 if (debug_level_ >= 1) {
925 AINFO << "final cipv_index: " << cipv_index;
926 AINFO << "final cipv_track_id: " << cipv_track_id;
927 // AINFO << "CIPV Index is changed from " << old_cipv_index << "th
928 // object to "
929 // << cipv_index << "th object.";
930 // AINFO << "CIPV Track_ID is changed from " << old_cipv_track_id <<
931 // " to "
932 // << cipv_track_id << ".";
933 }
934 } else {
935 if (debug_level_ >= 1) {
936 AINFO << "No cipv";
937 }
938 }
939
940 return true;
941}

◆ Init()

bool apollo::perception::camera::Cipv::Init ( const Eigen::Matrix3d &  homography_im2car,
const CipvInitOptions options = CipvInitOptions() 
)
overridevirtual

实现了 apollo::perception::camera::BaseCipv.

在文件 cipv_camera.cc30 行定义.

31 {
32 b_image_based_cipv_ = options.image_based_cipv;
33 debug_level_ =
34 options.debug_level;
35 // 0: no debug message
36 // 1: minimal output
37 // 2: some important output
38 // 3: verbose message
39 // 4: visualization
40 // 5: all
41 // -x: specific debugging, where x is the specific number
42 time_unit_ = options.average_frame_rate;
43 homography_im2car_ = homography_im2car;
44 homography_car2im_ = homography_im2car.inverse();
45
46 min_laneline_length_for_cipv_ = options.min_laneline_length_for_cipv;
47 average_lane_width_in_meter_ = options.average_lane_width_in_meter;
48 max_vehicle_width_in_meter_ = options.max_vehicle_width_in_meter;
49 margin_vehicle_to_lane_ =
50 (average_lane_width_in_meter_ - max_vehicle_width_in_meter_) * 0.5f;
51 single_virtual_egolane_width_in_meter_ = max_vehicle_width_in_meter_;
52 half_vehicle_width_in_meter_ = max_vehicle_width_in_meter_ * 0.5f;
53 half_virtual_egolane_width_in_meter_ =
54 single_virtual_egolane_width_in_meter_ * 0.5f;
55 old_cipv_track_id_ = -2;
56
57 return true;
58}

◆ MakeVirtualEgoLaneFromYawRate()

bool apollo::perception::camera::Cipv::MakeVirtualEgoLaneFromYawRate ( const float  yaw_rate,
const float  velocity,
const float  offset_distance,
LaneLineSimple left_lane_line,
LaneLineSimple right_lane_line 
)
static

在文件 cipv_camera.cc229 行定义.

233 {
234 float center_x = 0.0f;
235 float center_y = 0.0f;
236 float left_x = 0.0f;
237 float left_y = 0.0f;
238 float right_x = 0.0f;
239 float right_y = 0.0f;
240 left_lane_line->line_point.clear();
241 right_lane_line->line_point.clear();
242
243 for (uint32_t i = 1; i < kMaxNumVirtualLanePoint; ++i) {
244 VehicleDynamics(i, yaw_rate, velocity, kAverageFrameRate, offset_distance,
245 &center_x, &center_y, &left_x, &left_y, &right_x, &right_y);
246 Point2Df left_point(left_x, left_y);
247 left_lane_line->line_point.emplace_back(left_point);
248 Point2Df right_point(right_x, right_y);
249 right_lane_line->line_point.emplace_back(right_point);
250 }
251 return true;
252}
static float VehicleDynamics(const uint32_t tick, const float yaw_rate, const float velocity, const float time_unit, float *x, float *y)
Eigen::Vector2f Point2Df
Definition lane_object.h:53

◆ Name()

std::string apollo::perception::camera::Cipv::Name ( ) const
overridevirtual

实现了 apollo::perception::camera::BaseCipv.

在文件 cipv_camera.cc1116 行定义.

1116{ return "Cipv"; }

◆ Process()

bool apollo::perception::camera::Cipv::Process ( CameraFrame frame,
const CipvOptions options,
const Eigen::Affine3d &  world2camera,
const base::MotionBufferPtr motion_buffer 
)
overridevirtual

实现了 apollo::perception::camera::BaseCipv.

在文件 cipv_camera.cc60 行定义.

63 {
64 DetermineCipv(frame->lane_objects, options, world2camera,
65 &frame->tracked_objects);
66
67 // Get Drop points
68 if (motion_buffer->size() > 0) {
69 CollectDrops(motion_buffer, world2camera, &frame->tracked_objects);
70 } else {
71 AWARN << "motion_buffer is empty";
72 }
73 return true;
74}
bool DetermineCipv(const std::vector< base::LaneLine > &lane_objects, const CipvOptions &options, const Eigen::Affine3d &world2camera, std::vector< std::shared_ptr< base::Object > > *objects)
bool CollectDrops(const base::MotionBufferPtr &motion_buffer, const Eigen::Affine3d &world2camera, std::vector< std::shared_ptr< base::Object > > *objects)
#define AWARN
Definition log.h:43

◆ VehicleDynamics() [1/2]

float apollo::perception::camera::Cipv::VehicleDynamics ( const uint32_t  tick,
const float  yaw_rate,
const float  velocity,
const float  time_unit,
const float  vehicle_half_width,
float *  center_x,
float *  ceneter_y,
float *  left_x,
float *  left_y,
float *  right_x,
float *  right_y 
)
static

在文件 cipv_camera.cc196 行定义.

200 {
201 // Option 1. Straight model;
202 // *x = time_unit * velocity * static_cast<float>(tick);
203 // *y = 0.0f;
204
205 // Option 2. Sphere model
206 float adjusted_velocity = std::max(kMinVelocity, velocity);
207 float theta = static_cast<float>(tick) * time_unit * yaw_rate;
208 float displacement = static_cast<float>(tick) * time_unit * adjusted_velocity;
209 float offset = half_vehicle_width;
210 *center_x = displacement * static_cast<float>(cos(theta));
211 *center_y = displacement * static_cast<float>(sin(theta));
212 if (theta < 0) {
213 offset = -half_vehicle_width;
214 }
215 *left_x = (displacement + offset) * static_cast<float>(cos(theta)) - offset;
216 *left_y = (displacement + offset) * static_cast<float>(sin(theta)) +
217 half_vehicle_width;
218 *right_x = (displacement - offset) * static_cast<float>(cos(theta)) + offset;
219 *right_y = (displacement - offset) * static_cast<float>(sin(theta)) -
220 half_vehicle_width;
221
222 // Option 3. Bicycle model
223 // TODO(techoe): Apply bicycle model for vehicle dynamics (need wheel base)
224
225 return true;
226}
float cos(Angle16 a)
Definition angle.cc:42
float sin(Angle16 a)
Definition angle.cc:25
constexpr float kMinVelocity
Definition cipv_camera.h:40

◆ VehicleDynamics() [2/2]

float apollo::perception::camera::Cipv::VehicleDynamics ( const uint32_t  tick,
const float  yaw_rate,
const float  velocity,
const float  time_unit,
float *  x,
float *  y 
)
static

在文件 cipv_camera.cc175 行定义.

177 {
178 // Option 1. Straight model;
179 // *x = time_unit * velocity * static_cast<float>(tick);
180 // *y = 0.0f;
181
182 // Option 2. Sphere model
183 float adjusted_velocity = std::max(kMinVelocity, velocity);
184 float theta = static_cast<float>(tick) * time_unit * yaw_rate;
185 float displacement = static_cast<float>(tick) * time_unit * adjusted_velocity;
186 *x = displacement * static_cast<float>(cos(theta));
187 *y = displacement * static_cast<float>(sin(theta));
188
189 // Option 3. Bicycle model
190 // TODO(techoe): Apply bicycle model for vehicle dynamics (need wheel base)
191
192 return true;
193}

该类的文档由以下文件生成: