Apollo 10.0
Autonomous Driving Open Platform
perception_camera_updater.cc
/******************************************************************************
 * Copyright 2019 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include <limits>
#include <string>
#include <vector>

#include "opencv2/opencv.hpp"

#include "modules/common_msgs/basic_msgs/geometry.pb.h"
#include "modules/common_msgs/perception_msgs/perception_obstacle.pb.h"
#include "modules/dreamview/proto/camera_update.pb.h"

#include "cyber/common/file.h"

namespace apollo {
namespace dreamview {

using apollo::common::Quaternion;
using apollo::drivers::CompressedImage;
using apollo::localization::LocalizationEstimate;
using apollo::localization::Pose;
using apollo::transform::TransformStamped;

namespace {
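// Flattens an Eigen 4x4 matrix into a flat double array, following Eigen's
// default column-major storage order.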
void ConvertMatrixToArray(const Eigen::Matrix4d &matrix,
                          std::vector<double> *array) {
  const double *pointer = matrix.data();
  for (int i = 0; i < matrix.size(); ++i) {
    array->push_back(pointer[i]);
  }
}

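// Builds a 4x4 homogeneous transformation matrix from a quaternion rotation
// and a translation.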
template <typename Point>
void ConstructTransformationMatrix(const Quaternion &quaternion,
                                   const Point &translation,
                                   Eigen::Matrix4d *matrix) {
  matrix->setConstant(0);
  Eigen::Quaterniond q;
  q.x() = quaternion.qx();
  q.y() = quaternion.qy();
  q.z() = quaternion.qz();
  q.w() = quaternion.qw();
  matrix->block<3, 3>(0, 0) = q.normalized().toRotationMatrix();
  (*matrix)(0, 3) = translation.x();
  (*matrix)(1, 3) = translation.y();
  (*matrix)(2, 3) = translation.z();
  (*matrix)(3, 3) = 1;
}
}  // namespace

PerceptionCameraUpdater::PerceptionCameraUpdater(WebSocketHandler *websocket)
    : websocket_(websocket),
      node_(cyber::CreateNode("perception_camera_updater")) {
  InitReaders();
}

void PerceptionCameraUpdater::Start(DvCallback callback_api) {
  callback_api_ = callback_api;
  enabled_ = true;
  camera_update_.set_k_image_scale(kImageScale);
}

void PerceptionCameraUpdater::Stop() {
  if (enabled_) {
    localization_queue_.clear();
    image_buffer_.clear();
    tf_static_.clear();
    current_image_timestamp_ = 0.0;
  }
  enabled_ = false;
}

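// Finds the localization pose closest in time to the current image and
// returns it as a flattened 4x4 transformation matrix.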
void PerceptionCameraUpdater::GetImageLocalization(
    std::vector<double> *localization) {
  if (localization_queue_.empty()) {
    AERROR << "Localization queue is empty, cannot get localization for image, "
           << "image_timestamp: " << current_image_timestamp_;
    return;
  }

  double timestamp_diff = std::numeric_limits<double>::max();
  Pose image_pos;
  while (!localization_queue_.empty()) {
    const double tmp_diff = localization_queue_.front()->measurement_time() -
                            current_image_timestamp_;
    if (tmp_diff < 0) {
      timestamp_diff = tmp_diff;
      image_pos = localization_queue_.front()->pose();
      if (localization_queue_.size() > 1) {
        localization_queue_.pop_front();
      } else {
        // Keep at least one pose in the queue in case no localization arrives
        // between two requests.
        break;
      }
    } else {
      if (tmp_diff < std::fabs(timestamp_diff)) {
        image_pos = localization_queue_.front()->pose();
      }
      break;
    }
  }

  Eigen::Matrix4d localization_matrix;
  ConstructTransformationMatrix(image_pos.orientation(), image_pos.position(),
                                &localization_matrix);
  ConvertMatrixToArray(localization_matrix, localization);
}

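// Queries the latest static transform between frame_id and child_frame_id and
// converts it to a 4x4 matrix. Returns false if the transform is unavailable.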
bool PerceptionCameraUpdater::QueryStaticTF(const std::string &frame_id,
                                            const std::string &child_frame_id,
                                            Eigen::Matrix4d *matrix) {
  TransformStamped transform;
  if (tf_buffer_->GetLatestStaticTF(frame_id, child_frame_id, &transform)) {
    ConstructTransformationMatrix(transform.transform().rotation(),
                                  transform.transform().translation(), matrix);
    return true;
  }
  return false;
}

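// Composes the localization->camera transform by chaining the static
// transforms localization->novatel, novatel->velodyne128, and
// velodyne128->front_6mm.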
void PerceptionCameraUpdater::GetLocalization2CameraTF(
    std::vector<double> *localization2camera_tf) {
  Eigen::Matrix4d localization2camera_mat = Eigen::Matrix4d::Identity();

  // Since the "/tf" topic carries dynamic updates of world->novatel and
  // world->localization, the novatel->localization entry in the tf buffer
  // keeps changing and no longer represents a static transform. Thus we query
  // each static transform separately and compose them ourselves.
  Eigen::Matrix4d loc2novatel_mat;
  if (QueryStaticTF("localization", "novatel", &loc2novatel_mat)) {
    localization2camera_mat *= loc2novatel_mat;
  }

  Eigen::Matrix4d novatel2lidar_mat;
  if (QueryStaticTF("novatel", "velodyne128", &novatel2lidar_mat)) {
    localization2camera_mat *= novatel2lidar_mat;
  }

  Eigen::Matrix4d lidar2camera_mat;
  if (QueryStaticTF("velodyne128", "front_6mm", &lidar2camera_mat)) {
    localization2camera_mat *= lidar2camera_mat;
  }

  ConvertMatrixToArray(localization2camera_mat, localization2camera_tf);
}

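// Decodes an incoming compressed image (skipping H.265 video frames), scales
// it down, re-encodes it as JPEG, and stores it in camera_update_ along with
// its timestamp.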
void PerceptionCameraUpdater::OnCompressedImage(
    const std::shared_ptr<CompressedImage> &compressed_image) {
  if (!enabled_ ||
      compressed_image->format() == "h265" /* skip video format */) {
    return;
  }

  std::vector<uint8_t> compressed_raw_data(compressed_image->data().begin(),
                                           compressed_image->data().end());
  cv::Mat mat_image = cv::imdecode(compressed_raw_data, cv::IMREAD_COLOR);
  const int width = mat_image.cols;
  const int height = mat_image.rows;

  // Scale down the image to reduce data-transfer latency over the websocket
  // while keeping acceptable image quality.
  cv::resize(mat_image, mat_image,
             cv::Size(static_cast<int>(mat_image.cols * kImageScale),
                      static_cast<int>(mat_image.rows * kImageScale)),
             0, 0, cv::INTER_LINEAR);
  std::vector<uint8_t> tmp_buffer;
  cv::imencode(".jpg", mat_image, tmp_buffer, std::vector<int>() /* params */);

  double next_image_timestamp;
  if (compressed_image->has_measurement_time()) {
    next_image_timestamp = compressed_image->measurement_time();
  } else {
    next_image_timestamp = compressed_image->header().timestamp_sec();
  }

  std::lock_guard<std::mutex> lock(image_mutex_);
  if (next_image_timestamp < current_image_timestamp_) {
    // When replaying different bags, the timestamp may jump back to an earlier
    // time, so the localization queue needs to be cleared.
    localization_queue_.clear();
  }
  current_image_timestamp_ = next_image_timestamp;
  camera_update_.set_image(&(tmp_buffer[0]), tmp_buffer.size());
  camera_update_.set_image_aspect_ratio(static_cast<double>(width) / height);
}

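// Converts a raw RGB driver image to BGR, scales it down, JPEG-encodes it into
// image_buffer_, and stores it in camera_update_ along with its timestamp.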
void PerceptionCameraUpdater::OnImage(
    const std::shared_ptr<apollo::drivers::Image> &image) {
  if (!enabled_) {
    return;
  }
  cv::Mat mat(image->height(), image->width(), CV_8UC3,
              const_cast<char *>(image->data().data()), image->step());
  cv::cvtColor(mat, mat, cv::COLOR_RGB2BGR);
  cv::resize(mat, mat,
             cv::Size(static_cast<int>(image->width() * kImageScale),
                      static_cast<int>(image->height() * kImageScale)),
             0, 0, cv::INTER_LINEAR);
  cv::imencode(".jpg", mat, image_buffer_, std::vector<int>());
  double next_image_timestamp;
  if (image->has_measurement_time()) {
    next_image_timestamp = image->measurement_time();
  } else {
    next_image_timestamp = image->header().timestamp_sec();
  }
  std::lock_guard<std::mutex> lock(image_mutex_);
  if (next_image_timestamp < current_image_timestamp_) {
    localization_queue_.clear();
  }
  current_image_timestamp_ = next_image_timestamp;
  camera_update_.set_image(&(image_buffer_[0]), image_buffer_.size());
}

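// Buffers incoming localization estimates for later matching against image
// timestamps.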
void PerceptionCameraUpdater::OnLocalization(
    const std::shared_ptr<LocalizationEstimate> &localization) {
  if (!enabled_) {
    return;
  }

  std::lock_guard<std::mutex> lock(localization_mutex_);
  localization_queue_.push_back(localization);
}

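// Caches the 2D bounding boxes, ids, and sub-types of the latest perception
// obstacles.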
void PerceptionCameraUpdater::OnObstacles(
    const std::shared_ptr<apollo::perception::PerceptionObstacles> &obstacles) {
  if (channels_.empty()) return;
  perception_obstacle_enable_ = true;
  std::lock_guard<std::mutex> lock(obstacle_mutex_);
  bbox2ds.clear();
  obstacle_id.clear();
  obstacle_sub_type.clear();
  for (const auto &obstacle : obstacles->perception_obstacle()) {
    bbox2ds.push_back(obstacle.bbox2d());
    obstacle_id.push_back(obstacle.id());
    obstacle_sub_type.push_back(obstacle.sub_type());
  }
}

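// Creates cyber readers for the localization and perception-obstacle topics.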
void PerceptionCameraUpdater::InitReaders() {
  node_->CreateReader<LocalizationEstimate>(
      FLAGS_localization_topic,
      [this](const std::shared_ptr<LocalizationEstimate> &localization) {
        OnLocalization(localization);
      });
  node_->CreateReader<apollo::perception::PerceptionObstacles>(
      FLAGS_perception_obstacle_topic,
      [this](const std::shared_ptr<apollo::perception::PerceptionObstacles>
                 &obstacles) { OnObstacles(obstacles); });
}

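// Serializes the latest CameraUpdate, including the image pose, the
// localization->camera transform, and (when enabled) obstacle data, while
// holding the image, localization, and obstacle mutexes.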
void PerceptionCameraUpdater::GetUpdate(std::string *camera_update) {
  {
    std::lock(image_mutex_, localization_mutex_, obstacle_mutex_);
    std::lock_guard<std::mutex> lock1(image_mutex_, std::adopt_lock);
    std::lock_guard<std::mutex> lock2(localization_mutex_, std::adopt_lock);
    std::lock_guard<std::mutex> lock3(obstacle_mutex_, std::adopt_lock);
    std::vector<double> localization;
    GetImageLocalization(&localization);
    *camera_update_.mutable_localization() = {localization.begin(),
                                              localization.end()};
    std::vector<double> localization2camera_tf;
    GetLocalization2CameraTF(&localization2camera_tf);
    *camera_update_.mutable_localization2camera_tf() = {
        localization2camera_tf.begin(), localization2camera_tf.end()};
    // Modifying a protobuf message concurrently with serialization can cause
    // ByteSizeConsistencyError, so the locks above are required.
    if (perception_obstacle_enable_) {
      *camera_update_.mutable_bbox2d() = {bbox2ds.begin(), bbox2ds.end()};
      *camera_update_.mutable_obstacles_id() = {obstacle_id.begin(),
                                                obstacle_id.end()};
      *camera_update_.mutable_obstacles_sub_type() = {obstacle_sub_type.begin(),
                                                      obstacle_sub_type.end()};
    }
    camera_update_.SerializeToString(camera_update);
  }
}
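// Collects the names of all channels whose writers publish drivers.Image
// messages.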
void PerceptionCameraUpdater::GetChannelMsg(
    std::vector<std::string> *channels) {
  enabled_ = true;
  auto channelManager =
      apollo::cyber::service_discovery::TopologyManager::Instance()
          ->channel_manager();
  std::vector<apollo::cyber::proto::RoleAttributes> role_attr_vec;
  channelManager->GetWriters(&role_attr_vec);
  for (auto &role_attr : role_attr_vec) {
    const std::string &messageType = role_attr.message_type();
    if (messageType.rfind("drivers.Image") != std::string::npos) {
      channels->push_back(role_attr.channel_name());
    }
  }
  channels_.clear();
  channels_ = {channels->begin(), channels->end()};
}
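// Switches the image reader to a new camera channel and invokes the registered
// callback to update the current camera channel.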
bool PerceptionCameraUpdater::ChangeChannel(std::string channel) {
  if (!curr_channel_name.empty()) {
    node_->DeleteReader(curr_channel_name);
  }
  perception_camera_reader_.reset();
  perception_camera_reader_ = node_->CreateReader<drivers::Image>(
      channel,
      [this](const std::shared_ptr<drivers::Image> &image) { OnImage(image); });
  if (perception_camera_reader_ == nullptr) {
    return false;
  }
  curr_channel_name = channel;
  const bool update_res = callback_api_(channel);
  if (!update_res) {
    AERROR << "Failed to update the current camera channel.";
    return false;
  }
  return true;
}
}  // namespace dreamview
}  // namespace apollo