blob.cc
/******************************************************************************
COPYRIGHT

All contributions by the University of California:
Copyright (c) 2014-2017 The Regents of the University of California (Regents)
All rights reserved.

All other contributions:
Copyright (c) 2014-2017, the respective contributors
All rights reserved.

Caffe uses a shared copyright model: each contributor holds copyright over
their contributions to Caffe. The project versioning records all such
contribution and copyright details. If a contributor wants to further mark
their specific copyright on a particular contribution, they should indicate
their copyright solely in the commit message of the change when it is
committed.

LICENSE

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

CONTRIBUTION AGREEMENT

By contributing to the BVLC/caffe repository through pull-request, comment,
or otherwise, the contributor releases their content to the
license and copyright terms herein.
 *****************************************************************************/

/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "modules/perception/base/blob.h"

#include <limits>

namespace apollo {
namespace perception {
namespace base {

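// Reshape to the legacy 4-axis (num, channels, height, width) layout by
// forwarding to the vector-based overload.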
template <typename Dtype>
void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
                          const int width) {
  std::vector<int> shape(4);
  shape[0] = num;
  shape[1] = channels;
  shape[2] = height;
  shape[3] = width;
  Reshape(shape);
}

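// Reshape to an arbitrary shape: recompute the element count with an
// int-overflow check, mirror the shape into the CPU/GPU-visible shape
// buffer, and reallocate the data holder only when the new count exceeds
// the current capacity.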
template <typename Dtype>
void Blob<Dtype>::Reshape(const std::vector<int>& shape) {
  CHECK_LE(shape.size(), kMaxBlobAxes);
  count_ = 1;
  shape_.resize(shape.size());
  if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
    shape_data_.reset(
        new SyncedMemory(shape.size() * sizeof(int), use_cuda_host_malloc_));
  }
  int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
  for (size_t i = 0; i < shape.size(); ++i) {
    CHECK_GE(shape[i], 0);
    if (count_ != 0) {
      CHECK_LE(shape[i], std::numeric_limits<int>::max() / count_)
          << "blob size exceeds std::numeric_limits<int>::max()";
    }
    count_ *= shape[i];
    shape_[i] = shape[i];
    shape_data[i] = shape[i];
  }
  if (count_ > capacity_) {
    capacity_ = count_;
    data_.reset(
        new SyncedMemory(capacity_ * sizeof(Dtype), use_cuda_host_malloc_));
  }
}

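// Reshape this blob to match the shape of another blob.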
template <typename Dtype>
void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
  Reshape(other.shape());
}

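// Legacy constructor taking explicit (num, channels, height, width) axes.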
template <typename Dtype>
Blob<Dtype>::Blob(const int num, const int channels, const int height,
                  const int width, const bool use_cuda_host_malloc)
    // capacity_ must be initialized before calling Reshape
    : capacity_(0), use_cuda_host_malloc_(use_cuda_host_malloc) {
  Reshape(num, channels, height, width);
}

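// Constructor taking an arbitrary shape vector.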
template <typename Dtype>
Blob<Dtype>::Blob(const std::vector<int>& shape,
                  const bool use_cuda_host_malloc)
    // capacity_ must be initialized before calling Reshape
    : capacity_(0), use_cuda_host_malloc_(use_cuda_host_malloc) {
  Reshape(shape);
}

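// Device-side pointer to the shape array (one int per axis).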
template <typename Dtype>
const int* Blob<Dtype>::gpu_shape() const {
  ACHECK(shape_data_);
  return (const int*)shape_data_->gpu_data();
}

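// Read-only host pointer to the element data.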
template <typename Dtype>
const Dtype* Blob<Dtype>::cpu_data() const {
  ACHECK(data_);
  return (const Dtype*)data_->cpu_data();
}

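// Adopt an externally allocated host buffer; the data holder is recreated
// first if its size no longer matches count_ elements.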
template <typename Dtype>
void Blob<Dtype>::set_cpu_data(Dtype* data) {
  ACHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size, use_cuda_host_malloc_));
  }
  data_->set_cpu_data(data);
}

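// Read-only device pointer to the element data.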
template <typename Dtype>
const Dtype* Blob<Dtype>::gpu_data() const {
  ACHECK(data_);
  return (const Dtype*)data_->gpu_data();
}

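// Adopt an externally allocated device buffer; the data holder is recreated
// first if its size no longer matches count_ elements.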
template <typename Dtype>
void Blob<Dtype>::set_gpu_data(Dtype* data) {
  ACHECK(data);
  // Make sure CPU and GPU sizes remain equal
  size_t size = count_ * sizeof(Dtype);
  if (data_->size() != size) {
    data_.reset(new SyncedMemory(size, use_cuda_host_malloc_));
  }
  data_->set_gpu_data(data);
}

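// Writable host pointer into the underlying SyncedMemory.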
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_cpu_data() {
  ACHECK(data_);
  return static_cast<Dtype*>(data_->mutable_cpu_data());
}

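// Writable device pointer into the underlying SyncedMemory.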
template <typename Dtype>
Dtype* Blob<Dtype>::mutable_gpu_data() {
  ACHECK(data_);
  return static_cast<Dtype*>(data_->mutable_gpu_data());
}

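// Share another blob's data holder; both blobs must hold the same number of
// elements.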
template <typename Dtype>
void Blob<Dtype>::ShareData(const Blob& other) {
  CHECK_EQ(count_, other.count());
  data_ = other.data();
}

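// Explicit instantiations for the supported element types.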
template class Blob<bool>;
template class Blob<uint8_t>;
template class Blob<int>;
template class Blob<unsigned int>;
template class Blob<float>;
template class Blob<double>;

}  // namespace base
}  // namespace perception
}  // namespace apollo
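
// A minimal usage sketch (illustrative only, not part of the original file;
// assumes the surrounding Apollo build environment):
//
//   apollo::perception::base::Blob<float> blob(
//       1, 3, 480, 640, /*use_cuda_host_malloc=*/false);
//   float* host = blob.mutable_cpu_data();  // writable host view
//   host[0] = 1.0f;
//   const float* dev = blob.gpu_data();     // read-only device view
//   blob.Reshape({2, 3, 480, 640});         // grows storage only when needed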