/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "DMABUFHEAPS"

#include <BufferAllocator/BufferAllocator.h>

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <ion/ion.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/ion_4.12.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_set>

#include <android-base/logging.h>
#include <android-base/unique_fd.h>

static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/";
static constexpr char kIonDevice[] = "/dev/ion";
static constexpr char kIonSystemHeapName[] = "ion_system_heap";

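/* Logs which buffer interface (DMA-BUF heaps or ION) is in use, once per allocator. */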
void BufferAllocator::LogInterface(const std::string& interface) {
    if (!logged_interface_) {
        LOG(INFO) << "Using : " << interface;
        logged_interface_ = true;
    }
}

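/*
 * Returns the fd of the DMA-BUF heap named @heap_name, opening and caching
 * /dev/dma_heap/<heap_name> on first use. Returns -errno if the heap device
 * cannot be opened.
 */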
int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
    std::shared_lock<std::shared_mutex> slock(dmabuf_heap_fd_mutex_);

    /* Check if the heap has already been opened. */
    auto it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end())
        return it->second;

    slock.unlock();

    /*
     * The heap device needs to be opened; use a unique_lock since
     * dmabuf_heap_fds_ needs to be modified.
     */
    std::unique_lock<std::shared_mutex> ulock(dmabuf_heap_fd_mutex_);

    /*
     * Check again whether the heap has been opened in the meantime, to prevent
     * racing threads from opening the heap device multiple times.
     */
    it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end()) return it->second;

    std::string heap_path = kDmaHeapRoot + heap_name;
    int fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC));
    if (fd < 0) return -errno;

    LOG(INFO) << "Using DMA-BUF heap named: " << heap_name;

    auto ret = dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
    CHECK(ret.second);
    return fd;
}

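/*
 * Detects whether the ION driver exposes the legacy or the modern interface
 * and sets up the default system (cached/uncached) heap mappings accordingly.
 */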
void BufferAllocator::QueryIonHeaps() {
    uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_);
    if (uses_legacy_ion_iface_) {
        LogInterface("Legacy ion heaps");
        MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
        MapNameToIonMask(kDmabufSystemUncachedHeapName, ION_HEAP_SYSTEM_MASK);
        return;
    }

    int heap_count;
    int ret = ion_query_heap_cnt(ion_fd_, &heap_count);
    if (ret == 0) {
        ion_heap_info_.resize(heap_count, {});
        ret = ion_query_get_heaps(ion_fd_, heap_count, ion_heap_info_.data());
    }

    // Abort if the heap query fails.
    CHECK(ret == 0)
            << "Non-legacy ION implementation must support heap information queries";
    LogInterface("Non-legacy ION heaps");

    /*
     * No error checking here; some devices use a different name for the ION
     * system heap.
     */
    MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName, ION_FLAG_CACHED);
    MapNameToIonName(kDmabufSystemUncachedHeapName, kIonSystemHeapName);
}

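/* Opens /dev/ion if it exists and queries its heaps; otherwise only DMA-BUF heaps are used. */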
BufferAllocator::BufferAllocator() {
    ion_fd_.reset(TEMP_FAILURE_RETRY(open(kIonDevice, O_RDONLY | O_CLOEXEC)));
    if (ion_fd_ >= 0)
        QueryIonHeaps();
}

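/* Records a mapping from @heap_name to a legacy ION heap mask and flags. */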
int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
                                      unsigned int ion_heap_flags) {
    if (!ion_heap_mask)
        return -EINVAL;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;
    return 0;
}

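/* Looks up the ION heap id for @heap_name in the heap info returned by the kernel. */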
int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) {
    for (auto& it : ion_heap_info_) {
        if (heap_name == it.name) {
            *heap_id = it.heap_id;
            return 0;
        }
    }

    LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return -EINVAL;
}

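/* Records a mapping from @heap_name to the non-legacy ION heap named @ion_heap_name. */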
int BufferAllocator::MapNameToIonName(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags) {
    unsigned int ion_heap_id = 0;
    auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id);
    if (ret < 0)
        return ret;

    unsigned int ion_heap_mask = 1 << ion_heap_id;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;

    return 0;
}

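/*
 * Maps a DMA-BUF heap name to an equivalent ION heap so that allocations can
 * fall back to ION on devices without that DMA-BUF heap. The mapping is
 * skipped when the DMA-BUF heap exists or when ION is not supported.
 */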
int BufferAllocator::MapNameToIonHeap(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags,
                                      unsigned int legacy_ion_heap_mask,
                                      unsigned int legacy_ion_heap_flags) {
    /* if the DMA-BUF Heap exists, we can ignore ion mappings */
    int ret = OpenDmabufHeap(heap_name);
    if (ret >= 0)
        return 0;

    /* If ION support is not detected, ignore the mappings */
    if (ion_fd_ < 0) return 0;

    if (uses_legacy_ion_iface_ || ion_heap_name == "") {
        ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags);
    } else if (!ion_heap_name.empty()) {
        ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags);
    }

    return ret;
}

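/* Retrieves (and caches) the ION heap mask and flags to use for @heap_name. */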
int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
    int ret = 0;

    std::shared_lock<std::shared_mutex> slock(heap_name_to_config_mutex_);

    auto it = heap_name_to_config_.find(heap_name);
    if (it != heap_name_to_config_.end()) {
        heap_config = it->second;
        return ret;
    }

    slock.unlock();

    if (uses_legacy_ion_iface_) {
        ret = -EINVAL;
    } else {
        unsigned int heap_id;
        ret = GetIonHeapIdByName(heap_name, &heap_id);
        if (ret == 0) {
            heap_config.mask = 1 << heap_id;
            heap_config.flags = 0;
            /* save it so that this lookup does not need to happen again */
            std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
            heap_name_to_config_[heap_name] = heap_config;
        }
    }

    if (ret)
        LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return ret;
}

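/* Allocates @len bytes from the DMA-BUF heap opened at @fd and returns the new buffer fd. */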
int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len, int fd) {
    if (fd < 0) return fd;

    struct dma_heap_allocation_data heap_data{
        .len = len,  // length of data to be allocated in bytes
        .fd_flags = O_RDWR | O_CLOEXEC,  // permissions for the memory to be allocated
    };

    auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
    if (ret < 0) {
        PLOG(ERROR) << "Unable to allocate from DMA-BUF heap: " << heap_name;
        return ret;
    }

    if (heap_data.fd >= 0) {
        if (DmabufSetName(heap_data.fd, heap_name))
            PLOG(WARNING) << "Unable to name DMA buffer for: " << heap_name;
    }

    return heap_data.fd;
}

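/* Names the DMA-BUF so it can be identified in kernel accounting and debug output. */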
int BufferAllocator::DmabufSetName(unsigned int dmabuf_fd, const std::string& name) {
    /*
     * Truncate the name to avoid failure when it is too long. length() does
     * not count the terminating '\0', but the kernel does, and the ioctl()
     * fails when the length (including '\0') reaches DMA_BUF_NAME_LEN. So
     * limit the name to at most 'DMA_BUF_NAME_LEN - 1' characters.
     */
    const std::string truncated_name = name.substr(0, DMA_BUF_NAME_LEN - 1);
    return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, truncated_name.c_str()));
}

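/* Allocates @len bytes from the ION heap mapped to @heap_name and returns the buffer fd. */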
int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len,
                              unsigned int heap_flags, size_t legacy_align) {
    IonHeapConfig heap_config;
    auto ret = GetIonConfig(heap_name, heap_config);
    if (ret)
        return ret;

    int alloc_fd = -1;
    unsigned int flags = heap_config.flags | heap_flags;
    ret = ion_alloc_fd(ion_fd_, len, legacy_align, heap_config.mask, flags, &alloc_fd);
    if (ret) {
        PLOG(ERROR) << "allocation fails for ion heap with mask: " << heap_config.mask
                    << " and flags: " << flags;
        return ret;
    }
    return alloc_fd;
}

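/*
 * Allocates @len bytes from the DMA-BUF heap @heap_name if it exists on this
 * device; otherwise falls back to the ION heap that was mapped to that name.
 */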
int BufferAllocator::Alloc(const std::string& heap_name, size_t len,
                           unsigned int heap_flags, size_t legacy_align) {
    int dma_buf_heap_fd = OpenDmabufHeap(heap_name);
    if (dma_buf_heap_fd >= 0) return DmabufAlloc(heap_name, len, dma_buf_heap_fd);

    /*
     * Fall back to ION only when the DMA-BUF heap could not be opened, i.e.
     * when it does not exist on this device.
     */
    return IonAlloc(heap_name, len, heap_flags, legacy_align);
}

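/*
 * Allocates from the system heap, preferring the uncached variant when the
 * CPU does not need to access the buffer and that variant is available.
 */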
int BufferAllocator::AllocSystem(bool cpu_access_needed, size_t len, unsigned int heap_flags,
                                 size_t legacy_align) {
    if (!cpu_access_needed) {
        /*
         * The CPU does not need to access the allocated buffer, so try to
         * allocate from the 'system-uncached' heap after querying for its
         * existence.
         */
        static bool uncached_dmabuf_system_heap_support = [this]() -> bool {
            auto dmabuf_heap_list = this->GetDmabufHeapList();
            return (dmabuf_heap_list.find(kDmabufSystemUncachedHeapName) != dmabuf_heap_list.end());
        }();

        if (uncached_dmabuf_system_heap_support) {
            int dma_buf_heap_fd = OpenDmabufHeap(kDmabufSystemUncachedHeapName);
            return (dma_buf_heap_fd < 0)
                    ? dma_buf_heap_fd
                    : DmabufAlloc(kDmabufSystemUncachedHeapName, len, dma_buf_heap_fd);
        }

        static bool uncached_ion_system_heap_support = [this]() -> bool {
            IonHeapConfig heap_config;
            auto ret = this->GetIonConfig(kDmabufSystemUncachedHeapName, heap_config);
            return (ret == 0);
        }();

        if (uncached_ion_system_heap_support)
            return IonAlloc(kDmabufSystemUncachedHeapName, len, heap_flags, legacy_align);
    }

    /*
     * Either 1) the CPU needs to access the allocated buffer, OR 2) it does
     * not, but the "system-uncached" heap is unsupported.
     */
    return Alloc(kDmabufSystemHeapName, len, heap_flags, legacy_align);
}

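/*
 * CPU cache sync through the legacy ION interface, optionally delegating to a
 * caller-supplied callback that receives a dup of the ION fd.
 */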
int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd,
                                      const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                      void* legacy_ion_custom_data) {
    if (!legacy_ion_cpu_sync_custom)
        return ion_sync_fd(ion_fd_, dmabuf_fd);

    // dup ion_fd_ so that we retain its ownership.
    int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get()));
    if (new_ion_fd < 0) {
        PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd;
        return new_ion_fd;
    }

    int ret = legacy_ion_cpu_sync_custom(new_ion_fd, dmabuf_fd, legacy_ion_custom_data);

    close(new_ion_fd);
    return ret;
}

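/* Issues the begin/end CPU-access sync for @dmabuf_fd via DMA_BUF_IOCTL_SYNC, or the legacy ION path. */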
int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
                            const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                            void* legacy_ion_custom_data) {
    if (uses_legacy_ion_iface_) {
        return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom,
                                legacy_ion_custom_data);
    }

    struct dma_buf_sync sync = {
        .flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
                 static_cast<uint64_t>(sync_type),
    };
    return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync));
}

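/* Prepares @dmabuf_fd for CPU access of type @sync_type; must be paired with CpuSyncEnd(). */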
int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type,
                                  const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                  void* legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, true, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);

    if (ret) PLOG(ERROR) << "CpuSyncStart() failure";
    return ret;
}

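/* Signals the end of CPU access to @dmabuf_fd that was started with CpuSyncStart(). */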
int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd, SyncType sync_type,
                                const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                void* legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, false, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);
    if (ret) PLOG(ERROR) << "CpuSyncEnd() failure";

    return ret;
}

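/* Returns the names of all DMA-BUF heaps exposed under /dev/dma_heap. */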
std::unordered_set<std::string> BufferAllocator::GetDmabufHeapList() {
    std::unordered_set<std::string> heap_list;
    std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(kDmaHeapRoot), closedir);

    if (dir) {
        struct dirent* dent;
        while ((dent = readdir(dir.get()))) {
            if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) continue;

            heap_list.insert(dent->d_name);
        }
    }

    return heap_list;
}

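/* Returns true if the legacy ION device (/dev/ion) exists and is readable. */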
bool BufferAllocator::CheckIonSupport() {
    static bool ion_support = (access(kIonDevice, R_OK) == 0);

    return ion_support;
}