/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #define LOG_TAG "DMABUFHEAPS"
18 
19 #include <BufferAllocator/BufferAllocator.h>
20 
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <ion/ion.h>
24 #include <linux/dma-buf.h>
25 #include <linux/dma-heap.h>
26 #include <linux/ion_4.12.h>
27 #include <stdlib.h>
28 #include <sys/types.h>
29 #include <unistd.h>
30 
31 #include <string>
32 
33 #include <android-base/logging.h>
34 #include <android-base/unique_fd.h>
35 
/* Directory that exposes one character device per dmabuf heap. */
static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/";
/* Legacy ion device node, used only when dmabuf heaps are unavailable. */
static constexpr char kIonDevice[] = "/dev/ion";
/* Conventional name of the system heap on the non-legacy ion interface. */
static constexpr char kIonSystemHeapName[] = "ion_system_heap";
39 
LogInterface(const std::string & interface)40 void BufferAllocator::LogInterface(const std::string& interface) {
41     if (!logged_interface_) {
42         LOG(INFO) << "Using : " << interface;
43         logged_interface_ = true;
44     }
45 }
46 
GetDmabufHeapFd(const std::string & heap_name)47 int BufferAllocator::GetDmabufHeapFd(const std::string& heap_name) {
48     /* check if we have this dmabuf heap open and if so return the fd for it. */
49     auto it = dmabuf_heap_fds_.find(heap_name);
50     if (it != dmabuf_heap_fds_.end())
51         return it->second;
52     return -1;
53 }
54 
OpenDmabufHeap(const std::string & heap_name)55 int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
56     /* Check if we have already opened this heap. */
57     auto fd = GetDmabufHeapFd(heap_name);
58     if (fd < 0) {
59         std::string heap_path = kDmaHeapRoot + heap_name;
60         fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDWR | O_CLOEXEC));
61         if (fd < 0) {
62             PLOG(ERROR) << "Unable to open dmabuf heap :" << heap_path;
63             return -errno;
64         }
65 
66         dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
67     }
68     return fd;
69 }
70 
QueryIonHeaps()71 void BufferAllocator::QueryIonHeaps() {
72     uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_);
73     if (uses_legacy_ion_iface_) {
74         LogInterface("Legacy ion heaps");
75         MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK);
76         return;
77     }
78 
79     int heap_count;
80     int ret = ion_query_heap_cnt(ion_fd_, &heap_count);
81     if (ret == 0) {
82         ion_heap_info_.resize(heap_count, {});
83         ret = ion_query_get_heaps(ion_fd_, heap_count, ion_heap_info_.data());
84     }
85 
86     // Abort if heap query fails
87     CHECK(ret == 0)
88             << "Non-legacy ION implementation must support heap information queries";
89     LogInterface("Non-legacy ION heaps");
90 
91     /*
92      * No error checking here, it is possible that devices may have used another name for
93      * the ion system heap.
94      */
95     MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName);
96 }
97 
BufferAllocator()98 BufferAllocator::BufferAllocator() {
99     if (OpenDmabufHeap("system") < 0) {
100         /* Since dmabuf heaps are not supported, try opening /dev/ion. */
101         ion_fd_.reset(TEMP_FAILURE_RETRY(open(kIonDevice, O_RDONLY| O_CLOEXEC)));
102 
103         /*
104          * If ion_fd_ is invalid, then neither dmabuf heaps nor ion is supported
105          * which is an invalid configuration. Abort in this case.
106          */
107         CHECK(ion_fd_ >= 0) << "Either dmabuf heaps or ion must be supported";
108         QueryIonHeaps();
109     } else {
110         LogInterface("DMABUF Heaps");
111     }
112 }
113 
MapNameToIonMask(const std::string & heap_name,unsigned int ion_heap_mask,unsigned int ion_heap_flags)114 int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
115                                       unsigned int ion_heap_flags) {
116     if (!ion_heap_mask)
117         return -EINVAL;
118     IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
119     heap_name_to_config_[heap_name] = heap_config;
120     return 0;
121 }
122 
GetIonHeapIdByName(const std::string & heap_name,unsigned int * heap_id)123 int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) {
124     for (auto& it : ion_heap_info_) {
125         if (heap_name == it.name) {
126             *heap_id = it.heap_id;
127             return 0;
128         }
129     }
130 
131     LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
132     return -EINVAL;
133 }
134 
MapNameToIonName(const std::string & heap_name,const std::string & ion_heap_name,unsigned int ion_heap_flags)135 int BufferAllocator::MapNameToIonName(const std::string& heap_name,
136                                       const std::string& ion_heap_name,
137                                       unsigned int ion_heap_flags) {
138     unsigned int ion_heap_id = 0;
139     auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id);
140     if (ret < 0)
141         return ret;
142 
143     unsigned int ion_heap_mask = 1 << ion_heap_id;
144     IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
145     heap_name_to_config_[heap_name] = heap_config;
146 
147     return 0;
148 }
149 
MapNameToIonHeap(const std::string & heap_name,const std::string & ion_heap_name,unsigned int ion_heap_flags,unsigned int legacy_ion_heap_mask,unsigned int legacy_ion_heap_flags)150 int BufferAllocator::MapNameToIonHeap(const std::string& heap_name,
151                                       const std::string& ion_heap_name,
152                                       unsigned int ion_heap_flags,
153                                       unsigned int legacy_ion_heap_mask,
154                                       unsigned int legacy_ion_heap_flags) {
155     int ret = 0;
156 
157     if (uses_legacy_ion_iface_) {
158         ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags);
159     } else if (!DmabufHeapsSupported() && !ion_heap_name.empty()) {
160         ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags);
161     }
162 
163     return ret;
164 }
165 
GetIonConfig(const std::string & heap_name,IonHeapConfig & heap_config)166 int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
167     int ret = 0;
168     auto it = heap_name_to_config_.find(heap_name);
169     if (it != heap_name_to_config_.end()) {
170         heap_config = it->second;
171     } else {
172         if (uses_legacy_ion_iface_) {
173             ret = -EINVAL;
174         } else {
175             unsigned int heap_id;
176             ret = GetIonHeapIdByName(heap_name, &heap_id);
177             if (ret == 0) {
178                 heap_config.mask = 1 << heap_id;
179                 heap_config.flags = 0;
180                 /* save it so that this lookup does not need to happen again */
181                 heap_name_to_config_[heap_name] = heap_config;
182             }
183         }
184     }
185 
186     if (ret)
187         LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
188     return ret;
189 }
190 
DmabufAlloc(const std::string & heap_name,size_t len)191 int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) {
192     int fd = OpenDmabufHeap(heap_name);
193     if (fd < 0) {
194         LOG(ERROR) << "Unsupported dmabuf heap: " << heap_name << " error: " << fd;
195         return fd;
196     }
197 
198     struct dma_heap_allocation_data heap_data{
199         .len = len,  // length of data to be allocated in bytes
200         .fd_flags = O_RDWR | O_CLOEXEC,  // permissions for the memory to be allocated
201     };
202 
203     auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
204     if (ret < 0)
205         return ret;
206 
207     return heap_data.fd;
208 }
209 
IonAlloc(const std::string & heap_name,size_t len,unsigned int heap_flags)210 int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len, unsigned int heap_flags) {
211     IonHeapConfig heap_config;
212     auto ret = GetIonConfig(heap_name, heap_config);
213     if (ret)
214         return ret;
215 
216     int alloc_fd = -1;
217     unsigned int flags = heap_config.flags | heap_flags;
218     ret = ion_alloc_fd(ion_fd_, len, 0, heap_config.mask, flags, &alloc_fd);
219     if (ret) {
220         PLOG(ERROR) << "allocation fails for ion heap with mask: " << heap_config.mask
221                     << " and flags: " << flags;
222         return ret;
223     }
224     return alloc_fd;
225 }
226 
Alloc(const std::string & heap_name,size_t len,unsigned int heap_flags)227 int BufferAllocator::Alloc(const std::string& heap_name, size_t len, unsigned int heap_flags) {
228     if (DmabufHeapsSupported()) {
229         return DmabufAlloc(heap_name, len);
230     }
231 
232     return IonAlloc(heap_name, len, heap_flags);
233 }
234 
LegacyIonCpuSync(unsigned int dmabuf_fd,const CustomCpuSyncLegacyIon & legacy_ion_cpu_sync_custom)235 int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd,
236                                       const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
237     if (!legacy_ion_cpu_sync_custom)
238         return ion_sync_fd(ion_fd_, dmabuf_fd);
239 
240     // dup ion_fd_ so that we retain its ownership.
241     int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get()));
242     if (new_ion_fd < 0) {
243         PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd;
244         return new_ion_fd;
245     }
246 
247     int ret = legacy_ion_cpu_sync_custom(new_ion_fd);
248 
249     close(new_ion_fd);
250     return ret;
251 }
252 
DoSync(unsigned int dmabuf_fd,bool start,SyncType sync_type,const CustomCpuSyncLegacyIon & legacy_ion_cpu_sync_custom)253 int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
254                             const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
255     if (uses_legacy_ion_iface_) {
256         return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom);
257     }
258 
259     struct dma_buf_sync sync = {
260         .flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
261                 static_cast<uint64_t>(sync_type),
262     };
263     return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync));
264 }
265 
CpuSyncStart(unsigned int dmabuf_fd,SyncType sync_type,const CustomCpuSyncLegacyIon & legacy_ion_cpu_sync_custom)266 int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type,
267                                   const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
268     auto it = fd_last_sync_type_.find(dmabuf_fd);
269     if (it != fd_last_sync_type_.end()) {
270         LOG(ERROR) << "CpuSyncEnd needs to be invoked for this fd first";
271         return -EINVAL;
272     }
273 
274     int ret = DoSync(dmabuf_fd, true /* start */, sync_type, legacy_ion_cpu_sync_custom);
275 
276     if (ret) {
277         PLOG(ERROR) << "CpuSyncStart() failure";
278     } else {
279         fd_last_sync_type_[dmabuf_fd] = sync_type;
280     }
281     return ret;
282 }
283 
CpuSyncEnd(unsigned int dmabuf_fd,const CustomCpuSyncLegacyIon & legacy_ion_cpu_sync_custom)284 int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd,
285                                 const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
286     auto it = fd_last_sync_type_.find(dmabuf_fd);
287     if (it == fd_last_sync_type_.end()) {
288         LOG(ERROR) << "CpuSyncStart() must be called before CpuSyncEnd()";
289         return -EINVAL;
290     }
291 
292     int ret = DoSync(dmabuf_fd, false /* start */, it->second /* sync_type */,
293                      legacy_ion_cpu_sync_custom);
294     if (ret) {
295         PLOG(ERROR) << "CpuSyncEnd() failure";
296     } else {
297         fd_last_sync_type_.erase(it);
298     }
299 
300     return ret;
301 }
302