/*
 * ion.c
 *
 * Memory Allocator functions for ion
 *
 * Copyright 2011 Google, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define LOG_TAG "ion"

#include <errno.h>
#include <fcntl.h>
#include <linux/ion.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#include <ion/ion.h>
#include <linux/ion_4.19.h>

#include <log/log.h>

#define ION_ABI_VERSION_MODULAR_HEAPS 2

enum ion_version { ION_VERSION_UNKNOWN, ION_VERSION_MODERN, ION_VERSION_LEGACY };

static atomic_int g_ion_version = ATOMIC_VAR_INIT(ION_VERSION_UNKNOWN);

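/*
 * Returns nonzero if the kernel exposes the legacy, handle-based ion ABI and
 * zero for the modern fd-only ABI. The probe is performed once and the result
 * is cached in g_ion_version.
 */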
int ion_is_legacy(int fd) {
    int version = atomic_load_explicit(&g_ion_version, memory_order_acquire);
    if (version == ION_VERSION_UNKNOWN) {
        /**
         * Probe with the FREE ioctl: it is available only on legacy
         * kernels, not on modern ones.
         */
        int err = ion_free(fd, (ion_user_handle_t)NULL);
        version = (err == -ENOTTY) ? ION_VERSION_MODERN : ION_VERSION_LEGACY;
        atomic_store_explicit(&g_ion_version, version, memory_order_release);
    }
    return version == ION_VERSION_LEGACY;
}

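/* Opens the ion device node; returns an fd on success or -1 with errno set. */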
int ion_open() {
    int fd = open("/dev/ion", O_RDONLY | O_CLOEXEC);
    if (fd < 0) ALOGE("open /dev/ion failed: %s", strerror(errno));

    return fd;
}

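/* Closes an ion device fd; returns 0 on success or -errno on failure. */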
int ion_close(int fd) {
    int ret = close(fd);
    if (ret < 0) return -errno;
    return ret;
}

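/* Thin ioctl wrapper that logs failures and converts them to -errno. */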
static int ion_ioctl(int fd, int req, void* arg) {
    int ret = ioctl(fd, req, arg);
    if (ret < 0) {
        ALOGE("ioctl %x failed with code %d: %s", req, ret, strerror(errno));
        return -errno;
    }
    return ret;
}

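/*
 * Returns nonzero if ION_IOC_ABI_VERSION succeeds and the kernel reports an
 * ABI of at least ION_ABI_VERSION_MODULAR_HEAPS, i.e. modular heaps are in use.
 */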
int ion_is_using_modular_heaps(int fd) {
    int ion_abi_version = 0;
    int ret = 0;

    ret = ion_ioctl(fd, ION_IOC_ABI_VERSION, &ion_abi_version);
    return (ret == 0 && ion_abi_version >= ION_ABI_VERSION_MODULAR_HEAPS);
}

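/*
 * Legacy-only allocation: returns an ion handle in *handle. Fails with
 * -EINVAL on modern kernels, where buffers are handed out as fds instead.
 */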
int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
              ion_user_handle_t* handle) {
    int ret = 0;

    if ((handle == NULL) || (!ion_is_legacy(fd))) return -EINVAL;

    struct ion_allocation_data data = {
        .len = len, .align = align, .heap_id_mask = heap_mask, .flags = flags,
    };

    ret = ion_ioctl(fd, ION_IOC_ALLOC, &data);
    if (ret < 0) return ret;

    *handle = data.handle;

    return ret;
}

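/* Releases a legacy ion handle via ION_IOC_FREE. Also used by ion_is_legacy() as a probe. */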
int ion_free(int fd, ion_user_handle_t handle) {
    struct ion_handle_data data = {
        .handle = handle,
    };
    return ion_ioctl(fd, ION_IOC_FREE, &data);
}

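/*
 * Legacy-only: maps the buffer behind a handle into the caller's address
 * space via ION_IOC_MAP + mmap, returning both the mapping and the map fd.
 */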
int ion_map(int fd, ion_user_handle_t handle, size_t length, int prot, int flags, off_t offset,
            unsigned char** ptr, int* map_fd) {
    if (!ion_is_legacy(fd)) return -EINVAL;
    int ret;
    unsigned char* tmp_ptr;
    struct ion_fd_data data = {
        .handle = handle,
    };

    if (map_fd == NULL) return -EINVAL;
    if (ptr == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_MAP, &data);
    if (ret < 0) return ret;
    if (data.fd < 0) {
        ALOGE("map ioctl returned negative fd");
        return -EINVAL;
    }
    tmp_ptr = mmap(NULL, length, prot, flags, data.fd, offset);
    if (tmp_ptr == MAP_FAILED) {
        ALOGE("mmap failed: %s", strerror(errno));
        return -errno;
    }
    *map_fd = data.fd;
    *ptr = tmp_ptr;
    return ret;
}

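/* Legacy-only: converts an ion handle into a shareable dma-buf fd via ION_IOC_SHARE. */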
int ion_share(int fd, ion_user_handle_t handle, int* share_fd) {
    int ret;
    struct ion_fd_data data = {
        .handle = handle,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;
    if (share_fd == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_SHARE, &data);
    if (ret < 0) return ret;
    if (data.fd < 0) {
        ALOGE("share ioctl returned negative fd");
        return -EINVAL;
    }
    *share_fd = data.fd;
    return ret;
}

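/*
 * Allocates a buffer and returns it directly as a dma-buf fd in *handle_fd.
 * On modern kernels this uses ION_IOC_NEW_ALLOC; on legacy kernels it
 * allocates a handle, shares it to obtain an fd, then frees the handle, so
 * the caller only ever owns the fd.
 *
 * Minimal usage sketch (error handling elided; the heap mask below is
 * illustrative only, real callers typically pick a heap id reported by
 * ion_query_get_heaps()):
 *
 *     int ion_fd = ion_open();
 *     int buf_fd = -1;
 *     if (ion_alloc_fd(ion_fd, 4096, 0, 1 << 0, 0, &buf_fd) == 0) {
 *         void* p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
 *         if (p != MAP_FAILED) {
 *             // ... use the buffer ...
 *             munmap(p, 4096);
 *         }
 *         close(buf_fd);
 *     }
 *     ion_close(ion_fd);
 */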
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
                 int* handle_fd) {
    ion_user_handle_t handle;
    int ret;

    if (!handle_fd) return -EINVAL;

    if (!ion_is_legacy(fd)) {
        struct ion_new_allocation_data data = {
            .len = len,
            .heap_id_mask = heap_mask,
            .flags = flags,
        };

        ret = ion_ioctl(fd, ION_IOC_NEW_ALLOC, &data);
        if (ret < 0) return ret;
        *handle_fd = data.fd;
    } else {
        ret = ion_alloc(fd, len, align, heap_mask, flags, &handle);
        if (ret < 0) return ret;
        ret = ion_share(fd, handle, handle_fd);
        ion_free(fd, handle);
    }
    return ret;
}

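/* Legacy-only: turns a shared dma-buf fd back into an ion handle via ION_IOC_IMPORT. */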
int ion_import(int fd, int share_fd, ion_user_handle_t* handle) {
    int ret;
    struct ion_fd_data data = {
        .fd = share_fd,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;

    if (handle == NULL) return -EINVAL;

    ret = ion_ioctl(fd, ION_IOC_IMPORT, &data);
    if (ret < 0) return ret;
    *handle = data.handle;
    return ret;
}

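/* Legacy-only: asks the kernel to perform a cache sync for the buffer behind handle_fd (ION_IOC_SYNC). */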
int ion_sync_fd(int fd, int handle_fd) {
    struct ion_fd_data data = {
        .fd = handle_fd,
    };

    if (!ion_is_legacy(fd)) return -EINVAL;

    return ion_ioctl(fd, ION_IOC_SYNC, &data);
}

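/* Reports how many heaps the kernel exposes, for sizing the buffer passed to ion_query_get_heaps(). */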
int ion_query_heap_cnt(int fd, int* cnt) {
    int ret;
    struct ion_heap_query query;

    if (!cnt) return -EINVAL;
    memset(&query, 0, sizeof(query));

    ret = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
    if (ret < 0) return ret;

    *cnt = query.cnt;
    return ret;
}

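/*
 * Fills `buffers` (an array of cnt struct ion_heap_data entries) with the
 * kernel's heap descriptions via ION_IOC_HEAP_QUERY.
 */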
int ion_query_get_heaps(int fd, int cnt, void* buffers) {
    int ret;
    struct ion_heap_query query = {
        .cnt = cnt, .heaps = (uintptr_t)buffers,
    };

    ret = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
    return ret;
}