/*
 * include/linux/ion.h
 *
 * Copyright (C) 2011 Google, Inc.
 * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#ifndef _LINUX_ION_H
#define _LINUX_ION_H

#include <linux/ioctl.h>
#include <linux/types.h>

struct ion_handle;
typedef struct ion_handle *ion_user_handle_t;
/**
 * enum ion_heap_type - list of all possible types of heaps
 * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
 * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
 * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
 *				 carveout heap, allocations are physically
 *				 contiguous
 * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
 * @ION_HEAP_TYPE_CUSTOM:	 first type available for device specific
 *				 heaps
 * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
 *				 is used to identify the heaps, so only 32
 *				 total heap types are supported
 */
enum ion_heap_type {
	ION_HEAP_TYPE_SYSTEM,
	ION_HEAP_TYPE_SYSTEM_CONTIG,
	ION_HEAP_TYPE_CARVEOUT,
	ION_HEAP_TYPE_DMA,
	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
				 are at the end of this enum */
	ION_NUM_HEAPS = 16,
};

#define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)

/**
 * heap flags - the lower 16 bits are used by core ion, the upper 16
 * bits are reserved for use by the heaps themselves.
 */
#define ION_FLAG_CACHED 1		/* mappings of this buffer should be
					   cached, ion will do cache
					   maintenance when the buffer is
					   mapped for dma */

#ifdef __KERNEL__
#include <linux/err.h>
#include <mach/ion.h>
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;

/* This should be removed some day when phys_addr_t's are fully
   plumbed in the kernel, and all instances of ion_phys_addr_t should
   be converted to phys_addr_t.  For the time being many kernel interfaces
   do not accept phys_addr_t's and would have to be converted as well. */
#define ion_phys_addr_t unsigned long
#define ion_virt_addr_t unsigned long

/**
 * struct ion_platform_heap - defines a heap in the given platform
 * @type:	type of the heap from ion_heap_type enum
 * @id:		unique identifier for the heap.  When allocating, heaps with
 *		lower ids will be tried first.
 * @name:	used for debug purposes
 * @base:	base address of heap in physical memory if applicable
 * @size:	size of the heap in bytes if applicable
 * @memory_type: memory type used for the heap
 * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
 * @extra_data:	extra data specific to each heap type
 * @priv:	heap private data
 */
struct ion_platform_heap {
	enum ion_heap_type type;
	unsigned int id;
	const char *name;
	ion_phys_addr_t base;
	size_t size;
	enum ion_memory_types memory_type;
	unsigned int has_outer_cache;
	void *extra_data;
	void *priv;
};

/**
 * struct ion_platform_data - array of platform heaps passed from board file
 * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
 * @nr:    number of structures in the array
 * @request_region: function to be called when the number of allocations goes
 *						from 0 -> 1
 * @release_region: function to be called when the number of allocations goes
 *						from 1 -> 0
 * @setup_region:   function to be called upon ion registration
 * @heaps: array of platform_heap structures
 *
 * Provided by the board file in the form of platform data to a platform device.
 */
struct ion_platform_data {
	unsigned int has_outer_cache;
	int nr;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *(*setup_region)(void);
	struct ion_platform_heap *heaps;
};

#ifdef CONFIG_ION

/**
 * ion_reserve() - reserve memory for ion heaps if applicable
 * @data:	platform data specifying starting physical address and
 *		size
 *
 * Calls memblock_reserve to set aside memory for heaps that are
 * located at specific memory addresses or of specific sizes not
 * managed by the kernel
 */
void ion_reserve(struct ion_platform_data *data);

/**
 * ion_client_create() -  allocate a client and return it
 * @dev:	the global ion device
 * @heap_mask:	mask of heaps this client can allocate from
 * @name:	used for debugging
 */
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask, const char *name);

/**
 *  msm_ion_client_create - allocate a client using the ion_device specified in
 *				drivers/gpu/ion/msm/msm_ion.c
 *
 * heap_mask and name are the same as in ion_client_create, as are the
 * return values.
 */
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name);

/**
 * ion_client_destroy() -  frees a client and all its handles
 * @client:	the client
 *
 * Free the provided client and all its resources including
 * any handles it is holding.
 */
void ion_client_destroy(struct ion_client *client);

/**
 * ion_alloc - allocate ion memory
 * @client:	the client
 * @len:	size of the allocation
 * @align:	requested allocation alignment, lots of hardware blocks have
 *		alignment requirements of some kind
 * @heap_mask:	mask of heaps to allocate from, if multiple bits are set
 *		heaps will be tried in order from lowest to highest order bit
 * @flags:	heap flags, the low 16 bits are consumed by ion, the high 16
 *		bits are passed on to the respective heap and can be
 *		heap-specific
 *
 * Allocate memory in one of the heaps provided in heap_mask and return
 * an opaque handle to it.
 */
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags);

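/*
 * Example (illustrative sketch, not part of this header's API): a driver
 * allocating a 4K buffer from the system heap and releasing it again.  The
 * client name "example" and the surrounding error handling are placeholders.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = msm_ion_client_create(ION_HEAP_SYSTEM_MASK, "example");
 *	if (IS_ERR_OR_NULL(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, 4096, 4096, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR_OR_NULL(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	(use the buffer)
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */
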
/**
 * ion_free - free a handle
 * @client:	the client
 * @handle:	the handle to free
 *
 * Free the provided handle.
 */
void ion_free(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_phys - returns the physical address and len of a handle
 * @client:	the client
 * @handle:	the handle
 * @addr:	a pointer to put the address in
 * @len:	a pointer to put the length in
 *
 * This function queries the heap for a particular handle to get the
 * handle's physical address.  Its output is only correct if
 * a heap returns physically contiguous memory -- in other cases
 * this api should not be implemented -- ion_sg_table should be used
 * instead.  Returns -EINVAL if the handle is invalid.  This has
 * no implications on the reference counting of the handle --
 * the returned value may not be valid if the caller is not
 * holding a reference.
 */
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len);

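/*
 * Example (illustrative sketch): looking up the physical address of a
 * physically contiguous allocation, e.g. one made from a carveout heap.
 * "client" and "handle" are assumed to come from the ion_alloc() example
 * above.
 *
 *	ion_phys_addr_t pa;
 *	size_t size;
 *
 *	if (ion_phys(client, handle, &pa, &size))
 *		pr_err("handle is invalid or not physically contiguous\n");
 */
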
/**
 * ion_sg_table - return an sg_table describing a handle
 * @client:	the client
 * @handle:	the handle
 *
 * This function returns the sg_table describing
 * a particular ion handle.
 */
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle);

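/*
 * Example (illustrative sketch): walking the sg_table of a handle.  The
 * pr_debug() line and the loop variables are illustrative only.
 *
 *	struct sg_table *table;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	table = ion_sg_table(client, handle);
 *	if (IS_ERR_OR_NULL(table))
 *		return PTR_ERR(table);
 *	for_each_sg(table->sgl, sg, table->nents, i)
 *		pr_debug("chunk %d is %u bytes\n", i, sg->length);
 */
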
/**
 * ion_map_kernel - create mapping for the given handle
 * @client:	the client
 * @handle:	handle to map
 *
 * Map the given handle into the kernel and return a kernel address that
 * can be used to access this memory.  If no flags were specified at
 * allocation time, this will return a non-secure uncached mapping.
 */
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_unmap_kernel() - destroy a kernel mapping for a handle
 * @client:	the client
 * @handle:	handle to unmap
 */
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);

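/*
 * Example (illustrative sketch): temporarily mapping a buffer into the
 * kernel to clear it.  "len" stands for the allocation length and is a
 * placeholder.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */
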
/**
 * ion_share_dma_buf() - given an ion client, create a dma-buf fd
 * @client:	the client
 * @handle:	the handle
 */
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle);

/**
 * ion_import_dma_buf() - given a dma-buf fd from the ion exporter get handle
 * @client:	the client
 * @fd:		the dma-buf fd
 *
 * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf,
 * import that fd and return a handle representing it.  If a dma-buf from
 * another exporter is passed in this function will return ERR_PTR(-EINVAL)
 */
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);

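/*
 * Example (illustrative sketch): exporting a buffer as a dma-buf fd from
 * one client and importing it into another.  "client_a" and "client_b" are
 * placeholder clients.
 *
 *	int fd;
 *	struct ion_handle *imported;
 *
 *	fd = ion_share_dma_buf(client_a, handle);
 *	if (fd < 0)
 *		return fd;
 *	imported = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR_OR_NULL(imported))
 *		return PTR_ERR(imported);
 */
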
/**
 * ion_handle_get_flags - get the flags for a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the flags
 * @flags - pointer to store the flags
 *
 * Gets the current flags for a handle. These flags indicate various options
 * of the buffer (caching, security, etc.)
 */
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
				unsigned long *flags);

/**
 * ion_map_iommu - map the given handle into an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to map
 * @domain_num - domain number to map to
 * @partition_num - partition number to allocate iova from
 * @align - alignment for the iova
 * @iova_length - length of iova to map. If the iova length is
 *		greater than the handle length, the remaining
 *		address space will be mapped to a dummy buffer.
 * @iova - pointer to store the iova address
 * @buffer_size - pointer to store the size of the buffer
 * @flags - flags for options to map
 * @iommu_flags - flags specific to the iommu.
 *
 * Maps the handle into the iova space specified via domain number. Iova
 * will be allocated from the partition specified via partition_num.
 * Returns 0 on success, negative value on error.
 */
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags, unsigned long iommu_flags);

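/*
 * Example (illustrative sketch): mapping a buffer into an iommu domain and
 * unmapping it again.  "domain_num" and "partition_num" are placeholders
 * whose real values depend on the platform's iommu configuration; the size
 * passed as iova_length is obtained with ion_handle_get_size() as
 * recommended below.
 *
 *	unsigned long iova, buffer_size, size;
 *	int ret;
 *
 *	ret = ion_handle_get_size(client, handle, &size);
 *	if (ret)
 *		return ret;
 *	ret = ion_map_iommu(client, handle, domain_num, partition_num,
 *			    SZ_4K, size, &iova, &buffer_size, 0, 0);
 *	if (ret)
 *		return ret;
 *
 *	(device uses iova)
 *
 *	ion_unmap_iommu(client, handle, domain_num, partition_num);
 */
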
/**
 * ion_handle_get_size - get the allocated size of a given handle
 *
 * @client - client who allocated the handle
 * @handle - handle to get the size
 * @size - pointer to store the size
 *
 * Gives the allocated size of a handle.  Returns 0 on success, negative
 * value on error.
 *
 * NOTE: This is intended to be used only to get a size to pass to map_iommu.
 * You should *NOT* rely on this for any other usage.
 */
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
			unsigned long *size);

/**
 * ion_unmap_iommu - unmap the handle from an iommu
 *
 * @client - client who allocated the handle
 * @handle - handle to unmap
 * @domain_num - domain to unmap from
 * @partition_num - partition to unmap from
 *
 * Decrement the reference count on the iommu mapping. If the count is
 * 0, the mapping will be removed from the iommu.
 */
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
			int domain_num, int partition_num);

/**
 * ion_secure_heap - secure a heap
 *
 * @dev - the ion device
 * @heap_id - heap id to secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Secure a heap.
 * Returns 0 on success
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * ion_unsecure_heap - un-secure a heap
 *
 * @dev - the ion device
 * @heap_id - heap id to un-secure.
 * @version - version of content protection
 * @data - extra data needed for protection
 *
 * Un-secure a heap.
 * Returns 0 on success
 */
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
			void *data);

/**
 * msm_ion_do_cache_op - do cache operations.
 *
 * @client - pointer to ION client.
 * @handle - pointer to buffer handle.
 * @vaddr - virtual address to operate on.
 * @len - length of data to do cache operation on.
 * @cmd - cache operation to perform:
 *		ION_IOC_CLEAN_CACHES
 *		ION_IOC_INV_CACHES
 *		ION_IOC_CLEAN_INV_CACHES
 *
 * Returns 0 on success
 */
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
			void *vaddr, unsigned long len, unsigned int cmd);

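/*
 * Example (illustrative sketch): cleaning the CPU caches after writing to a
 * cached buffer through a kernel mapping.  "len" is a placeholder for the
 * length of the dirtied region.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR_OR_NULL(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0xff, len);
 *	msm_ion_do_cache_op(client, handle, vaddr, len, ION_IOC_CLEAN_CACHES);
 *	ion_unmap_kernel(client, handle);
 */
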
#else
static inline void ion_reserve(struct ion_platform_data *data)
{

}

static inline struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask, const char *name)
{
	return ERR_PTR(-ENODEV);
}

static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
					const char *name)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_client_destroy(struct ion_client *client) { }

static inline struct ion_handle *ion_alloc(struct ion_client *client,
					size_t len, size_t align,
					unsigned int heap_mask,
					unsigned int flags)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_free(struct ion_client *client,
	struct ion_handle *handle) { }


static inline int ion_phys(struct ion_client *client,
	struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
{
	return -ENODEV;
}

static inline struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	return ERR_PTR(-ENODEV);
}

static inline void *ion_map_kernel(struct ion_client *client,
	struct ion_handle *handle)
{
	return ERR_PTR(-ENODEV);
}

static inline void ion_unmap_kernel(struct ion_client *client,
	struct ion_handle *handle) { }

static inline int ion_share_dma_buf(struct ion_client *client,
				    struct ion_handle *handle)
{
	return -ENODEV;
}

static inline struct ion_handle *ion_import_dma_buf(struct ion_client *client,
						    int fd)
{
	return ERR_PTR(-ENODEV);
}

static inline int ion_handle_get_flags(struct ion_client *client,
	struct ion_handle *handle, unsigned long *flags)
{
	return -ENODEV;
}

static inline int ion_map_iommu(struct ion_client *client,
			struct ion_handle *handle, int domain_num,
			int partition_num, unsigned long align,
			unsigned long iova_length, unsigned long *iova,
			unsigned long *buffer_size,
			unsigned long flags,
			unsigned long iommu_flags)
{
	return -ENODEV;
}

static inline void ion_unmap_iommu(struct ion_client *client,
			struct ion_handle *handle, int domain_num,
			int partition_num)
{
	return;
}

static inline int ion_secure_heap(struct ion_device *dev, int heap_id,
					int version, void *data)
{
	return -ENODEV;
}

static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id,
					int version, void *data)
{
	return -ENODEV;
}

static inline int msm_ion_do_cache_op(struct ion_client *client,
			struct ion_handle *handle, void *vaddr,
			unsigned long len, unsigned int cmd)
{
	return -ENODEV;
}

#endif /* CONFIG_ION */
#endif /* __KERNEL__ */

/**
 * DOC: Ion Userspace API
 *
 * A client is created by opening /dev/ion; most operations are then
 * handled via the following ioctls.
 *
 */

/**
 * struct ion_allocation_data - metadata passed from userspace for allocations
 * @len:	size of the allocation
 * @align:	required alignment of the allocation
 * @heap_id_mask: mask of heap ids to allocate from
 * @flags:	flags passed to heap
 * @handle:	pointer that will be populated with a cookie to use to refer
 *		to this allocation
 *
 * Provided by userspace as an argument to the ioctl
 */
struct ion_allocation_data {
	size_t len;
	size_t align;
	unsigned int heap_id_mask;
	unsigned int flags;
	ion_user_handle_t handle;
};

/**
 * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
 * @handle:	a handle
 * @fd:		a file descriptor representing that handle
 *
 * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
 * the handle returned from ion alloc, and the kernel returns the file
 * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
 * provides the file descriptor and the kernel returns the handle.
 */
struct ion_fd_data {
	ion_user_handle_t handle;
	int fd;
};

/**
 * struct ion_handle_data - a handle passed to/from the kernel
 * @handle:	a handle
 */
struct ion_handle_data {
	ion_user_handle_t handle;
};

/**
 * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
 * @cmd:	the custom ioctl function to call
 * @arg:	additional data to pass to the custom ioctl, typically a user
 *		pointer to a predefined structure
 *
 * This works just like the regular cmd and arg fields of an ioctl.
 */
struct ion_custom_data {
	unsigned int cmd;
	unsigned long arg;
};

#define ION_IOC_MAGIC		'I'

/**
 * DOC: ION_IOC_ALLOC - allocate memory
 *
 * Takes an ion_allocation_data struct and returns it with the handle field
 * populated with the opaque handle for the allocation.
 */
#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
				      struct ion_allocation_data)

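/*
 * Userspace example (illustrative sketch): allocating a 4K buffer from the
 * system heap and freeing it.  Error checking of open() and ioctl() is
 * omitted for brevity.
 *
 *	int ion_fd = open("/dev/ion", O_RDONLY);
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_handle_data handle_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	handle_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_FREE, &handle_data);
 *	close(ion_fd);
 */
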
/**
 * DOC: ION_IOC_FREE - free memory
 *
 * Takes an ion_handle_data struct and frees the handle.
 */
#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)

/**
 * DOC: ION_IOC_MAP - get a file descriptor to mmap
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be used as an argument to mmap.
 */
#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)

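/*
 * Userspace example (illustrative sketch): turning the handle from the
 * ION_IOC_ALLOC example above into a file descriptor and mmap()ing it.
 *
 *	struct ion_fd_data map_data = { .handle = alloc_data.handle };
 *	void *ptr;
 *
 *	ioctl(ion_fd, ION_IOC_MAP, &map_data);
 *	ptr = mmap(NULL, alloc_data.len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   map_data.fd, 0);
 */
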
/**
 * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
 *
 * Takes an ion_fd_data struct with the handle field populated with a valid
 * opaque handle.  Returns the struct with the fd field set to a file
 * descriptor open in the current address space.  This file descriptor
 * can then be passed to another process.  The corresponding opaque handle can
 * be retrieved via ION_IOC_IMPORT.
 */
#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)

/**
 * DOC: ION_IOC_IMPORT - imports a shared file descriptor
 *
 * Takes an ion_fd_data struct with the fd field populated with a valid file
 * descriptor obtained from ION_IOC_SHARE and returns the struct with the
 * handle field set to the corresponding opaque handle.
 */
#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)

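/*
 * Userspace example (illustrative sketch): sharing a buffer with another
 * process.  send_fd()/recv_fd() stand in for passing the descriptor over a
 * unix domain socket and are hypothetical helpers.
 *
 *	In the exporting process:
 *		struct ion_fd_data share_data = { .handle = alloc_data.handle };
 *		ioctl(ion_fd, ION_IOC_SHARE, &share_data);
 *		send_fd(sock, share_data.fd);
 *
 *	In the importing process:
 *		struct ion_fd_data import_data = { .fd = recv_fd(sock) };
 *		ioctl(ion_fd, ION_IOC_IMPORT, &import_data);
 *		(import_data.handle now refers to the shared buffer)
 */
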
/**
 * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
 *
 * Takes the argument of the architecture specific ioctl to call and
 * passes appropriate userdata for that ioctl
 */
#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)

/**
 * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
 *
 * Deprecated in favor of using the dma_buf API correctly (syncing
 * will happen automatically when the buffer is mapped to a device).
 * If necessary, it should be used after touching a cached buffer from the
 * CPU; this will make the buffer in memory coherent.
 */
#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
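/*
 * Userspace example (illustrative sketch): making CPU writes to a cached,
 * mmap()ed buffer visible to a device.  "map_data" and "ptr" come from the
 * ION_IOC_MAP example above.
 *
 *	struct ion_fd_data sync_data = { .fd = map_data.fd };
 *
 *	memset(ptr, 0, alloc_data.len);
 *	ioctl(ion_fd, ION_IOC_SYNC, &sync_data);
 */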
#endif /* _LINUX_ION_H */