/*
 * drivers/video/tegra/nvmap/nvmap.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/miscdevice.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>
#include <asm/outercache.h>

#include "nvmap_heap.h"

#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP __GFP_HIGHMEM
#else
#define __GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
#define NVMAP_ZEROED_PAGES __GFP_ZERO
#else
#define NVMAP_ZEROED_PAGES 0
#endif

#define GFP_NVMAP (__GFP_NVMAP | __GFP_NOWARN | NVMAP_ZEROED_PAGES)
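
/*
 * Illustrative sketch (assumption, not part of the original header): a
 * page-based nvmap allocation would typically request pages with the
 * composed mask, e.g.
 *
 *      struct page *page = alloc_page(GFP_NVMAP);
 *
 * which prefers highmem, suppresses allocation-failure warnings via
 * __GFP_NOWARN, and zeroes the page whenever NVMAP_ZEROED_PAGES expands
 * to __GFP_ZERO.
 */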

/* If set, force memory handed to userspace to be zeroed. */
extern bool zero_memory;

#define NVMAP_LAZY_VFREE

void _nvmap_handle_free(struct nvmap_handle *h);

/* Holds the max number of handles allocated per process at any time. */
extern u32 nvmap_max_handle_count;

#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL          PAGE_KERNEL
#define FLUSH_DCACHE_AREA       __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __clean_dcache_page(struct page *);
extern void __flush_dcache_page(struct page *);
#else
#define PG_PROT_KERNEL          pgprot_kernel
#define FLUSH_DCACHE_AREA       __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

struct nvmap_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

/* handles allocated using shared system memory (either IOVMM- or high-order
 * page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        bool contig;                    /* contiguous system memory */
        atomic_t ndirty;                /* count number of dirty pages */
};

struct nvmap_handle {
        struct rb_node node;            /* entry on global handle tree */
        atomic_t ref;                   /* reference count (i.e., # of duplications) */
        atomic_t pin;                   /* pin count */
        u32 flags;                      /* caching flags */
        size_t size;                    /* padded (as-allocated) size */
        size_t orig_size;               /* original (as-requested) size */
        u8 kind;                        /* memory kind (0=pitch, !0 -> blocklinear) */
        struct nvmap_client *owner;

        /*
         * dma_buf necessities. An attachment is made on dma_buf allocation to
         * facilitate the nvmap_pin* APIs.
         */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;

        struct nvmap_pgalloc pgalloc;
        struct nvmap_heap_block *carveout;

        bool heap_pgalloc;              /* handle is page allocated (sysmem / iovmm) */
        bool alloc;                     /* handle has memory allocated */
        u32 userflags;                  /* flags passed from userspace */
        void *vaddr;                    /* mapping used inside kernel */
        struct list_head vmas;          /* list of all user vma's */
        atomic_t share_count;           /* number of processes sharing the handle */

        void *nvhost_priv;              /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node node;
        atomic_t dupes;                 /* number of times to free on file close */
        atomic_t pin;                   /* number of times to unpin on free */
};

#ifdef CONFIG_NVMAP_PAGE_POOLS

/*
 * This is the default ratio defining pool size. It can be thought of as pool
 * size in either MB per GB or KB per MB. That means the max this number can
 * be is 1024 (all physical memory - not a very good idea) or 0 (no page pool
 * at all).
 */
#define NVMAP_PP_POOL_SIZE (128)
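
/*
 * Worked example of the ratio above: on a 2 GB device, a value of 128
 * (128 MB per GB) sizes the pool at 2 * 128 MB = 256 MB, i.e. 1/8 of
 * physical memory; with 4 KB pages that is 65536 pooled pages.
 */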

/*
 * The wakeup threshold is how many empty page slots there need to be in order
 * for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (4096)

/*
 * When memory does not require zeroing, this is the minimum number of pages
 * remaining in the page pools before the background allocator is woken up.
 * This essentially disables the page pools (unless the pool is extremely
 * small).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (2048)

struct nvmap_page_pool {
        struct mutex lock;
        u32 alloc;                      /* Alloc index. */
        u32 fill;                       /* Fill index. */
        u32 count;                      /* Number of pages in the table. */
        u32 length;                     /* Length of the pages array. */
        struct page **page_array;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
        u64 fills;
        u64 hits;
        u64 misses;
#endif
};

#define pp_empty(pp) \
        ((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp) \
        ((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])
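
/*
 * The pool is a ring buffer over page_array: pages are inserted at the
 * 'fill' index and removed at the 'alloc' index. When the two indices
 * meet, the state is ambiguous, so the slot under 'alloc' disambiguates:
 * a NULL entry means everything was consumed (empty), a non-NULL entry
 * means the filler lapped the allocator (full). Sketch of the two states
 * for a hypothetical 4-slot pool:
 *
 *      empty: alloc == fill == 2, page_array[2] == NULL
 *      full:  alloc == fill == 2, page_array[2] == some page
 */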

#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp)  nvmap_pp_inc_index((pp), &(pp)->fill)

/* Handle wrap around. */
static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
{
        *ind += 1;

        /* Wrap condition. */
        if (*ind >= pp->length)
                *ind = 0;
}

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr);
int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
int nvmap_page_pool_clear(void);
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);
#endif /* CONFIG_NVMAP_PAGE_POOLS */
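
/*
 * Minimal usage sketch for the batched pool API (hypothetical caller;
 * 'pages' and 'nr' are assumptions, not part of this header). The
 * *_locked variants require the pool lock to be held across the call:
 *
 *      int got;
 *
 *      nvmap_page_pool_lock(&nvmap_dev->pool);
 *      got = __nvmap_page_pool_alloc_lots_locked(&nvmap_dev->pool,
 *                                                pages, nr);
 *      nvmap_page_pool_unlock(&nvmap_dev->pool);
 *
 * Any shortfall (nr - got) would then come from the page allocator.
 */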

struct nvmap_carveout_commit {
        size_t commit;
        struct list_head list;
};

struct nvmap_client {
        const char *name;
        struct rb_root handle_refs;
        struct mutex ref_lock;
        struct task_struct *task;
        struct list_head list;
        int warned;
        struct nvmap_carveout_commit carveout_commit[0];
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t offs;
        atomic_t count;                 /* number of processes cloning the VMA */
};

struct nvmap_device {
        struct rb_root handles;
        spinlock_t handle_lock;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool pool;
#endif
        struct list_head clients;
        spinlock_t clients_lock;
};

enum nvmap_stats_t {
        NS_ALLOC = 0,
        NS_RELEASE,
        NS_UALLOC,
        NS_URELEASE,
        NS_KALLOC,
        NS_KRELEASE,
        NS_CFLUSH_RQ,
        NS_CFLUSH_DONE,
        NS_UCFLUSH_RQ,
        NS_UCFLUSH_DONE,
        NS_KCFLUSH_RQ,
        NS_KCFLUSH_DONE,
        NS_TOTAL,
        NS_NUM,
};

struct nvmap_stats {
        atomic64_t stats[NS_NUM];
        atomic64_t collect;
};

extern struct nvmap_stats nvmap_stats;
extern struct nvmap_device *nvmap_dev;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf you must get an
 * nvmap_handle_ref, as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (WARN_ON(!virt_addr_valid(h))) {
                pr_err("%s: invalid handle\n", current->group_leader->comm);
                return NULL;
        }

        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s attempt to get a freed handle\n",
                       __func__, current->group_leader->comm);
                atomic_dec(&h->ref);
                return NULL;
        }
        return h;
}
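
/*
 * Typical pairing sketch ('h' is a hypothetical handle pointer): every
 * successful nvmap_handle_get() must be balanced by nvmap_handle_put(),
 * and a NULL return means the handle was already being freed:
 *
 *      h = nvmap_handle_get(h);
 *      if (!h)
 *              return -EINVAL;
 *      ... operate on h->size, h->flags, etc. ...
 *      nvmap_handle_put(h);
 */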

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
#ifdef CONFIG_ARM64
                if (h->owner && !h->owner->warned) {
                        char task_comm[TASK_COMM_LEN];

                        h->owner->warned = 1;
                        get_task_comm(task_comm, h->owner->task);
                        pr_err("PID %d: %s: WARNING: NVMAP_HANDLE_WRITE_COMBINE should be used in place of NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
                               h->owner->task->pid, task_comm);
                }
#endif
                return pgprot_noncached(prot);
        } else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE) {
                return pgprot_dmacoherent(prot);
        }
        return prot;
}
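
/*
 * Usage sketch, e.g. from an mmap() implementation ('vma' is a
 * hypothetical caller variable): the handle's caching flags override
 * whatever protection the caller started from:
 *
 *      vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
 *
 * UNCACHEABLE maps to pgprot_noncached(), WRITE_COMBINE to
 * pgprot_dmacoherent(), and any other flag leaves 'prot' untouched.
 */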

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;

void nvmap_carveout_commit_add(struct nvmap_client *client,
                               struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                                    struct nvmap_carveout_node *node,
                                    size_t len);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
                                                 struct nvmap_handle *h);

struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *h);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                                struct nvmap_handle *h,
                                                bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
                                                 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref, u32 param,
                           u64 *result);

struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_id(u32 id);

/* MM definitions. */
extern size_t cache_maint_inner_threshold;
extern size_t cache_maint_outer_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_clean_cache(struct page **pages, int numpages);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr);
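
/*
 * Usage sketch: a writeback+invalidate over one whole handle could go
 * through the list-based entry point with a single-element batch. The
 * op constant is assumed to come from the nvmap UAPI in linux/nvmap.h,
 * and 'h' is a hypothetical handle:
 *
 *      u32 offset = 0, size = h->size;
 *
 *      nvmap_do_cache_maint_list(&h, &offset, &size,
 *                                NVMAP_CACHE_OP_WB_INV, 1);
 */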

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                      struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                                  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
                           struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct nvmap_client *client,
                      struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
        return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~2UL);
}
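
/*
 * The helpers above stash two state bits in the low bits of each struct
 * page pointer in a handle's page array (the pointers are at least
 * 4-byte aligned, so bits 0-1 are free): bit 0 marks the page dirty,
 * bit 1 marks it reserved. Round-trip sketch ('pages' and 'i' are
 * hypothetical): the tag travels with the array entry, so it must be
 * stripped with nvmap_to_page() before the entry is handed to any mm
 * API:
 *
 *      nvmap_page_mkdirty(&pages[i]);
 *      BUG_ON(!nvmap_page_dirty(pages[i]));
 *      flush_dcache_page(nvmap_to_page(pages[i]));
 *      nvmap_page_mkclean(&pages[i]);
 */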

/*
 * FIXME: assume user space requests for reserve operations
 * are page aligned.
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
                                   u32 offset, u32 size,
                                   void (*fn)(struct page **))
{
        int i;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;

        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        fn(&h->pgalloc.pages[i]);
        }
}

static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                                        u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
                                           u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}
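
/*
 * Usage sketch: a reserve operation on a page-allocated handle would
 * mark a byte range reserved, then later drop both the reservation and
 * the dirty state ('offset' and 'size' are assumed page aligned, per
 * the FIXME above):
 *
 *      nvmap_handle_mkreserved(h, offset, size);
 *      ...
 *      nvmap_handle_mkclean(h, offset, size);
 *      nvmap_handle_mkunreserved(h, offset, size);
 */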

static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        u32 i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}
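
/*
 * Usage sketch: cache-maintenance paths need real page pointers, so a
 * tagged array is first flattened with nvmap_pages() and the copy is
 * released with nvmap_altfree() ('nr' is a hypothetical page count):
 *
 *      struct page **clean = nvmap_pages(h->pgalloc.pages, nr);
 *
 *      if (clean) {
 *              nvmap_flush_cache(clean, nr);
 *              nvmap_altfree(clean, sizeof(*clean) * nr);
 *      }
 */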

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
                        u32 *sizes, u32 nr, u32 op);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */