/*
 * drivers/video/tegra/nvmap/nvmap.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
23 #ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
24 #define __VIDEO_TEGRA_NVMAP_NVMAP_H
26 #include <linux/list.h>
28 #include <linux/mutex.h>
29 #include <linux/rbtree.h>
30 #include <linux/sched.h>
31 #include <linux/wait.h>
32 #include <linux/atomic.h>
33 #include <linux/dma-buf.h>
34 #include <linux/syscalls.h>
35 #include <linux/nvmap.h>
37 #include <linux/workqueue.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/dma-direction.h>
40 #include <linux/platform_device.h>
41 #include <linux/vmalloc.h>
42 #include <linux/slab.h>
44 #include <asm/cacheflush.h>
45 #include <asm/tlbflush.h>
47 #include <asm/outercache.h>
49 #include "nvmap_heap.h"
/*
 * GFP flags used for all nvmap page allocations.  With
 * CONFIG_NVMAP_HIGHMEM_ONLY pages come from highmem exclusively;
 * otherwise normal kernel memory is allowed as well.
 * NOTE(review): the #else/#endif lines were lost in extraction; two
 * alternative definitions of the same macro imply this exact structure.
 */
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP __GFP_HIGHMEM
#else
#define __GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#define GFP_NVMAP (__GFP_NVMAP | __GFP_NOWARN)
/*
 * Global tunables and forward declarations.
 * NOTE(review): damaged extraction -- the leading integers are original
 * file line numbers fused into the text; the numbering gaps (62-63,
 * 65-68, 74) show dropped lines, possibly #ifdef guards around
 * NVMAP_LAZY_VFREE -- verify against the upstream tree.
 */
59 #define NVMAP_NUM_PTES 64
61 extern bool zero_memory;
64 #define NVMAP_LAZY_VFREE
69 extern const struct file_operations nvmap_fd_fops;
70 void _nvmap_handle_free(struct nvmap_handle *h);
71 /* holds max number of handles allocated per process at any time */
72 extern u32 nvmap_max_handle_count;
73 extern size_t cache_maint_inner_threshold;
75 extern struct platform_device *nvmap_pdev;
77 #if defined(CONFIG_TEGRA_NVMAP)
78 #define nvmap_err(_client, _fmt, ...) \
79 dev_err(nvmap_client_to_device(_client), \
80 "%s: "_fmt, __func__, ##__VA_ARGS__)
82 #define nvmap_warn(_client, _fmt, ...) \
83 dev_warn(nvmap_client_to_device(_client), \
84 "%s: "_fmt, __func__, ##__VA_ARGS__)
86 #define nvmap_debug(_client, _fmt, ...) \
87 dev_dbg(nvmap_client_to_device(_client), \
88 "%s: "_fmt, __func__, ##__VA_ARGS__)
90 /* If set force zeroed memory to userspace. */
91 extern bool zero_memory;
94 #define PG_PROT_KERNEL PAGE_KERNEL
95 #define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_range(addr, PAGE_SIZE)
96 #define FLUSH_DCACHE_AREA __flush_dcache_area
97 #define outer_flush_range(s, e)
98 #define outer_inv_range(s, e)
99 #define outer_clean_range(s, e)
100 #define outer_flush_all()
101 #define outer_clean_all()
102 extern void __flush_dcache_page(struct page *);
104 #define PG_PROT_KERNEL pgprot_kernel
105 #define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_page(addr)
106 #define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
107 extern void __flush_dcache_page(struct address_space *, struct page *);
/*
 * Node linking one user VMA that maps a handle; these live on the
 * handle's vmas list (see struct nvmap_handle below).
 * NOTE(review): damaged extraction -- the closing "};" (and possibly
 * further members, original lines 113-115) are missing; verify upstream.
 */
110 struct nvmap_vma_list {
111 struct list_head list;
112 struct vm_area_struct *vma;
116 /* handles allocated using shared system memory (either IOVMM- or high-order
117 * page allocations */
118 struct nvmap_pgalloc {
120 bool contig; /* contiguous system memory */
121 u32 iovm_addr; /* is non-zero, if client need specific iova mapping */
122 atomic_t ndirty; /* count number of dirty pages */
/*
 * Central per-buffer object: one nvmap_handle exists per allocation and
 * is shared (via duplication) between clients.
 * NOTE(review): damaged extraction -- leading integers are fused line
 * numbers; gaps in the numbering (132, 135, 143-144, 147, 150, 152,
 * 155, 168, 171-172) show dropped lines.  In particular: a member is
 * missing at 168 -- presumably "struct mutex lock;" since
 * nvmap_kmaps_inc() below takes h->lock; lines 152/155 presumably
 * wrapped pgalloc/carveout in an anonymous union; and the closing "};"
 * is gone.  Verify all of these against the upstream tree.
 */
125 struct nvmap_handle {
126 struct rb_node node; /* entry on global handle tree */
127 atomic_t ref; /* reference count (i.e., # of duplications) */
128 atomic_t pin; /* pin count */
129 unsigned long flags; /* caching flags */
130 size_t size; /* padded (as-allocated) size */
131 size_t orig_size; /* original (as-requested) size */
133 u8 kind; /* memory kind (0=pitch, !0 -> blocklinear) */
134 void *map_resources; /* mapping resources associated with the
buffer (comment close restored -- original line 135 lost) */
136 struct nvmap_client *owner;
137 struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
138 time on validation in some cases.
139 if handle was duplicated by other client and
140 original client destroy ref, this field
141 has to be set to zero. In this case ref should be
142 obtained through validation */
/*
145 * dma_buf necessities. An attachment is made on dma_buf allocation to
146 * facilitate the nvmap_pin* APIs.
 */
148 struct dma_buf *dmabuf;
149 struct dma_buf_attachment *attachment;
151 struct nvmap_device *dev;
153 struct nvmap_pgalloc pgalloc;
154 struct nvmap_heap_block *carveout;
156 bool global; /* handle may be duplicated by other clients */
157 bool secure; /* zap IOVMM area on unpin */
158 bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
159 bool alloc; /* handle has memory allocated */
160 u32 heap_type; /* handle heap is allocated from */
161 unsigned int userflags; /* flags passed from userspace */
162 void *vaddr; /* mapping used inside kernel */
163 struct list_head vmas; /* list of all user vma's */
164 atomic_t umap_count; /* number of outstanding maps from user */
165 atomic_t kmap_count; /* number of outstanding map from kernel */
166 atomic_t share_count; /* number of processes sharing the handle */
167 struct list_head lru; /* list head to track the lru */
169 void *nvhost_priv; /* nvhost private data */
170 void (*nvhost_priv_delete)(void *priv);
173 /* handle_ref objects are client-local references to an nvmap_handle;
174 * they are distinct objects so that handles can be unpinned and
175 * unreferenced the correct number of times when a client abnormally
* exits (comment close restored -- original line 176 lost) */
/*
 * NOTE(review): damaged extraction -- a member is missing at original
 * line 179 (presumably the rb-tree linkage node for the client's
 * handle_refs tree) and the closing "};" is gone; verify upstream.
 */
177 struct nvmap_handle_ref {
178 struct nvmap_handle *handle;
180 atomic_t dupes; /* number of times to free on file close */
181 atomic_t pin; /* number of times to unpin on free */
184 #ifdef CONFIG_NVMAP_PAGE_POOLS
185 #define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
186 #define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
187 #define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
188 #define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
189 #define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)
192 * This is the default ratio defining pool size. It can be thought of as pool
193 * size in either MB per GB or KB per MB. That means the max this number can
194 * be is 1024 (all physical memory - not a very good idea) or 0 (no page pool
197 #define NVMAP_PP_POOL_SIZE (42)
200 * The wakeup threshold is how many empty page slots there need to be in order
201 * for the background allocater to be woken up.
203 #define NVMAP_PP_DEF_FILL_THRESH (1024)
206 * For when memory does not require zeroing this is the minimum number of pages
207 * remaining in the page pools before the background allocer is woken up. This
208 * essentially disables the page pools (unless its extremely small).
210 #define NVMAP_PP_ZERO_MEM_FILL_MIN (256)
/*
 * Circular-buffer page pool: alloc/fill indices chase each other through
 * page_array (see pp_empty/pp_full below).
 * NOTE(review): damaged extraction -- a member is missing at original
 * line 213, presumably "struct mutex lock;" since
 * nvmap_page_pool_lock() below takes pool->lock; the debug members under
 * CONFIG_NVMAP_PAGE_POOL_DEBUG (lines 221-227) and the closing "};" are
 * also gone.  Verify upstream.
 */
212 struct nvmap_page_pool {
214 u32 alloc; /* Alloc index. */
215 u32 fill; /* Fill index. */
216 u32 count; /* Number of pages in the table. */
217 u32 length; /* Length of the pages array. */
218 struct page **page_array;
220 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
/*
 * Ring-buffer state tests: when fill has caught up with alloc the pool is
 * either empty or full; the slot under the alloc index disambiguates
 * (NULL = empty, non-NULL = full).
 */
#define pp_empty(pp) \
	((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp) \
	((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])

/* Advance the respective ring index with wrap-around. */
#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp) nvmap_pp_inc_index((pp), &(pp)->fill)
236 /* Handle wrap around. */
237 static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
241 /* Wrap condition. */
242 if (*ind >= pp->length)
246 static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
248 mutex_lock(&pool->lock);
251 static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
253 mutex_unlock(&pool->lock);
256 int nvmap_page_pool_init(struct nvmap_device *dev);
257 int nvmap_page_pool_fini(struct nvmap_device *dev);
258 struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
259 bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
260 int nvmap_page_pool_clear(void);
/*
 * Per-open-fd client state: every process using nvmap gets one, holding
 * its private tree of handle references.
 * NOTE(review): damaged extraction -- members are missing at original
 * lines 264, 267-268 and 271-272, and the closing "};" is gone; verify
 * upstream.
 */
263 struct nvmap_client {
265 struct rb_root handle_refs;
266 struct mutex ref_lock;
269 struct task_struct *task;
270 struct list_head list;
/*
 * Private data hung off a user VMA that maps a handle.
 * NOTE(review): damaged extraction -- a member is missing at original
 * line 276 (presumably the mapping offset) and the closing "};" is gone;
 * verify upstream.
 */
274 struct nvmap_vma_priv {
275 struct nvmap_handle *handle;
277 atomic_t count; /* number of processes cloning the VMA */
280 #include <linux/mm.h>
281 #include <linux/miscdevice.h>
/*
 * Singleton device state: PTE bookkeeping for the kernel VM region, the
 * global handle tree, registered carveout heaps, page pool, client list
 * and the handle LRU.
 * NOTE(review): damaged extraction -- original lines 288-289, 295, 298
 * (the #endif for CONFIG_NVMAP_PAGE_POOLS) and 302-322 are missing.
 * The big 302-322 gap must contain the lru_lock spinlock used by the
 * nvmap_lru_* helpers below, the closing "};" of this struct, and the
 * opening of struct nvmap_stats with its NS_* counter enum: the trailing
 * stats[] member on the last line belongs to struct nvmap_stats, NOT to
 * nvmap_device.  Verify all of this upstream.
 */
283 struct nvmap_device {
284 struct vm_struct *vm_rgn;
285 pte_t *ptes[NVMAP_NUM_PTES];
286 unsigned long ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
287 unsigned int lastpte;
290 struct rb_root handles;
291 spinlock_t handle_lock;
292 wait_queue_head_t pte_wait;
293 struct miscdevice dev_user;
294 struct nvmap_carveout_node *heaps;
296 #ifdef CONFIG_NVMAP_PAGE_POOLS
297 struct nvmap_page_pool pool;
299 struct list_head clients;
300 struct mutex clients_lock;
301 struct list_head lru_handles;
323 atomic64_t stats[NS_NUM];
327 extern struct nvmap_stats nvmap_stats;
329 void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
330 void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
331 u64 nvmap_stats_read(enum nvmap_stats_t);
333 static inline void nvmap_ref_lock(struct nvmap_client *priv)
335 mutex_lock(&priv->ref_lock);
338 static inline void nvmap_ref_unlock(struct nvmap_client *priv)
340 mutex_unlock(&priv->ref_lock);
344 * NOTE: this does not ensure the continued existence of the underlying
345 * dma_buf. If you want ensure the existence of the dma_buf you must get an
346 * nvmap_handle_ref as that is what tracks the dma_buf refs.
348 static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
350 if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
351 pr_err("%s: %s attempt to get a freed handle\n",
352 __func__, current->group_leader->comm);
359 static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
361 if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
362 return pgprot_noncached(prot);
363 else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
364 return pgprot_writecombine(prot);
368 #else /* CONFIG_TEGRA_NVMAP */
369 struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
370 void nvmap_handle_put(struct nvmap_handle *h);
371 pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
373 #endif /* !CONFIG_TEGRA_NVMAP */
375 struct device *nvmap_client_to_device(struct nvmap_client *client);
377 pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);
379 pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);
381 void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);
383 pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);
385 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
386 struct nvmap_handle *handle,
389 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
390 struct nvmap_heap_block *b);
392 struct nvmap_carveout_node;
394 int nvmap_find_cache_maint_op(struct nvmap_device *dev,
395 struct nvmap_handle *h);
397 void nvmap_handle_put(struct nvmap_handle *h);
399 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
400 struct nvmap_handle *h);
402 struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
405 struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
406 struct nvmap_handle *h, bool skip_val);
408 struct nvmap_handle_ref *nvmap_create_handle_from_fd(
409 struct nvmap_client *client, int fd);
411 int nvmap_alloc_handle(struct nvmap_client *client,
412 struct nvmap_handle *h, unsigned int heap_mask,
413 size_t align, u8 kind,
416 void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);
418 void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);
420 int nvmap_pin_ids(struct nvmap_client *client,
421 unsigned int nr, struct nvmap_handle * const *ids);
423 void nvmap_unpin_ids(struct nvmap_client *priv,
424 unsigned int nr, struct nvmap_handle * const *ids);
426 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
428 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
430 int is_nvmap_vma(struct vm_area_struct *vma);
432 int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
433 struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
436 int nvmap_get_handle_param(struct nvmap_client *client,
437 struct nvmap_handle_ref *ref, u32 param, u64 *result);
439 struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
441 void nvmap_client_put(struct nvmap_client *c);
443 struct nvmap_handle *unmarshal_user_handle(__u32 handle);
/*
 * Flush a single kernel TLB page, routing through the ARM errata-798181
 * workaround when that config is enabled.
 * NOTE(review): braces and the #else/#endif restored (lost in extraction).
 */
static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
	flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
	FLUSH_TLB_PAGE(kaddr);
#endif
}
454 /* MM definitions. */
455 extern size_t cache_maint_outer_threshold;
456 extern int inner_cache_maint_threshold;
458 extern void v7_flush_kern_cache_all(void);
459 extern void v7_clean_kern_cache_all(void *);
460 extern void __flush_dcache_all(void *arg);
461 extern void __clean_dcache_all(void *arg);
463 void inner_flush_cache_all(void);
464 void inner_clean_cache_all(void);
465 void nvmap_flush_cache(struct page **pages, int numpages);
467 int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
468 u32 *sizes, int op, int nr);
470 /* Internal API to support dmabuf */
471 struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
472 struct nvmap_handle *handle);
473 struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
474 struct nvmap_handle *handle);
475 struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
476 struct nvmap_handle *h);
477 void __nvmap_free_sg_table(struct nvmap_client *client,
478 struct nvmap_handle *h, struct sg_table *sgt);
479 void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
480 void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
481 void *__nvmap_mmap(struct nvmap_handle *h);
482 void __nvmap_munmap(struct nvmap_handle *h, void *addr);
483 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
484 int __nvmap_get_handle_param(struct nvmap_client *client,
485 struct nvmap_handle *h, u32 param, u64 *result);
486 int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
487 unsigned long start, unsigned long end,
488 unsigned int op, bool clean_only_dirty);
489 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
491 struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
492 struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
493 int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
494 void __nvmap_unpin(struct nvmap_handle_ref *ref);
495 int __nvmap_dmabuf_fd(struct dma_buf *dmabuf, int flags);
497 void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
498 int nvmap_dmabuf_stash_init(void);
500 void *nvmap_altalloc(size_t len);
501 void nvmap_altfree(void *ptr, size_t len);
/*
 * nvmap stores per-page state in the low bits of page pointers
 * (bit 0 = dirty, bit 1 = reserved; see the helpers below).  Mask both
 * tag bits off to recover the real struct page pointer.
 */
static inline struct page *nvmap_to_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~3UL);
}
508 static inline bool nvmap_page_dirty(struct page *page)
510 return (unsigned long)page & 1UL;
/* Set the dirty tag (bit 0) on a tagged page pointer, in place. */
static inline void nvmap_page_mkdirty(struct page **page)
{
	*page = (struct page *)((unsigned long)*page | 1UL);
}
/* Clear the dirty tag (bit 0) on a tagged page pointer, in place. */
static inline void nvmap_page_mkclean(struct page **page)
{
	*page = (struct page *)((unsigned long)*page & ~1UL);
}
523 static inline bool nvmap_page_reserved(struct page *page)
525 return !!((unsigned long)page & 2UL);
/* Set the reserved tag (bit 1) on a tagged page pointer, in place. */
static inline void nvmap_page_mkreserved(struct page **page)
{
	*page = (struct page *)((unsigned long)*page | 2UL);
}
/* Clear the reserved tag (bit 1) on a tagged page pointer, in place. */
static inline void nvmap_page_mkunreserved(struct page **page)
{
	*page = (struct page *)((unsigned long)*page & ~2UL);
}
539 * FIXME: assume user space requests for reserve operations
542 static inline void nvmap_handle_mk(struct nvmap_handle *h,
543 u32 offset, u32 size,
544 void (*fn)(struct page **))
547 int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
548 int end_page = (offset + size) >> PAGE_SHIFT;
550 if (h->heap_pgalloc) {
551 for (i = start_page; i < end_page; i++)
552 fn(&h->pgalloc.pages[i]);
556 static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
557 u32 offset, u32 size)
559 nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
562 static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
563 u32 offset, u32 size)
565 nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
568 static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
569 u32 offset, u32 size)
571 nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
574 static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
579 pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
583 for (i = 0; i < nr_pages; i++)
584 pages[i] = nvmap_to_page(pg_pages[i]);
589 void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);
591 void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
594 void nvmap_vma_open(struct vm_area_struct *vma);
596 int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
597 u32 *sizes, u32 nr, u32 op);
599 static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
601 mutex_lock(&h->lock);
602 atomic_inc(&h->kmap_count);
603 mutex_unlock(&h->lock);
606 static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
608 atomic_inc(&h->kmap_count);
611 static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
613 atomic_dec(&h->kmap_count);
616 static inline void nvmap_umaps_inc(struct nvmap_handle *h)
618 mutex_lock(&h->lock);
619 atomic_inc(&h->umap_count);
620 mutex_unlock(&h->lock);
623 static inline void nvmap_umaps_dec(struct nvmap_handle *h)
625 atomic_dec(&h->umap_count);
628 static inline void nvmap_lru_add(struct nvmap_handle *h)
630 spin_lock(&nvmap_dev->lru_lock);
631 BUG_ON(!list_empty(&h->lru));
632 list_add_tail(&h->lru, &nvmap_dev->lru_handles);
633 spin_unlock(&nvmap_dev->lru_lock);
636 static inline void nvmap_lru_del(struct nvmap_handle *h)
638 spin_lock(&nvmap_dev->lru_lock);
640 INIT_LIST_HEAD(&h->lru);
641 spin_unlock(&nvmap_dev->lru_lock);
644 static inline void nvmap_lru_reset(struct nvmap_handle *h)
646 spin_lock(&nvmap_dev->lru_lock);
647 BUG_ON(list_empty(&h->lru));
649 list_add_tail(&h->lru, &nvmap_dev->lru_handles);
650 spin_unlock(&nvmap_dev->lru_lock);
653 #endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */