/*
 * drivers/video/tegra/nvmap/nvmap.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/nvmap.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/outercache.h>

#include "nvmap_heap.h"
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP __GFP_HIGHMEM
#else
#define __GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#define GFP_NVMAP (__GFP_NVMAP | __GFP_NOWARN)
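/*
 * Illustrative note (not from the original header): page allocations
 * feeding nvmap are expected to use the mask above, e.g.
 *
 *	struct page *page = alloc_page(GFP_NVMAP);
 *
 * so that highmem is preferred (or required, with
 * CONFIG_NVMAP_HIGHMEM_ONLY) and allocation-failure warnings are
 * suppressed via __GFP_NOWARN.
 */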
#define NVMAP_NUM_PTES 64

/* If set, force zeroed memory to userspace. */
extern bool zero_memory;

#define NVMAP_LAZY_VFREE

extern const struct file_operations nvmap_fd_fops;
void _nvmap_handle_free(struct nvmap_handle *h);
/* holds max number of handles allocated per process at any time */
extern u32 nvmap_max_handle_count;
extern size_t cache_maint_inner_threshold;

extern struct platform_device *nvmap_pdev;
#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)				\
	dev_err(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)				\
	dev_warn(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)				\
	dev_dbg(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)
#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_range(addr, PAGE_SIZE)
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __flush_dcache_page(struct page *);
#else /* !CONFIG_ARM64 */
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_page(addr)
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif /* CONFIG_ARM64 */
struct nvmap_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
/* handles allocated using shared system memory (either IOVMM- or high-order
 * page allocations) */
struct nvmap_pgalloc {
	struct page **pages;
	bool contig;		/* contiguous system memory */
	u32 iovm_addr;		/* non-zero if the client needs a specific iova mapping */
	atomic_t ndirty;	/* count of dirty pages */
};
struct nvmap_handle {
	struct rb_node node;	/* entry on global handle tree */
	atomic_t ref;		/* reference count (i.e., # of duplications) */
	atomic_t pin;		/* pin count */
	unsigned long flags;	/* caching flags */
	size_t size;		/* padded (as-allocated) size */
	size_t orig_size;	/* original (as-requested) size */
	size_t align;
	u8 kind;		/* memory kind (0 = pitch, !0 -> blocklinear) */
	void *map_resources;	/* mapping resources associated with the
				   buffer */
	struct nvmap_client *owner;
	struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
			time on validation in some cases.
			if the handle was duplicated by another client and
			the original client destroyed its ref, this field
			has to be set to zero. In that case the ref should be
			obtained through validation */

	/*
	 * dma_buf necessities. An attachment is made on dma_buf allocation to
	 * facilitate the nvmap_pin* APIs.
	 */
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;

	struct nvmap_device *dev;
	union {
		struct nvmap_pgalloc pgalloc;
		struct nvmap_heap_block *carveout;
	};
	bool global;		/* handle may be duplicated by other clients */
	bool secure;		/* zap IOVMM area on unpin */
	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
	bool alloc;		/* handle has memory allocated */
	u32 heap_type;		/* heap type the handle is allocated from */
	unsigned int userflags;	/* flags passed from userspace */
	void *vaddr;		/* mapping used inside kernel */
	struct list_head vmas;	/* list of all user vma's */
	atomic_t umap_count;	/* number of outstanding maps from user */
	atomic_t kmap_count;	/* number of outstanding maps from kernel */
	atomic_t share_count;	/* number of processes sharing the handle */
	struct list_head lru;	/* list head to track the lru */
	struct mutex lock;
	void *nvhost_priv;	/* nvhost private data */
	void (*nvhost_priv_delete)(void *priv);
};
/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * exits */
struct nvmap_handle_ref {
	struct nvmap_handle *handle;
	struct rb_node node;
	atomic_t dupes;	/* number of times to free on file close */
	atomic_t pin;	/* number of times to unpin on free */
};
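/*
 * Editorial sketch of the intended pairing (hedged, not part of the
 * original header): duplicating a handle within one client only bumps
 * the existing ref's 'dupes', while the global handle lifetime is
 * tracked separately by nvmap_handle::ref:
 *
 *	ref = nvmap_duplicate_handle(client, h, false);
 *	...
 *	nvmap_free_handle(client, h);	(drops one dupe; the ref and its
 *					 handle reference go away at zero)
 */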
#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)
/*
 * This is the default ratio defining pool size. It can be thought of as pool
 * size in either MB per GB or KB per MB. That means the max this number can
 * be is 1024 (all physical memory - not a very good idea) or 0 (no page pool
 * at all).
 */
#define NVMAP_PP_POOL_SIZE (42)
/*
 * The wakeup threshold is how many empty page slots there need to be in order
 * for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (1024)
/*
 * For when memory does not require zeroing, this is the minimum number of
 * pages remaining in the page pools before the background allocator is woken
 * up. This essentially disables the page pools (unless it is extremely small).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (256)
struct nvmap_page_pool {
	struct mutex lock;
	u32 alloc;		/* Alloc index. */
	u32 fill;		/* Fill index. */
	u32 count;		/* Number of pages in the table. */
	u32 length;		/* Length of the pages array. */
	struct page **page_array;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
	u64 allocs;
	u64 fills;
	u64 hits;
	u64 misses;
#endif
};
#define pp_empty(pp) \
	((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp) \
	((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])

#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp) nvmap_pp_inc_index((pp), &(pp)->fill)
/* Handle wrap around. */
static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
{
	*ind += 1;

	/* Wrap condition. */
	if (*ind >= pp->length)
		*ind = 0;
}
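/*
 * Editorial note: the pool is a circular buffer of page pointers in
 * which 'alloc' chases 'fill'. Since the two indices are equal both
 * when the pool is empty and when it is full, pp_empty()/pp_full()
 * above disambiguate by checking whether the shared slot still holds a
 * page. Worked example with length == 4: after three fills and one
 * alloc, fill == 3, alloc == 1, and two pages remain in page_array.
 */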
int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int nvmap_page_pool_clear(void);
#endif /* CONFIG_NVMAP_PAGE_POOLS */
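/*
 * Hedged usage sketch for the pool API above, assuming the single
 * device-wide pool embedded in struct nvmap_device below (this example
 * is not part of the original header, and the fill-failure handling is
 * an assumption based on the bool return type):
 *
 *	struct page *page = nvmap_page_pool_alloc(&nvmap_dev->pool);
 *	if (!page)
 *		page = alloc_page(GFP_NVMAP);	(fall back to the buddy)
 *	...
 *	if (!nvmap_page_pool_fill(&nvmap_dev->pool, page))
 *		__free_page(page);		(pool full, really free)
 */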
struct nvmap_client {
	const char		*name;
	struct rb_root		handle_refs;
	struct mutex		ref_lock;
	bool			kernel_client;
	atomic_t		count;
	struct task_struct	*task;
	struct list_head	list;
	u32			handle_count;
};
struct nvmap_vma_priv {
	struct nvmap_handle *handle;
	size_t		offs;
	atomic_t	count;	/* number of processes cloning the VMA */
};
#include <linux/mm.h>
#include <linux/miscdevice.h>
struct nvmap_device {
	struct vm_struct *vm_rgn;
	pte_t		*ptes[NVMAP_NUM_PTES];
	unsigned long	ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
	unsigned int	lastpte;

	struct rb_root	handles;
	spinlock_t	handle_lock;
	wait_queue_head_t pte_wait;
	struct miscdevice dev_user;
	struct nvmap_carveout_node *heaps;
	int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
	struct nvmap_page_pool pool;
#endif
	struct list_head clients;
	struct mutex	clients_lock;
	struct list_head lru_handles;
	spinlock_t	lru_lock;
};

extern struct nvmap_device *nvmap_dev;

enum nvmap_stats_t {
	NS_ALLOC = 0,
	NS_RELEASE,
	NS_UALLOC,
	NS_URELEASE,
	NS_KALLOC,
	NS_KRELEASE,
	NS_CFLUSH_RQ,
	NS_CFLUSH_DONE,
	NS_UCFLUSH_RQ,
	NS_UCFLUSH_DONE,
	NS_TOTAL,
	NS_NUM,
};

struct nvmap_stats {
	atomic64_t stats[NS_NUM];
	atomic64_t collect;
};
extern struct nvmap_stats nvmap_stats;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);
static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
	mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
	mutex_unlock(&priv->ref_lock);
}
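/*
 * Editorial note: ref_lock guards the client's handle_refs rbtree; the
 * "_locked" lookup declared below is expected to run under it, e.g.
 * (illustrative):
 *
 *	nvmap_ref_lock(client);
 *	ref = __nvmap_validate_locked(client, h);
 *	nvmap_ref_unlock(client);
 */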
/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf you must get an
 * nvmap_handle_ref as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
		pr_err("%s: %s attempt to get a freed handle\n",
		       __func__, current->group_leader->comm);
		atomic_dec(&h->ref);
		return NULL;
	}
	return h;
}
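/*
 * Illustrative pairing (not from the original header): a successful
 * nvmap_handle_get() must be balanced with nvmap_handle_put(), and the
 * NULL return must be checked since the handle may already be dead:
 *
 *	if (!nvmap_handle_get(h))
 *		return -EINVAL;
 *	...
 *	nvmap_handle_put(h);
 */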
static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
	if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
		return pgprot_noncached(prot);
	else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	return prot;
}
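/*
 * Example (illustrative): mapping paths derive the final protection
 * from the handle's caching flags instead of using the default
 * vma->vm_page_prot as-is:
 *
 *	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
 */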
#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);

#endif /* !CONFIG_TEGRA_NVMAP */
struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
					      struct nvmap_handle *handle,
					      unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b);
struct nvmap_carveout_node;

int nvmap_find_cache_maint_op(struct nvmap_device *dev,
			      struct nvmap_handle *h);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
						 struct nvmap_handle *h);
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
						struct nvmap_handle *h,
						bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
			struct nvmap_client *client, int fd);
int nvmap_alloc_handle(struct nvmap_client *client,
		       struct nvmap_handle *h, unsigned int heap_mask,
		       size_t align, u8 kind,
		       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
		     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
						 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref, u32 param, u64 *result);
struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_id(u32 id);
static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
	flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
	FLUSH_TLB_PAGE(kaddr);
#endif
}
/* MM definitions. */
extern size_t cache_maint_outer_threshold;
extern int inner_cache_maint_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
			      u32 *sizes, int op, int nr);
/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
				      struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
				    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
				  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
			   struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
			     struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
			   unsigned long start, unsigned long end,
			   unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
					   const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);
static inline struct page *nvmap_to_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
	return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
	*page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
	*page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
	return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
	*page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
	*page = (struct page *)((unsigned long)*page & ~2UL);
}
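/*
 * Editorial note: the helpers above exploit the fact that struct page
 * pointers are at least 4-byte aligned, which frees the two low bits
 * for per-page state: bit 0 = dirty, bit 1 = reserved. Worked example
 * (illustrative): a page pointer 0x...f000 that is both dirty and
 * reserved is stored as 0x...f003, and nvmap_to_page() masks with ~3UL
 * to recover the real pointer before it is dereferenced.
 */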
/*
 * FIXME: assume user space requests for reserve operations
 * are page aligned
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
				   u32 offset, u32 size,
				   void (*fn)(struct page **))
{
	int i;
	int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
	int end_page = (offset + size) >> PAGE_SHIFT;

	if (h->heap_pgalloc) {
		for (i = start_page; i < end_page; i++)
			fn(&h->pgalloc.pages[i]);
	}
}
static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
					u32 offset, u32 size)
{
	nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
					     u32 offset, u32 size)
{
	nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
					   u32 offset, u32 size)
{
	nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}
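/*
 * Hedged usage sketch (not part of the original header): cache
 * maintenance can skip clean pages (see the clean_only_dirty parameter
 * of __nvmap_do_cache_maint() above), so a flush path would typically
 * end with:
 *
 *	nvmap_handle_mkclean(h, offset, size);
 *
 * while nvmap_reserve_pages(), declared below, toggles the reserved bit
 * through nvmap_handle_mkreserved()/nvmap_handle_mkunreserved().
 */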
static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
	struct page **pages;
	u32 i;

	pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
	if (!pages)
		return NULL;

	for (i = 0; i < nr_pages; i++)
		pages[i] = nvmap_to_page(pg_pages[i]);

	return pages;
}
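/*
 * Editorial note: the array returned by nvmap_pages() comes from
 * nvmap_altalloc(), so it must be released with
 * nvmap_altfree(pages, sizeof(*pages) * nr_pages) rather than
 * kfree()/vfree().
 */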
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
		       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
			u32 *sizes, u32 nr, u32 op);
static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
{
	mutex_lock(&h->lock);
	atomic_inc(&h->kmap_count);
	mutex_unlock(&h->lock);
}

static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
{
	atomic_inc(&h->kmap_count);
}

static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
{
	atomic_dec(&h->kmap_count);
}

static inline void nvmap_umaps_inc(struct nvmap_handle *h)
{
	mutex_lock(&h->lock);
	atomic_inc(&h->umap_count);
	mutex_unlock(&h->lock);
}

static inline void nvmap_umaps_dec(struct nvmap_handle *h)
{
	atomic_dec(&h->umap_count);
}
static inline void nvmap_lru_add(struct nvmap_handle *h)
{
	spin_lock(&nvmap_dev->lru_lock);
	BUG_ON(!list_empty(&h->lru));
	list_add_tail(&h->lru, &nvmap_dev->lru_handles);
	spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_del(struct nvmap_handle *h)
{
	spin_lock(&nvmap_dev->lru_lock);
	list_del(&h->lru);
	INIT_LIST_HEAD(&h->lru);
	spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_reset(struct nvmap_handle *h)
{
	spin_lock(&nvmap_dev->lru_lock);
	BUG_ON(list_empty(&h->lru));
	list_del(&h->lru);
	list_add_tail(&h->lru, &nvmap_dev->lru_handles);
	spin_unlock(&nvmap_dev->lru_lock);
}
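/*
 * Editorial sketch of the LRU lifecycle (hedged): a handle is
 * nvmap_lru_add()ed once it owns pages, nvmap_lru_reset() moves it to
 * the tail on access so the head stays coldest, and nvmap_lru_del()
 * removes it on free. All three helpers take nvmap_dev->lru_lock, so
 * callers must not already hold it.
 */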
#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */