/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/miscdevice.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>
#ifndef CONFIG_ARM64
#include <asm/outercache.h>
#endif
#include "nvmap_heap.h"

#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP     __GFP_HIGHMEM
#else
#define __GFP_NVMAP     (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
#define NVMAP_ZEROED_PAGES     __GFP_ZERO
#else
#define NVMAP_ZEROED_PAGES     0
#endif

#define GFP_NVMAP              (__GFP_NVMAP | __GFP_NOWARN | NVMAP_ZEROED_PAGES)

#ifdef CONFIG_64BIT
#define NVMAP_LAZY_VFREE
#endif

struct page;
struct nvmap_device;

void _nvmap_handle_free(struct nvmap_handle *h);
/* holds the max number of handles allocated per process at any time */
extern u32 nvmap_max_handle_count;

/* If set, force zeroed memory to userspace. */
extern bool zero_memory;

#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __clean_dcache_page(struct page *);
extern void __flush_dcache_page(struct page *);
#else
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

struct nvmap_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
};

/* handles allocated using shared system memory (either IOVMM or high-order
 * page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        bool contig;                    /* contiguous system memory */
        atomic_t ndirty;        /* count number of dirty pages */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        u32 flags;              /* caching flags */
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
        struct nvmap_client *owner;

        /*
         * dma_buf necessities. An attachment is made on dma_buf allocation to
         * facilitate the nvmap_pin* APIs.
         */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;

        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        u32 heap_type;          /* heap the handle is allocated from */
        u32 userflags;          /* flags passed from userspace */
        void *vaddr;            /* mapping used inside kernel */
        struct list_head vmas;  /* list of all user VMAs */
        atomic_t share_count;   /* number of processes sharing the handle */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};
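
/*
 * For example, a client that takes the same handle twice (e.g. through
 * nvmap_duplicate_handle()) typically ends up with one handle_ref whose
 * dupes count is 2, so closing that client's file releases the handle
 * exactly twice. (Illustrative note; the exact bookkeeping lives in the
 * nvmap core, not in this header.)
 */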

#ifdef CONFIG_NVMAP_PAGE_POOLS

/*
 * This is the default ratio defining pool size. It can be thought of as pool
 * size in either MB per GB or KB per MB. The maximum value is 1024 (all of
 * physical memory - not a very good idea) and the minimum is 0 (no page pool
 * at all).
 */
#define NVMAP_PP_POOL_SIZE (128)
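
/*
 * Worked example (illustrative only): with the default ratio of 128, a
 * device with 1 GB of RAM would get a pool of roughly 128 MB, i.e. about
 * (total pages * 128) / 1024 pages. The actual sizing policy is implemented
 * in the page pool code, not in this header.
 */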

/*
 * The wakeup threshold is how many empty page slots there need to be in order
 * for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (4096)

/*
 * When memory does not require zeroing, this is the minimum number of pages
 * remaining in the page pools before the background allocator is woken up.
 * This essentially disables the page pools (unless it is set extremely low).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (2048)

struct nvmap_page_pool {
        struct mutex lock;
        u32 alloc;  /* Alloc index. */
        u32 fill;   /* Fill index. */
        u32 count;  /* Number of pages in the table. */
        u32 length; /* Length of the pages array. */
        struct page **page_array;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
        u64 fills;
        u64 hits;
        u64 misses;
#endif
};

#define pp_empty(pp)                            \
        ((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp)                             \
        ((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])
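
/*
 * Note: equal alloc and fill indices alone cannot tell a full ring from an
 * empty one, so the slot at the alloc index breaks the tie: if it still
 * holds a page the pool is full, otherwise it is empty.
 */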

#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp)  nvmap_pp_inc_index((pp), &(pp)->fill)

/* Advance a pool index, handling wrap around. */
static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
{
        *ind += 1;

        /* Wrap condition. */
        if (*ind >= pp->length)
                *ind = 0;
}

static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int __nvmap_page_pool_alloc_lots_locked(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr);
int __nvmap_page_pool_fill_lots_locked(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
int nvmap_page_pool_clear(void);
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);
#endif
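
/*
 * Illustrative use of the pool API (a sketch, not lifted from the driver):
 * callers try the pool first, fall back to the page allocator when the pool
 * is empty, and give pages back to the pool when freeing (releasing them
 * normally if the pool is already full):
 *
 *      struct page *page;
 *
 *      page = nvmap_page_pool_alloc(&nvmap_dev->pool);
 *      if (!page)
 *              page = alloc_page(GFP_NVMAP);
 *      ...
 *      if (!nvmap_page_pool_fill(&nvmap_dev->pool, page))
 *              __free_page(page);
 */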

struct nvmap_carveout_commit {
        size_t commit;
        struct list_head list;
};

struct nvmap_client {
        const char                      *name;
        struct rb_root                  handle_refs;
        struct mutex                    ref_lock;
        bool                            kernel_client;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        u32                             handle_count;
        u32                             next_fd;
        int                             warned;
        struct nvmap_carveout_commit    carveout_commit[0];
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

struct nvmap_device {
        struct rb_root  handles;
        spinlock_t      handle_lock;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool pool;
#endif
        struct list_head clients;
        spinlock_t      clients_lock;
};

enum nvmap_stats_t {
        NS_ALLOC = 0,
        NS_RELEASE,
        NS_UALLOC,
        NS_URELEASE,
        NS_KALLOC,
        NS_KRELEASE,
        NS_CFLUSH_RQ,
        NS_CFLUSH_DONE,
        NS_UCFLUSH_RQ,
        NS_UCFLUSH_DONE,
        NS_KCFLUSH_RQ,
        NS_KCFLUSH_DONE,
        NS_TOTAL,
        NS_NUM,
};

struct nvmap_stats {
        atomic64_t stats[NS_NUM];
        atomic64_t collect;
};

extern struct nvmap_stats nvmap_stats;
extern struct nvmap_device *nvmap_dev;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf you must get an
 * nvmap_handle_ref, as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (WARN_ON(!virt_addr_valid(h))) {
                pr_err("%s: invalid handle\n", current->group_leader->comm);
                return NULL;
        }

        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s attempt to get a freed handle\n",
                        __func__, current->group_leader->comm);
                atomic_dec(&h->ref);
                return NULL;
        }
        return h;
}
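
/*
 * Typical pairing (illustrative sketch, not taken from the driver sources):
 * every successful nvmap_handle_get() must be balanced by nvmap_handle_put()
 * once the caller is done with the handle:
 *
 *      h = nvmap_handle_get(h);
 *      if (!h)
 *              return -EINVAL;
 *      ... operate on h ...
 *      nvmap_handle_put(h);
 */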

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
#ifdef CONFIG_ARM64
                if (h->owner && !h->owner->warned) {
                        char task_comm[TASK_COMM_LEN];
                        h->owner->warned = 1;
                        get_task_comm(task_comm, h->owner->task);
                        pr_err("PID %d: %s: WARNING: "
                                "NVMAP_HANDLE_WRITE_COMBINE "
                                "should be used in place of "
                                "NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
                                h->owner->task->pid, task_comm);
                }
#endif
                return pgprot_noncached(prot);
        } else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_dmacoherent(prot);
        return prot;
}
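
/*
 * Example (a sketch under the assumption that the handle is page allocated;
 * the real kernel mappings are created in the nvmap core): a kernel virtual
 * mapping would combine the handle's caching flags with a kernel protection,
 * e.g.:
 *
 *      void *va = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1,
 *                            nvmap_pgprot(h, PG_PROT_KERNEL));
 */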

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
                               struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                                    struct nvmap_carveout_node *node,
                                    size_t len);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
                                                 struct nvmap_handle *h);

struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *h);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
                                                 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
                struct nvmap_handle_ref *ref, u32 param, u64 *result);

struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_id(u32 id);

/* MM definitions. */
extern size_t cache_maint_inner_threshold;
extern size_t cache_maint_outer_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_clean_cache(struct page **pages, int numpages);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr);

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                 struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                                  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
                           struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct nvmap_client *client,
                      struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

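/*
 * The helpers below encode per-page state in the two low bits of the
 * struct page pointers stored in nvmap_pgalloc.pages (the pointers are
 * at least 4-byte aligned, so these bits are otherwise unused):
 *   bit 0 - page is dirty
 *   bit 1 - page is reserved
 * nvmap_to_page() masks both bits off to recover the real page pointer.
 */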
static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
        return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~2UL);
}

/*
 * FIXME: we assume that user space requests for reserve operations
 * are page aligned.
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
                                   u32 offset, u32 size,
                                   void (*fn)(struct page **))
{
        int i;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;

        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        fn(&h->pgalloc.pages[i]);
        }
}
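
/*
 * Example: with 4 KiB pages, offset = 0x1000 and size = 0x2000 apply fn() to
 * pages 1 and 2 of the handle. A non page-aligned offset is rounded up by
 * PAGE_ALIGN(), so a partial page at the start of the range is skipped (see
 * the FIXME above).
 */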

static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                                        u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
                                           u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}

static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
                        u32 *sizes, u32 nr, u32 op);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */