/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/miscdevice.h>
#include <linux/nvmap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>
#ifndef CONFIG_ARM64
#include <asm/outercache.h>
#endif
#include "nvmap_heap.h"

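/*
 * Allocation flags used for nvmap system-memory (page) allocations. With
 * CONFIG_NVMAP_HIGHMEM_ONLY pages come from highmem only; otherwise normal
 * GFP_KERNEL allocations that may also use highmem are performed.
 * __GFP_NOWARN suppresses the kernel's page-allocation-failure warnings.
 */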
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP     __GFP_HIGHMEM
#else
#define __GFP_NVMAP     (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#define GFP_NVMAP       (__GFP_NVMAP | __GFP_NOWARN)

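/*
 * On 64-bit kernels, kernel-side mappings of handles are freed lazily rather
 * than immediately, presumably because vmalloc address space is plentiful
 * there; see the users of NVMAP_LAZY_VFREE elsewhere in the driver.
 */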
#ifdef CONFIG_64BIT
#define NVMAP_LAZY_VFREE
#endif

struct page;
struct nvmap_device;

void _nvmap_handle_free(struct nvmap_handle *h);
/* holds max number of handles allocated per process at any time */
extern u32 nvmap_max_handle_count;

/* If set, force zeroed memory to userspace. */
extern bool zero_memory;

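/*
 * Cache-maintenance helpers. ARM64 has no software-managed outer (L2) cache,
 * so the outer_* operations below are defined away as no-ops on that
 * architecture; on 32-bit ARM the real implementations come from
 * <asm/outercache.h>.
 */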
#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __clean_dcache_page(struct page *);
extern void __flush_dcache_page(struct page *);
#else
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

struct nvmap_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
        pid_t pid;
};

/* handles allocated using shared system memory (either IOVMM- or high-order
 * page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        bool contig;                    /* contiguous system memory */
        atomic_t ndirty;        /* count number of dirty pages */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        u32 flags;              /* caching flags */
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
        struct nvmap_client *owner;

        /*
         * dma_buf necessities. An attachment is made on dma_buf allocation to
         * facilitate the nvmap_pin* APIs.
         */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;

        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        u32 heap_type;          /* handle heap is allocated from */
        u32 userflags;          /* flags passed from userspace */
        void *vaddr;            /* mapping used inside kernel */
        struct list_head vmas;  /* list of all user VMAs */
        atomic_t share_count;   /* number of processes sharing the handle */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};

#ifdef CONFIG_NVMAP_PAGE_POOLS

/*
 * This is the default ratio defining pool size. It can be thought of as pool
 * size in either MB per GB or KB per MB. The maximum value is 1024 (all of
 * physical memory - not a very good idea) and the minimum is 0 (no page pool
 * at all).
 */
#define NVMAP_PP_POOL_SIZE (128)
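/*
 * Worked example (illustrative): with the default NVMAP_PP_POOL_SIZE of 128,
 * a device with 2 GB of RAM gets a page pool of about 128 MB per GB, i.e.
 * roughly 256 MB, or one eighth of physical memory.
 */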

/*
 * The wakeup threshold is how many empty page slots there need to be in order
 * for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (4096)

/*
 * For when memory does not require zeroing, this is the minimum number of
 * pages remaining in the page pools before the background allocator is woken
 * up. This essentially disables the page pools (unless the pool is extremely
 * small).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (2048)

struct nvmap_page_pool {
        struct mutex lock;
        u32 count;  /* Number of pages in the page list. */
        u32 max;    /* Max length of the page list. */
        struct list_head page_list;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
        u64 fills;
        u64 hits;
        u64 misses;
#endif
};

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
                                        struct page **pages, u32 nr);
int nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
                                       struct page **pages, u32 nr);
int nvmap_page_pool_clear(void);
int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);
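/*
 * Illustrative usage sketch only (the real allocation path lives in the
 * nvmap page-allocation code): callers are expected to try the pool first
 * and fall back to the buddy allocator, returning pages to the pool on free:
 *
 *      struct page *page = nvmap_page_pool_alloc(&nvmap_dev->pool);
 *      if (!page)
 *              page = alloc_page(GFP_NVMAP);
 *      ...
 *      if (!nvmap_page_pool_fill(&nvmap_dev->pool, page))
 *              __free_page(page);
 */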
#endif

struct nvmap_client {
        const char                      *name;
        struct rb_root                  handle_refs;
        struct mutex                    ref_lock;
        bool                            kernel_client;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        u32                             handle_count;
        u32                             next_fd;
        int warned;
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

struct nvmap_device {
        struct rb_root  handles;
        spinlock_t      handle_lock;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool pool;
#endif
        struct list_head clients;
        spinlock_t      clients_lock;
};

enum nvmap_stats_t {
        NS_ALLOC = 0,
        NS_RELEASE,
        NS_UALLOC,
        NS_URELEASE,
        NS_KALLOC,
        NS_KRELEASE,
        NS_CFLUSH_RQ,
        NS_CFLUSH_DONE,
        NS_UCFLUSH_RQ,
        NS_UCFLUSH_DONE,
        NS_KCFLUSH_RQ,
        NS_KCFLUSH_DONE,
        NS_TOTAL,
        NS_NUM,
};

struct nvmap_stats {
        atomic64_t stats[NS_NUM];
        atomic64_t collect;
};

extern struct nvmap_stats nvmap_stats;
extern struct nvmap_device *nvmap_dev;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf you must get an
 * nvmap_handle_ref, as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (WARN_ON(!virt_addr_valid(h))) {
                pr_err("%s: invalid handle\n", current->group_leader->comm);
                return NULL;
        }

        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s attempt to get a freed handle\n",
                        __func__, current->group_leader->comm);
                atomic_dec(&h->ref);
                return NULL;
        }
        return h;
}

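/*
 * Illustrative get/put pairing (hypothetical caller, for documentation only):
 *
 *      h = nvmap_handle_get(h);
 *      if (!h)
 *              return -EINVAL;
 *      ...operate on h...
 *      nvmap_handle_put(h);
 */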

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
#ifdef CONFIG_ARM64
                if (h->owner && !h->owner->warned) {
                        char task_comm[TASK_COMM_LEN];
                        h->owner->warned = 1;
                        get_task_comm(task_comm, h->owner->task);
                        pr_err("PID %d: %s: WARNING: "
                                "NVMAP_HANDLE_WRITE_COMBINE "
                                "should be used in place of "
                                "NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
                                h->owner->task->pid, task_comm);
                }
#endif
                return pgprot_noncached(prot);
        }
        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_dmacoherent(prot);
        return prot;
}

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
                                                 struct nvmap_handle *h);

struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *h);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
                                                 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
                struct nvmap_handle_ref *ref, u32 param, u64 *result);

struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_handle(__u32 handle);

/* MM definitions. */
extern size_t cache_maint_inner_threshold;
extern size_t cache_maint_outer_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_clean_cache(struct page **pages, int numpages);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr);

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                 struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                                  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
                           struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct nvmap_client *client,
                      struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

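/*
 * The low two bits of each entry in a handle's pgalloc.pages[] array are used
 * as per-page tag bits: bit 0 marks the page dirty, bit 1 marks it reserved.
 * nvmap_to_page() strips both tags to recover the real struct page pointer;
 * the helpers below set, clear and test the individual bits.
 */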
static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
        return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~2UL);
}

/*
 * Apply @fn to every page of @h that lies entirely inside
 * [@offset, @offset + @size). Only meaningful for page-allocated
 * (heap_pgalloc) handles.
 *
 * FIXME: assumes user space requests for reserve operations
 * are page aligned
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
                                   u32 offset, u32 size,
                                   void (*fn)(struct page **))
{
        int i;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;

        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        fn(&h->pgalloc.pages[i]);
        }
}

static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                                        u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
                                           u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}

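/*
 * Build a temporary array of bare struct page pointers (tag bits stripped)
 * from a handle's page array. The caller is responsible for releasing the
 * array with nvmap_altfree().
 */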
static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
                        u32 *sizes, u32 nr, u32 op);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */