1 /*
2  * drivers/video/tegra/nvmap/nvmap_priv.h
3  *
4  * GPU memory management driver for Tegra
5  *
6  * Copyright (c) 2009-2015, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
24 #define __VIDEO_TEGRA_NVMAP_NVMAP_H
25
26 #include <linux/list.h>
27 #include <linux/mm.h>
28 #include <linux/mutex.h>
29 #include <linux/rbtree.h>
30 #include <linux/sched.h>
31 #include <linux/wait.h>
32 #include <linux/atomic.h>
33 #include <linux/dma-buf.h>
34 #include <linux/syscalls.h>
36 #include <linux/miscdevice.h>
37 #include <linux/nvmap.h>
38 #include <linux/vmalloc.h>
39 #include <linux/slab.h>
40
41 #include <linux/workqueue.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma-direction.h>
44 #include <linux/platform_device.h>
45
46 #include <asm/cacheflush.h>
47 #ifndef CONFIG_ARM64
48 #include <asm/outercache.h>
49 #endif
50 #include "nvmap_heap.h"
51
52 #ifdef CONFIG_NVMAP_HIGHMEM_ONLY
53 #define __GFP_NVMAP     __GFP_HIGHMEM
54 #else
55 #define __GFP_NVMAP     (GFP_KERNEL | __GFP_HIGHMEM)
56 #endif
57
58 #define GFP_NVMAP              (__GFP_NVMAP | __GFP_NOWARN)
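/*
 * GFP_NVMAP is the mask used for nvmap's own page allocations: prefer
 * HIGHMEM pages and suppress allocation-failure warnings. A typical
 * allocation site might look roughly like the sketch below; this is
 * illustrative only and the exact flags used by the allocator may differ
 * (e.g. __GFP_ZERO is presumably added only when zero_memory is set).
 *
 *	struct page *page = alloc_page(GFP_NVMAP | __GFP_ZERO);
 */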
59
60 #ifdef CONFIG_64BIT
61 #define NVMAP_LAZY_VFREE
62 #endif
63
64 struct page;
65 struct nvmap_device;
66
67 void _nvmap_handle_free(struct nvmap_handle *h);
68 /* holds max number of handles allocated per process at any time */
69 extern u32 nvmap_max_handle_count;
70
71 /* If set, force memory handed out to userspace to be zeroed. */
72 extern bool zero_memory;
73
74 #ifdef CONFIG_ARM64
75 #define PG_PROT_KERNEL PAGE_KERNEL
76 #define FLUSH_DCACHE_AREA __flush_dcache_area
77 #define outer_flush_range(s, e)
78 #define outer_inv_range(s, e)
79 #define outer_clean_range(s, e)
80 #define outer_flush_all()
81 #define outer_clean_all()
82 extern void __clean_dcache_page(struct page *);
83 extern void __flush_dcache_page(struct page *);
84 #else
85 #define PG_PROT_KERNEL pgprot_kernel
86 #define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
87 extern void __flush_dcache_page(struct address_space *, struct page *);
88 #endif
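/*
 * Note on the block above: ARM64 Tegra parts have no outer (e.g. PL310 L2)
 * cache controller, so the outer_* maintenance calls compile away to nothing
 * and only the inner dcache helpers remain; on 32-bit ARM the outer_*
 * operations from <asm/outercache.h> are used as-is.
 */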
89
90 struct nvmap_vma_list {
91         struct list_head list;
92         struct vm_area_struct *vma;
93         pid_t pid;
94 };
95
96 struct nvmap_carveout_node {
97         unsigned int            heap_bit;
98         struct nvmap_heap       *carveout;
99         int                     index;
100         phys_addr_t             base;
101         size_t                  size;
102 };
103
104 /* handles allocated using shared system memory (either IOVMM- or high-order
105  * page allocations) */
106 struct nvmap_pgalloc {
107         struct page **pages;
108         bool contig;                    /* contiguous system memory */
109         atomic_t ndirty;        /* number of dirty pages */
110 };
111
112 struct nvmap_handle {
113         struct rb_node node;    /* entry on global handle tree */
114         atomic_t ref;           /* reference count (i.e., # of duplications) */
115         atomic_t pin;           /* pin count */
116         u32 flags;              /* caching flags */
117         size_t size;            /* padded (as-allocated) size */
118         size_t orig_size;       /* original (as-requested) size */
119         size_t align;
120         u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
121         struct nvmap_client *owner;
122
123         /*
124          * dma_buf necessities. An attachment is made on dma_buf allocation to
125          * facilitate the nvmap_pin* APIs.
126          */
127         struct dma_buf *dmabuf;
128         struct dma_buf_attachment *attachment;
129
130         union {
131                 struct nvmap_pgalloc pgalloc;
132                 struct nvmap_heap_block *carveout;
133         };
134         bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
135         bool alloc;             /* handle has memory allocated */
136         u32 heap_type;          /* handle heap is allocated from */
137         u32 userflags;          /* flags passed from userspace */
138         void *vaddr;            /* mapping used inside kernel */
139         struct list_head vmas;  /* list of all user VMAs */
140         atomic_t umap_count;    /* number of outstanding maps from user */
141         atomic_t kmap_count;    /* number of outstanding maps from kernel */
142         atomic_t share_count;   /* number of processes sharing the handle */
143         struct list_head lru;   /* list head to track the lru */
144         struct mutex lock;
145         void *nvhost_priv;      /* nvhost private data */
146         void (*nvhost_priv_delete)(void *priv);
147         unsigned int ivm_id;
148         int peer;               /* Peer VM number */
149 };
150
151 /* handle_ref objects are client-local references to an nvmap_handle;
152  * they are distinct objects so that handles can be unpinned and
153  * unreferenced the correct number of times when a client abnormally
154  * terminates */
155 struct nvmap_handle_ref {
156         struct nvmap_handle *handle;
157         struct rb_node  node;
158         atomic_t        dupes;  /* number of times to free on file close */
159         atomic_t        pin;    /* number of times to unpin on free */
160 };
161
162 #ifdef CONFIG_NVMAP_PAGE_POOLS
163
164 /*
165  * This is the default ratio defining the pool size. It can be thought of as
166  * the pool size in either MB per GB or KB per MB of system memory. The
167  * maximum is therefore 1024 (all of physical memory - not a very good idea)
168  * and the minimum is 0 (no page pool at all).
169  */
170 #define NVMAP_PP_POOL_SIZE               (128)
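/*
 * Example with the default ratio of 128: a device with 2 GB of system memory
 * gets a page pool of roughly 2 GB * 128 / 1024 = 256 MB, i.e. about 65536
 * 4 KiB pages. (Illustrative only - the actual computation is performed by
 * the page pool implementation at init time.)
 */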
171
172 /*
173  * The wakeup threshold is how many empty page slots there need to be in order
174  * for the background allocator to be woken up.
175  */
176 #define NVMAP_PP_DEF_FILL_THRESH         (4096)
177
178 /*
179  * When memory does not require zeroing, this is the minimum number of pages
180  * remaining in the page pools before the background allocator is woken up. This
181  * essentially disables the background allocator (unless it is extremely small).
182  */
183 #define NVMAP_PP_DEF_ZERO_MEM_FILL_MIN   (2048)
184
185 /*
186  * Minimum amount of memory we try to keep available. That is, if free memory
187  * sinks below this amount, the page pools will stop filling.
188  */
189 #define NVMAP_PP_DEF_MIN_AVAILABLE_MB    (128)
190
191 struct nvmap_page_pool {
192         struct mutex lock;
193         u32 count;  /* Number of pages in the page list. */
194         u32 max;    /* Max length of the page list. */
195         int to_zero; /* Number of pages on the zero list */
196         struct list_head page_list;
197         struct list_head zero_list;
198         u32 dirty_pages;
199
200 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
201         u64 allocs;
202         u64 fills;
203         u64 hits;
204         u64 misses;
205 #endif
206 };
207
208 int nvmap_page_pool_init(struct nvmap_device *dev);
209 int nvmap_page_pool_fini(struct nvmap_device *dev);
210 struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
211 int nvmap_page_pool_alloc_lots(struct nvmap_page_pool *pool,
212                                         struct page **pages, u32 nr);
213 int nvmap_page_pool_fill_lots(struct nvmap_page_pool *pool,
214                                        struct page **pages, u32 nr);
215 int nvmap_page_pool_clear(void);
216 int nvmap_page_pool_debugfs_init(struct dentry *nvmap_root);
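/*
 * Rough usage sketch (not the literal allocator code, and the local names
 * below are hypothetical): callers try to take pages from the pool first,
 * fall back to the page allocator for the remainder, and later hand pages
 * back to the pool (where they are queued for zeroing) instead of freeing
 * them.
 *
 *	got = nvmap_page_pool_alloc_lots(&nvmap_dev->pool, pages, nr);
 *	for (i = got; i < nr; i++)
 *		pages[i] = alloc_page(GFP_NVMAP);
 *	...
 *	nvmap_page_pool_fill_lots(&nvmap_dev->pool, pages, nr);
 */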
217 #endif
218
219 #define NVMAP_IVM_INVALID_PEER          (-1)
220
221 struct nvmap_client {
222         const char                      *name;
223         struct rb_root                  handle_refs;
224         struct mutex                    ref_lock;
225         bool                            kernel_client;
226         atomic_t                        count;
227         struct task_struct              *task;
228         struct list_head                list;
229         u32                             handle_count;
230         u32                             next_fd;
231         int warned;
232 };
233
234 struct nvmap_vma_priv {
235         struct nvmap_handle *handle;
236         size_t          offs;
237         atomic_t        count;  /* number of processes cloning the VMA */
238 };
239
240 struct nvmap_device {
241         struct rb_root  handles;
242         spinlock_t      handle_lock;
243         struct miscdevice dev_user;
244         struct nvmap_carveout_node *heaps;
245         int nr_carveouts;
246 #ifdef CONFIG_NVMAP_PAGE_POOLS
247         struct nvmap_page_pool pool;
248 #endif
249         struct list_head clients;
250         struct rb_root pids;
251         struct mutex    clients_lock;
252         struct list_head lru_handles;
253         spinlock_t      lru_lock;
254         struct dentry *handles_by_pid;
255 };
256
257 enum nvmap_stats_t {
258         NS_ALLOC = 0,
259         NS_RELEASE,
260         NS_UALLOC,
261         NS_URELEASE,
262         NS_KALLOC,
263         NS_KRELEASE,
264         NS_CFLUSH_RQ,
265         NS_CFLUSH_DONE,
266         NS_UCFLUSH_RQ,
267         NS_UCFLUSH_DONE,
268         NS_KCFLUSH_RQ,
269         NS_KCFLUSH_DONE,
270         NS_TOTAL,
271         NS_NUM,
272 };
273
274 struct nvmap_stats {
275         atomic64_t stats[NS_NUM];
276         atomic64_t collect;
277 };
278
279 extern struct nvmap_stats nvmap_stats;
280 extern struct nvmap_device *nvmap_dev;
281
282 void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
283 void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
284 u64 nvmap_stats_read(enum nvmap_stats_t);
285
286 static inline void nvmap_ref_lock(struct nvmap_client *priv)
287 {
288         mutex_lock(&priv->ref_lock);
289 }
290
291 static inline void nvmap_ref_unlock(struct nvmap_client *priv)
292 {
293         mutex_unlock(&priv->ref_lock);
294 }
295
296 /*
297  * NOTE: this does not ensure the continued existence of the underlying
298  * dma_buf. If you want to ensure the existence of the dma_buf, you must get an
299  * nvmap_handle_ref as that is what tracks the dma_buf refs.
300  */
301 static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
302 {
303         if (WARN_ON(!virt_addr_valid(h))) {
304                 pr_err("%s: invalid handle\n", current->group_leader->comm);
305                 return NULL;
306         }
307
308         if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
309                 pr_err("%s: %s attempt to get a freed handle\n",
310                         __func__, current->group_leader->comm);
311                 atomic_dec(&h->ref);
312                 return NULL;
313         }
314         return h;
315 }
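/*
 * Every successful nvmap_handle_get() must be balanced by an
 * nvmap_handle_put() (declared below) once the caller is done with the
 * handle. A minimal sketch:
 *
 *	h = nvmap_handle_get(h);
 *	if (!h)
 *		return -EINVAL;
 *	...
 *	nvmap_handle_put(h);
 */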
316
317 static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
318 {
319         if (h->heap_type == NVMAP_HEAP_CARVEOUT_VPR) {
320 #ifdef pgprot_device_writecombine
321                 return pgprot_device_writecombine(prot);
322 #else
323                 return pgprot_noncached(prot);
324 #endif
325         }
326
327         if (h->flags == NVMAP_HANDLE_UNCACHEABLE) {
328 #ifdef CONFIG_ARM64
329                 if (h->owner && !h->owner->warned) {
330                         char task_comm[TASK_COMM_LEN];
331                         h->owner->warned = 1;
332                         get_task_comm(task_comm, h->owner->task);
333                         pr_err("PID %d: %s: TAG: 0x%04x WARNING: "
334                                 "NVMAP_HANDLE_WRITE_COMBINE "
335                                 "should be used in place of "
336                                 "NVMAP_HANDLE_UNCACHEABLE on ARM64\n",
337                                 h->owner->task->pid, task_comm,
338                                 h->userflags >> 16);
339                 }
340 #endif
341                 return pgprot_noncached(prot);
342         }
343         else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
344                 return pgprot_dmacoherent(prot);
345         return prot;
346 }
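/*
 * Summary of the mapping implemented above:
 *   VPR carveout handles         -> device write-combine (uncached when the
 *                                   architecture has no such attribute)
 *   NVMAP_HANDLE_UNCACHEABLE     -> uncached (warned against on ARM64)
 *   NVMAP_HANDLE_WRITE_COMBINE   -> DMA-coherent / write-combined
 *   everything else              -> the caller-supplied (cacheable) protection
 */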
347
348 int nvmap_probe(struct platform_device *pdev);
349 int nvmap_remove(struct platform_device *pdev);
350 int nvmap_init(struct platform_device *pdev);
351
352 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
353                                               struct nvmap_handle *handle,
354                                               unsigned long type,
355                                               phys_addr_t *start);
356
357 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
358                                    struct nvmap_heap_block *b);
359
360 struct nvmap_carveout_node;
361
362 void nvmap_handle_put(struct nvmap_handle *h);
363
364 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
365                                                  struct nvmap_handle *h);
366
367 struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *h);
368
369 struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
370                                              size_t size);
371
372 struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
373                                         struct nvmap_handle *h, bool skip_val);
374
375 struct nvmap_handle *nvmap_validate_get_by_ivmid(struct nvmap_client *client,
376                                                  unsigned int ivm_id);
377
378 struct nvmap_handle_ref *nvmap_create_handle_from_fd(
379                         struct nvmap_client *client, int fd);
380
381 void inner_cache_maint(unsigned int op, void *vaddr, size_t size);
382 void outer_cache_maint(unsigned int op, phys_addr_t paddr, size_t size);
383
384 int nvmap_alloc_handle(struct nvmap_client *client,
385                        struct nvmap_handle *h, unsigned int heap_mask,
386                        size_t align, u8 kind,
387                        unsigned int flags, int peer);
388
389 void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);
390
391 void nvmap_free_handle_fd(struct nvmap_client *c, int fd);
392
393 int nvmap_pin_handles(struct nvmap_client *client, unsigned int nr,
394                       struct nvmap_handle * const *handles);
395
396 void nvmap_unpin_handles(struct nvmap_client *priv, unsigned int nr,
397                          struct nvmap_handle * const *handles);
398
399 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);
400
401 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
402
403 int is_nvmap_vma(struct vm_area_struct *vma);
404
405 int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
406 struct nvmap_handle *nvmap_handle_get_from_dmabuf_fd(
407                                 struct nvmap_client *client, int fd);
408
409 int nvmap_get_handle_param(struct nvmap_client *client,
410                 struct nvmap_handle_ref *ref, u32 param, u64 *result);
411
412 struct nvmap_client *nvmap_client_get(struct nvmap_client *client);
413
414 void nvmap_client_put(struct nvmap_client *c);
415
416 struct nvmap_handle *nvmap_handle_get_from_fd(int fd);
417
418 /* MM definitions. */
419 extern size_t cache_maint_inner_threshold;
420 extern size_t cache_maint_outer_threshold;
421
422 extern void v7_flush_kern_cache_all(void);
423 extern void v7_clean_kern_cache_all(void *);
424 extern void __flush_dcache_all(void *arg);
425 extern void __clean_dcache_all(void *arg);
426
427 void inner_flush_cache_all(void);
428 void inner_clean_cache_all(void);
429 void nvmap_clean_cache(struct page **pages, int numpages);
430 void nvmap_clean_cache_page(struct page *page);
431 void nvmap_flush_cache(struct page **pages, int numpages);
432
433 int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
434                               u32 *sizes, int op, int nr);
435
436 /* Internal API to support dmabuf */
437 struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
438                                  struct nvmap_handle *handle);
439 struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
440                                     struct nvmap_handle *handle);
441 struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
442                                   struct nvmap_handle *h);
443 void __nvmap_free_sg_table(struct nvmap_client *client,
444                            struct nvmap_handle *h, struct sg_table *sgt);
445 void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
446 void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
447 void *__nvmap_mmap(struct nvmap_handle *h);
448 void __nvmap_munmap(struct nvmap_handle *h, void *addr);
449 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
450 int __nvmap_get_handle_param(struct nvmap_client *client,
451                              struct nvmap_handle *h, u32 param, u64 *result);
452 int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
453                            unsigned long start, unsigned long end,
454                            unsigned int op, bool clean_only_dirty);
455 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
456                                            const char *name);
457 struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
458 struct nvmap_handle *__nvmap_ref_to_handle(struct nvmap_handle_ref *ref);
459 int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
460 void __nvmap_unpin(struct nvmap_handle_ref *ref);
461 int __nvmap_dmabuf_fd(struct nvmap_client *client,
462                       struct dma_buf *dmabuf, int flags);
463
464 void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
465 int nvmap_dmabuf_stash_init(void);
466
467 void *nvmap_altalloc(size_t len);
468 void nvmap_altfree(void *ptr, size_t len);
469
470 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
471                 struct page *page, pte_t *pte, bool write, bool anon);
472
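/*
 * The helpers below encode per-page state in the two low bits of each entry
 * of h->pgalloc.pages[]: bit 0 marks the page dirty, bit 1 marks it reserved.
 * This is safe because struct page pointers are at least 4-byte aligned, but
 * it means nvmap_to_page() must be used to strip the flag bits before the
 * pointer is dereferenced. Illustrative use (a sketch, not taken verbatim
 * from the driver):
 *
 *	struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
 *	if (nvmap_page_mkdirty(&h->pgalloc.pages[i]))
 *		atomic_inc(&h->pgalloc.ndirty);
 */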
473 static inline struct page *nvmap_to_page(struct page *page)
474 {
475         return (struct page *)((unsigned long)page & ~3UL);
476 }
477
478 static inline bool nvmap_page_dirty(struct page *page)
479 {
480         return (unsigned long)page & 1UL;
481 }
482
483 static inline bool nvmap_page_mkdirty(struct page **page)
484 {
485         if (nvmap_page_dirty(*page))
486                 return false;
487         *page = (struct page *)((unsigned long)*page | 1UL);
488         return true;
489 }
490
491 static inline bool nvmap_page_mkclean(struct page **page)
492 {
493         if (!nvmap_page_dirty(*page))
494                 return false;
495         *page = (struct page *)((unsigned long)*page & ~1UL);
496         return true;
497 }
498
499 static inline bool nvmap_page_reserved(struct page *page)
500 {
501         return !!((unsigned long)page & 2UL);
502 }
503
504 static inline bool nvmap_page_mkreserved(struct page **page)
505 {
506         if (nvmap_page_reserved(*page))
507                 return false;
508         *page = (struct page *)((unsigned long)*page | 2UL);
509         return true;
510 }
511
512 static inline bool nvmap_page_mkunreserved(struct page **page)
513 {
514         if (!nvmap_page_reserved(*page))
515                 return false;
516         *page = (struct page *)((unsigned long)*page & ~2UL);
517         return true;
518 }
519
520 /*
521  * FIXME: assumes that user-space requests for reserve operations
522  * are page aligned
523  */
524 static inline int nvmap_handle_mk(struct nvmap_handle *h,
525                                   u32 offset, u32 size,
526                                   bool (*fn)(struct page **))
527 {
528         int i, nchanged = 0;
529         int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
530         int end_page = (offset + size) >> PAGE_SHIFT;
531
532         mutex_lock(&h->lock);
533         if (h->heap_pgalloc) {
534                 for (i = start_page; i < end_page; i++)
535                         nchanged += fn(&h->pgalloc.pages[i]) ? 1 : 0;
536         }
537         mutex_unlock(&h->lock);
538         return nchanged;
539 }
540
541 static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
542                                         u32 offset, u32 size)
543 {
544         int nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
545         if (h->heap_pgalloc)
546                 atomic_sub(nchanged, &h->pgalloc.ndirty);
547 }
548
549 static inline void nvmap_handle_mkdirty(struct nvmap_handle *h,
550                                         u32 offset, u32 size)
551 {
552         int nchanged = nvmap_handle_mk(h, offset, size, nvmap_page_mkdirty);
553         if (h->heap_pgalloc)
554                 atomic_add(nchanged, &h->pgalloc.ndirty);
555 }
556
557 static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
558                                              u32 offset, u32 size)
559 {
560         nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
561 }
562
563 static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
564                                            u32 offset, u32 size)
565 {
566         nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
567 }
568
569 static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
570 {
571         struct page **pages;
572         int i;
573
574         pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
575         if (!pages)
576                 return NULL;
577
578         for (i = 0; i < nr_pages; i++)
579                 pages[i] = nvmap_to_page(pg_pages[i]);
580
581         return pages;
582 }
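/*
 * The array returned by nvmap_pages() is obtained with nvmap_altalloc(), so
 * it should be released with a matching nvmap_altfree() of the same length.
 * Sketch:
 *
 *	struct page **pages = nvmap_pages(h->pgalloc.pages, nr);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	nvmap_altfree(pages, sizeof(*pages) * nr);
 */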
583
584 void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);
585
586 void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
587                        u32 *sizes, u32 nr);
588
589 void nvmap_vma_open(struct vm_area_struct *vma);
590
591 int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
592                         u32 *sizes, u32 nr, u32 op);
593
594 static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
595 {
596         mutex_lock(&h->lock);
597         atomic_inc(&h->kmap_count);
598         mutex_unlock(&h->lock);
599 }
600
601 static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
602 {
603         atomic_inc(&h->kmap_count);
604 }
605
606 static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
607 {
608         atomic_dec(&h->kmap_count);
609 }
610
611 static inline void nvmap_umaps_inc(struct nvmap_handle *h)
612 {
613         mutex_lock(&h->lock);
614         atomic_inc(&h->umap_count);
615         mutex_unlock(&h->lock);
616 }
617
618 static inline void nvmap_umaps_dec(struct nvmap_handle *h)
619 {
620         atomic_dec(&h->umap_count);
621 }
622
623 static inline void nvmap_lru_add(struct nvmap_handle *h)
624 {
625         spin_lock(&nvmap_dev->lru_lock);
626         BUG_ON(!list_empty(&h->lru));
627         list_add_tail(&h->lru, &nvmap_dev->lru_handles);
628         spin_unlock(&nvmap_dev->lru_lock);
629 }
630
631 static inline void nvmap_lru_del(struct nvmap_handle *h)
632 {
633         spin_lock(&nvmap_dev->lru_lock);
634         list_del(&h->lru);
635         INIT_LIST_HEAD(&h->lru);
636         spin_unlock(&nvmap_dev->lru_lock);
637 }
638
639 static inline void nvmap_lru_reset(struct nvmap_handle *h)
640 {
641         spin_lock(&nvmap_dev->lru_lock);
642         BUG_ON(list_empty(&h->lru));
643         list_del(&h->lru);
644         list_add_tail(&h->lru, &nvmap_dev->lru_handles);
645         spin_unlock(&nvmap_dev->lru_lock);
646 }
647
648 static inline bool nvmap_handle_track_dirty(struct nvmap_handle *h)
649 {
650         if (!h->heap_pgalloc)
651                 return false;
652
653         return h->userflags & (NVMAP_HANDLE_CACHE_SYNC |
654                                NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE);
655 }
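/*
 * Dirty tracking is only meaningful for page-allocated handles whose owner
 * asked for cache syncing (NVMAP_HANDLE_CACHE_SYNC*). For those handles the
 * per-page dirty bits and pgalloc.ndirty allow cache maintenance (see
 * __nvmap_do_cache_maint() with clean_only_dirty) to be limited to pages
 * that were actually written.
 */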
656
657 void nvmap_dmabuf_release_stashed_maps(struct dma_buf *dmabuf);
658
659 #endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */