/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/nvmap.h>

#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_ARM64
#include <asm/outercache.h>
#endif
#include "nvmap_heap.h"

#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP     __GFP_HIGHMEM
#else
#define __GFP_NVMAP     (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#define GFP_NVMAP       (__GFP_NVMAP | __GFP_NOWARN)
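
/*
 * Illustrative sketch only (not quoted from the allocator path): a page
 * allocation under the masks above would look roughly like this, with
 * __GFP_ZERO added when the zero_memory option is enabled:
 *
 *      gfp_t gfp = GFP_NVMAP;
 *      struct page *page;
 *
 *      if (zero_memory)
 *              gfp |= __GFP_ZERO;
 *      page = alloc_page(gfp);
 */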

#define NVMAP_NUM_PTES          64

extern bool zero_memory;

#ifdef CONFIG_64BIT
#define NVMAP_LAZY_VFREE
#endif

struct page;

extern const struct file_operations nvmap_fd_fops;
void _nvmap_handle_free(struct nvmap_handle *h);
/* holds max number of handles allocated per process at any time */
extern u32 nvmap_max_handle_count;
extern size_t cache_maint_inner_threshold;

extern struct platform_device *nvmap_pdev;

#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)                           \
        dev_err(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)                          \
        dev_warn(nvmap_client_to_device(_client),               \
                 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)                         \
        dev_dbg(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

/* If set, force zeroed memory to userspace. */
extern bool zero_memory;

#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_range(addr, (addr) + PAGE_SIZE)
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __flush_dcache_page(struct page *);
#else
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_page(addr)
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

struct nvmap_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
        pid_t pid;
};

/* handles allocated using shared system memory (either IOVMM- or high-order
 * page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        bool contig;                    /* contiguous system memory */
        u32 iovm_addr;  /* non-zero if the client needs a specific IOVA mapping */
        atomic_t ndirty;        /* count of dirty pages */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        unsigned long flags;    /* caching flags */
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
        void *map_resources;    /* mapping resources associated with the
                                   buffer */
        struct nvmap_client *owner;
        struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
                        time on validation in some cases.
                        If the handle was duplicated by another client and
                        the original client destroys its ref, this field
                        has to be set to NULL; the ref must then be
                        obtained through validation. */

        /*
         * dma_buf necessities. An attachment is made on dma_buf allocation to
         * facilitate the nvmap_pin* APIs.
         */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;

        struct nvmap_device *dev;
        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool global;            /* handle may be duplicated by other clients */
        bool secure;            /* zap IOVMM area on unpin */
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        u32 heap_type;          /* heap type the handle is allocated from */
        unsigned int userflags; /* flags passed from userspace */
        void *vaddr;            /* mapping used inside kernel */
        struct list_head vmas;  /* list of all user vmas */
        atomic_t umap_count;    /* number of outstanding maps from user */
        atomic_t kmap_count;    /* number of outstanding maps from kernel */
        atomic_t share_count;   /* number of processes sharing the handle */
        struct list_head lru;   /* list head to track the lru */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};

#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)

/*
 * This is the default ratio defining pool size. It can be thought of as pool
 * size in either MB per GB or KB per MB. The maximum value is 1024 (all of
 * physical memory - not a very good idea) and the minimum is 0 (no page pool
 * at all).
 */
#define NVMAP_PP_POOL_SIZE (42)
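
/*
 * Worked example of the ratio above (illustrative numbers only): with
 * 42/1024 of system memory and 2 GB of RAM, the pool targets roughly
 * 2048 MB * 42 / 1024 = 84 MB, i.e. about 21504 pages of 4 KB each.
 */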

/*
 * The wakeup threshold is how many empty page slots there need to be in order
 * for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (1024)

/*
 * When memory does not require zeroing, this is the minimum number of pages
 * remaining in the page pools before the background allocator is woken up.
 * This essentially disables the page pools (unless the pool is extremely
 * small).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (256)

struct nvmap_page_pool {
        struct mutex lock;
        u32 alloc;  /* Alloc index. */
        u32 fill;   /* Fill index. */
        u32 count;  /* Number of pages in the table. */
        u32 length; /* Length of the pages array. */
        struct page **page_array;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
        u64 fills;
        u64 hits;
        u64 misses;
#endif
};

#define pp_empty(pp)                            \
        ((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp)                             \
        ((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])

#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp)  nvmap_pp_inc_index((pp), &(pp)->fill)

/* Handle wrap around. */
static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
{
        *ind += 1;

        /* Wrap condition. */
        if (*ind >= pp->length)
                *ind = 0;
}
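
/*
 * The pool behaves as a circular buffer over page_array: 'fill' is the slot
 * the next page is stored into and 'alloc' is the slot the next page is
 * taken from. Since both indices wrap, fill == alloc is ambiguous on its
 * own; pp_empty()/pp_full() disambiguate by checking whether the slot at
 * 'alloc' still holds a page. Illustrative walk-through, assuming
 * page_array starts out zeroed and length == 2:
 *
 *      start:           alloc == fill == 0, page_array[0] == NULL -> pp_empty
 *      after one fill:  fill == 1, page_array[0] != NULL
 *      after two fills: fill wraps back to 0, page_array[0] != NULL -> pp_full
 */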

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int nvmap_page_pool_clear(void);
#endif

struct nvmap_client {
        const char                      *name;
        struct rb_root                  handle_refs;
        struct mutex                    ref_lock;
        bool                            kernel_client;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        u32                             handle_count;
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

#include <linux/mm.h>
#include <linux/miscdevice.h>

struct nvmap_device {
        struct vm_struct *vm_rgn;
        pte_t           *ptes[NVMAP_NUM_PTES];
        unsigned long   ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
        unsigned int    lastpte;
        spinlock_t      ptelock;

        struct rb_root  handles;
        spinlock_t      handle_lock;
        wait_queue_head_t pte_wait;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool pool;
#endif
        struct list_head clients;
        struct mutex    clients_lock;
        struct list_head lru_handles;
        spinlock_t      lru_lock;
};
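
/*
 * The ptes[]/ptebits pair implements a small pool of kernel PTE slots backed
 * by vm_rgn, with ptebits acting as a one-bit-per-slot allocation bitmap. A
 * minimal sketch of how a slot index could map to its virtual address,
 * assuming slots are laid out one page apart from the start of vm_rgn (the
 * actual bookkeeping lives in the nvmap_*_pte() helpers declared below):
 *
 *      vaddr = (unsigned long)dev->vm_rgn->addr + slot * PAGE_SIZE;
 */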

enum nvmap_stats_t {
        NS_ALLOC = 0,
        NS_RELEASE,
        NS_UALLOC,
        NS_URELEASE,
        NS_KALLOC,
        NS_KRELEASE,
        NS_CFLUSH_RQ,
        NS_CFLUSH_DONE,
        NS_UCFLUSH_RQ,
        NS_UCFLUSH_DONE,
        NS_KCFLUSH_RQ,
        NS_KCFLUSH_DONE,
        NS_TOTAL,
        NS_NUM,
};

struct nvmap_stats {
        atomic64_t stats[NS_NUM];
        atomic64_t collect;
};

extern struct nvmap_stats nvmap_stats;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf, you must get
 * an nvmap_handle_ref, as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s attempt to get a freed handle\n",
                        __func__, current->group_leader->comm);
                atomic_dec(&h->ref);
                return NULL;
        }
        return h;
}
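
/*
 * Typical usage pattern (illustrative only, error handling trimmed): take a
 * reference before touching the handle and drop it with nvmap_handle_put()
 * when done.
 *
 *      h = nvmap_handle_get(h);
 *      if (!h)
 *              return -EINVAL;
 *      ... operate on h ...
 *      nvmap_handle_put(h);
 */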

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                return pgprot_noncached(prot);
        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_writecombine(prot);
        return prot;
}
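
/*
 * nvmap_pgprot() translates a handle's caching flags into page protection
 * bits. A sketch of how a mapping path might apply it (illustrative, not a
 * quote of __nvmap_map()):
 *
 *      vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
 */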

#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);

#endif /* !CONFIG_TEGRA_NVMAP */

struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;

int nvmap_find_cache_maint_op(struct nvmap_device *dev,
                struct nvmap_handle *h);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
                                                 struct nvmap_handle *h);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
                                                 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
                struct nvmap_handle_ref *ref, u32 param, u64 *result);

struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_id(u32 id);

static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
        flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
        FLUSH_TLB_PAGE(kaddr);
#endif
}

/* MM definitions. */
extern size_t cache_maint_outer_threshold;
extern int inner_cache_maint_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr);

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                 struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                                  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
                           struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
        return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~2UL);
}
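
/*
 * The helpers above pack per-page state into the low bits of the pointers
 * stored in nvmap_pgalloc.pages: bit 0 marks a page dirty, bit 1 marks it
 * reserved, and nvmap_to_page() masks both bits off to recover the real
 * struct page pointer. Illustrative sequence:
 *
 *      nvmap_page_mkdirty(&h->pgalloc.pages[i]);   (sets bit 0)
 *      nvmap_page_reserved(h->pgalloc.pages[i]);   (false while bit 1 clear)
 *      p = nvmap_to_page(h->pgalloc.pages[i]);     (tag bits stripped)
 */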

/*
 * FIXME: assumes that user-space requests for reserve operations
 * are page aligned
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
                                   u32 offset, u32 size,
                                   void (*fn)(struct page **))
{
        int i;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;

        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        fn(&h->pgalloc.pages[i]);
        }
}
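
/*
 * Worked example of the page-range math above (illustrative values): with
 * 4 KB pages, offset = 0x1000 and size = 0x3000 give
 * start_page = 0x1000 >> 12 = 1 and end_page = 0x4000 >> 12 = 4, so fn()
 * is applied to pages 1, 2 and 3 (end_page is exclusive).
 */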

static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                                        u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
                                           u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}

static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
                        u32 *sizes, u32 nr, u32 op);

static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
{
        mutex_lock(&h->lock);
        atomic_inc(&h->kmap_count);
        mutex_unlock(&h->lock);
}

static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
{
        atomic_inc(&h->kmap_count);
}

static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
{
        atomic_dec(&h->kmap_count);
}

static inline void nvmap_umaps_inc(struct nvmap_handle *h)
{
        mutex_lock(&h->lock);
        atomic_inc(&h->umap_count);
        mutex_unlock(&h->lock);
}

static inline void nvmap_umaps_dec(struct nvmap_handle *h)
{
        atomic_dec(&h->umap_count);
}

static inline void nvmap_lru_add(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        BUG_ON(!list_empty(&h->lru));
        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
        spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_del(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        list_del(&h->lru);
        INIT_LIST_HEAD(&h->lru);
        spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_reset(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        BUG_ON(list_empty(&h->lru));
        list_del(&h->lru);
        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
        spin_unlock(&nvmap_dev->lru_lock);
}

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */