/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/nvmap.h>

#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_ARM64
#include <asm/outercache.h>
#endif
#include "nvmap_heap.h"

#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define __GFP_NVMAP     __GFP_HIGHMEM
#else
#define __GFP_NVMAP     (GFP_KERNEL | __GFP_HIGHMEM)
#endif

#define GFP_NVMAP       (__GFP_NVMAP | __GFP_NOWARN)

#define NVMAP_NUM_PTES          64

extern bool zero_memory;

#ifdef CONFIG_64BIT
#define NVMAP_LAZY_VFREE
#endif

struct page;

extern const struct file_operations nvmap_fd_fops;
void _nvmap_handle_free(struct nvmap_handle *h);
/* holds the max number of handles allocated per process at any time */
extern u32 nvmap_max_handle_count;
extern size_t cache_maint_inner_threshold;

extern struct platform_device *nvmap_pdev;

#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)                           \
        dev_err(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)                          \
        dev_warn(nvmap_client_to_device(_client),               \
                 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)                         \
        dev_dbg(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

/* If set, force zeroed memory to userspace. */
extern bool zero_memory;

#ifdef CONFIG_ARM64
#define PG_PROT_KERNEL PAGE_KERNEL
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_range(addr, (addr) + PAGE_SIZE)
#define FLUSH_DCACHE_AREA __flush_dcache_area
#define outer_flush_range(s, e)
#define outer_inv_range(s, e)
#define outer_clean_range(s, e)
#define outer_flush_all()
#define outer_clean_all()
extern void __flush_dcache_page(struct page *);
#else
#define PG_PROT_KERNEL pgprot_kernel
#define FLUSH_TLB_PAGE(addr) flush_tlb_kernel_page(addr)
#define FLUSH_DCACHE_AREA __cpuc_flush_dcache_area
extern void __flush_dcache_page(struct address_space *, struct page *);
#endif

struct nvmap_vma_list {
        struct list_head list;
        struct vm_area_struct *vma;
        pid_t pid;
};

/* handles allocated using shared system memory (either IOVMM- or
 * high-order page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        bool contig;                    /* contiguous system memory */
        u32 iovm_addr;  /* non-zero if the client needs a specific IOVA mapping */
        atomic_t ndirty;        /* count of dirty pages */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        unsigned long flags;    /* caching flags */
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
        void *map_resources;    /* mapping resources associated with the
                                   buffer */
        struct nvmap_client *owner;
        struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
                        time on validation in some cases.
                        If the handle was duplicated by another client and
                        the original client destroys its ref, this field
                        must be set to NULL; the ref should then be
                        obtained through validation. */

        /*
         * dma_buf necessities. An attachment is made on dma_buf allocation to
         * facilitate the nvmap_pin* APIs.
         */
        struct dma_buf *dmabuf;
        struct dma_buf_attachment *attachment;

        struct nvmap_device *dev;
        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool global;            /* handle may be duplicated by other clients */
        bool secure;            /* zap IOVMM area on unpin */
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        u32 heap_type;          /* heap type the handle is allocated from */
        unsigned int userflags; /* flags passed from userspace */
        void *vaddr;            /* mapping used inside kernel */
        struct list_head vmas;  /* list of all user VMAs */
        atomic_t umap_count;    /* number of outstanding maps from user */
        atomic_t kmap_count;    /* number of outstanding maps from kernel */
        atomic_t share_count;   /* number of processes sharing the handle */
        struct list_head lru;   /* list head to track the lru */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};

#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)

/*
 * Default ratio defining the page pool size. It can be thought of as pool
 * size in MB per GB (equivalently, KB per MB) of system memory, so the value
 * ranges from 0 (no page pool at all) to 1024 (all of physical memory - not
 * a very good idea).
 */
#define NVMAP_PP_POOL_SIZE (42)

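/*
 * Worked example (illustrative only; this assumes the pool is sized as
 * roughly total_memory * NVMAP_PP_POOL_SIZE / 1024, the exact computation
 * lives in the page pool implementation): on a system with 2 GB of RAM the
 * pool would be about 2048 MB * 42 / 1024 = 84 MB, i.e. around 21504 4 KB
 * pages.
 */
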
/*
 * The wakeup threshold is how many empty page slots there need to be in
 * order for the background allocator to be woken up.
 */
#define NVMAP_PP_DEF_FILL_THRESH (1024)

/*
 * When memory does not require zeroing, this is the minimum number of pages
 * remaining in the page pools before the background allocator is woken up.
 * This essentially disables the page pools (unless the pool is extremely
 * small).
 */
#define NVMAP_PP_ZERO_MEM_FILL_MIN (256)

struct nvmap_page_pool {
        struct mutex lock;
        u32 alloc;  /* Alloc index. */
        u32 fill;   /* Fill index. */
        u32 count;  /* Number of pages in the table. */
        u32 length; /* Length of the pages array. */
        struct page **page_array;

#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
        u64 allocs;
        u64 fills;
        u64 hits;
        u64 misses;
#endif
};

#define pp_empty(pp)                            \
        ((pp)->fill == (pp)->alloc && !(pp)->page_array[(pp)->alloc])
#define pp_full(pp)                             \
        ((pp)->fill == (pp)->alloc && (pp)->page_array[(pp)->alloc])

#define nvmap_pp_alloc_inc(pp) nvmap_pp_inc_index((pp), &(pp)->alloc)
#define nvmap_pp_fill_inc(pp)  nvmap_pp_inc_index((pp), &(pp)->fill)

/* Handle wrap around. */
static inline void nvmap_pp_inc_index(struct nvmap_page_pool *pp, u32 *ind)
{
        *ind += 1;

        /* Wrap condition. */
        if (*ind >= pp->length)
                *ind = 0;
}

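/*
 * page_array behaves as a circular buffer: pages are inserted at the fill
 * index and handed out from the alloc index, each wrapping back to 0 at
 * length. When the two indices meet, the slot at alloc disambiguates the
 * two cases - empty if it is NULL, full if it still holds a page. For
 * example (a sketch), with length == 4 and fill == alloc == 2, the pool is
 * empty when page_array[2] == NULL and full when page_array[2] != NULL.
 */
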
static inline void nvmap_page_pool_lock(struct nvmap_page_pool *pool)
{
        mutex_lock(&pool->lock);
}

static inline void nvmap_page_pool_unlock(struct nvmap_page_pool *pool)
{
        mutex_unlock(&pool->lock);
}

int nvmap_page_pool_init(struct nvmap_device *dev);
int nvmap_page_pool_fini(struct nvmap_device *dev);
struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
bool nvmap_page_pool_fill(struct nvmap_page_pool *pool, struct page *page);
int nvmap_page_pool_clear(void);
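
/*
 * Illustrative usage only (a minimal sketch, not lifted from the driver;
 * the real allocation paths are more involved, e.g. zeroing and stats):
 *
 *	struct page *page = nvmap_page_pool_alloc(pool);
 *
 *	if (!page)
 *		page = alloc_page(GFP_NVMAP);	// pool empty, fall back
 *	...
 *	if (!nvmap_page_pool_fill(pool, page))
 *		__free_page(page);		// pool full, free for real
 */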
#endif

struct nvmap_client {
        const char                      *name;
        struct rb_root                  handle_refs;
        struct mutex                    ref_lock;
        bool                            kernel_client;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        u32                             handle_count;
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

#include <linux/mm.h>
#include <linux/miscdevice.h>

struct nvmap_device {
        struct vm_struct *vm_rgn;
        pte_t           *ptes[NVMAP_NUM_PTES];
        unsigned long   ptebits[NVMAP_NUM_PTES / BITS_PER_LONG];
        unsigned int    lastpte;
        spinlock_t      ptelock;

        struct rb_root  handles;
        spinlock_t      handle_lock;
        wait_queue_head_t pte_wait;
        struct miscdevice dev_user;
        struct nvmap_carveout_node *heaps;
        int nr_carveouts;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool pool;
#endif
        struct list_head clients;
        struct mutex    clients_lock;
        struct list_head lru_handles;
        spinlock_t      lru_lock;
};

enum nvmap_stats_t {
        NS_ALLOC = 0,
        NS_RELEASE,
        NS_UALLOC,
        NS_URELEASE,
        NS_KALLOC,
        NS_KRELEASE,
        NS_CFLUSH_RQ,
        NS_CFLUSH_DONE,
        NS_UCFLUSH_RQ,
        NS_UCFLUSH_DONE,
        NS_KCFLUSH_RQ,
        NS_KCFLUSH_DONE,
        NS_TOTAL,
        NS_NUM,
};

struct nvmap_stats {
        atomic64_t stats[NS_NUM];
        atomic64_t collect;
};

extern struct nvmap_stats nvmap_stats;

void nvmap_stats_inc(enum nvmap_stats_t, size_t size);
void nvmap_stats_dec(enum nvmap_stats_t, size_t size);
u64 nvmap_stats_read(enum nvmap_stats_t);

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

/*
 * NOTE: this does not ensure the continued existence of the underlying
 * dma_buf. If you want to ensure the existence of the dma_buf you must get
 * an nvmap_handle_ref, as that is what tracks the dma_buf refs.
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s attempt to get a freed handle\n",
                        __func__, current->group_leader->comm);
                atomic_dec(&h->ref);
                return NULL;
        }
        return h;
}

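/*
 * Typical pairing (a sketch; callers elsewhere in the driver may obtain the
 * reference through validation or an nvmap_handle_ref instead):
 *
 *	h = nvmap_handle_get(h);
 *	if (!h)
 *		return -EINVAL;		// handle was already being freed
 *	...use h...
 *	nvmap_handle_put(h);
 */
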
static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                return pgprot_noncached(prot);
        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_writecombine(prot);
        return prot;
}

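/*
 * Example (sketch only): a mapping path would typically derive the final
 * protection from the handle's cache flags before inserting pages, e.g.
 *
 *	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
 */
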
#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);

#endif /* !CONFIG_TEGRA_NVMAP */

struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;

int nvmap_find_cache_maint_op(struct nvmap_device *dev,
                struct nvmap_handle *h);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *priv,
                                                 struct nvmap_handle *h);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align, u8 kind,
                       unsigned int flags);

void nvmap_free_handle(struct nvmap_client *c, struct nvmap_handle *h);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, struct nvmap_handle * const *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, struct nvmap_handle * const *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, struct nvmap_handle *h);
struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
                                                 int fd);

int nvmap_get_handle_param(struct nvmap_client *client,
                struct nvmap_handle_ref *ref, u32 param, u64 *result);

struct nvmap_client *nvmap_client_get(struct nvmap_client *client);

void nvmap_client_put(struct nvmap_client *c);

struct nvmap_handle *unmarshal_user_handle(__u32 handle);

static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
        flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
        FLUSH_TLB_PAGE(kaddr);
#endif
}

/* MM definitions. */
extern size_t cache_maint_outer_threshold;
extern int inner_cache_maint_threshold;

extern void v7_flush_kern_cache_all(void);
extern void v7_clean_kern_cache_all(void *);
extern void __flush_dcache_all(void *arg);
extern void __clean_dcache_all(void *arg);

void inner_flush_cache_all(void);
void inner_clean_cache_all(void);
void nvmap_flush_cache(struct page **pages, int numpages);

int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr);

/* Internal API to support dmabuf */
struct dma_buf *__nvmap_dmabuf_export(struct nvmap_client *client,
                                 struct nvmap_handle *handle);
struct dma_buf *__nvmap_make_dmabuf(struct nvmap_client *client,
                                    struct nvmap_handle *handle);
struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                                  struct nvmap_handle *h);
void __nvmap_free_sg_table(struct nvmap_client *client,
                           struct nvmap_handle *h, struct sg_table *sgt);
void *__nvmap_kmap(struct nvmap_handle *h, unsigned int pagenum);
void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum, void *addr);
void *__nvmap_mmap(struct nvmap_handle *h);
void __nvmap_munmap(struct nvmap_handle *h, void *addr);
int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma);
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result);
int __nvmap_do_cache_maint(struct nvmap_client *client, struct nvmap_handle *h,
                           unsigned long start, unsigned long end,
                           unsigned int op, bool clean_only_dirty);
struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name);
struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref);
struct nvmap_handle *__nvmap_ref_to_id(struct nvmap_handle_ref *ref);
int __nvmap_pin(struct nvmap_handle_ref *ref, phys_addr_t *phys);
void __nvmap_unpin(struct nvmap_handle_ref *ref);
int __nvmap_dmabuf_fd(struct dma_buf *dmabuf, int flags);

void nvmap_dmabuf_debugfs_init(struct dentry *nvmap_root);
int nvmap_dmabuf_stash_init(void);

void *nvmap_altalloc(size_t len);
void nvmap_altfree(void *ptr, size_t len);

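/*
 * The low two bits of each entry in pgalloc.pages are used as per-page state
 * bits: bit 0 marks the page dirty, bit 1 marks it reserved. The helpers
 * below set, clear and test those bits, and nvmap_to_page() masks them off
 * to recover the real struct page pointer.
 */
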
static inline struct page *nvmap_to_page(struct page *page)
{
        return (struct page *)((unsigned long)page & ~3UL);
}

static inline bool nvmap_page_dirty(struct page *page)
{
        return (unsigned long)page & 1UL;
}

static inline void nvmap_page_mkdirty(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 1UL);
}

static inline void nvmap_page_mkclean(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~1UL);
}

static inline bool nvmap_page_reserved(struct page *page)
{
        return !!((unsigned long)page & 2UL);
}

static inline void nvmap_page_mkreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page | 2UL);
}

static inline void nvmap_page_mkunreserved(struct page **page)
{
        *page = (struct page *)((unsigned long)*page & ~2UL);
}

/*
 * FIXME: assume user space requests for reserve operations
 * are page aligned
 */
static inline void nvmap_handle_mk(struct nvmap_handle *h,
                                   u32 offset, u32 size,
                                   void (*fn)(struct page **))
{
        int i;
        int start_page = PAGE_ALIGN(offset) >> PAGE_SHIFT;
        int end_page = (offset + size) >> PAGE_SHIFT;

        if (h->heap_pgalloc) {
                for (i = start_page; i < end_page; i++)
                        fn(&h->pgalloc.pages[i]);
        }
}

static inline void nvmap_handle_mkclean(struct nvmap_handle *h,
                                        u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkclean);
}

static inline void nvmap_handle_mkunreserved(struct nvmap_handle *h,
                                             u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkunreserved);
}

static inline void nvmap_handle_mkreserved(struct nvmap_handle *h,
                                           u32 offset, u32 size)
{
        nvmap_handle_mk(h, offset, size, nvmap_page_mkreserved);
}

static inline struct page **nvmap_pages(struct page **pg_pages, u32 nr_pages)
{
        struct page **pages;
        int i;

        pages = nvmap_altalloc(sizeof(*pages) * nr_pages);
        if (!pages)
                return NULL;

        for (i = 0; i < nr_pages; i++)
                pages[i] = nvmap_to_page(pg_pages[i]);

        return pages;
}

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size);

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr);

void nvmap_vma_open(struct vm_area_struct *vma);

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets,
                        u32 *sizes, u32 nr, u32 op);

static inline void nvmap_kmaps_inc(struct nvmap_handle *h)
{
        mutex_lock(&h->lock);
        atomic_inc(&h->kmap_count);
        mutex_unlock(&h->lock);
}

static inline void nvmap_kmaps_inc_no_lock(struct nvmap_handle *h)
{
        atomic_inc(&h->kmap_count);
}

static inline void nvmap_kmaps_dec(struct nvmap_handle *h)
{
        atomic_dec(&h->kmap_count);
}

static inline void nvmap_umaps_inc(struct nvmap_handle *h)
{
        mutex_lock(&h->lock);
        atomic_inc(&h->umap_count);
        mutex_unlock(&h->lock);
}

static inline void nvmap_umaps_dec(struct nvmap_handle *h)
{
        atomic_dec(&h->umap_count);
}

static inline void nvmap_lru_add(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        BUG_ON(!list_empty(&h->lru));
        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
        spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_del(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        list_del(&h->lru);
        INIT_LIST_HEAD(&h->lru);
        spin_unlock(&nvmap_dev->lru_lock);
}

static inline void nvmap_lru_reset(struct nvmap_handle *h)
{
        spin_lock(&nvmap_dev->lru_lock);
        BUG_ON(list_empty(&h->lru));
        list_del(&h->lru);
        list_add_tail(&h->lru, &nvmap_dev->lru_handles);
        spin_unlock(&nvmap_dev->lru_lock);
}

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */