1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/kernel.h>
28 #include <linux/device.h>
29 #include <linux/oom.h>
30 #include <linux/platform_device.h>
31 #include <linux/seq_file.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/nvmap.h>
37 #include <linux/module.h>
38 #include <linux/resource.h>
39 #include <linux/security.h>
40 #include <linux/stat.h>
41 #include <linux/kthread.h>
42
43 #include <asm/cputype.h>
44
45 #define CREATE_TRACE_POINTS
46 #include <trace/events/nvmap.h>
47
48 #include "nvmap_priv.h"
49 #include "nvmap_ioctl.h"
50
51 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
52
53 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
54 size_t cache_maint_inner_threshold = SZ_2M;
55 #endif
56 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
57 size_t cache_maint_outer_threshold = SZ_1M;
58 #endif
59
60 struct nvmap_carveout_node {
61         unsigned int            heap_bit;
62         struct nvmap_heap       *carveout;
63         int                     index;
64         struct list_head        clients;
65         spinlock_t              clients_lock;
66         phys_addr_t                     base;
67         size_t                  size;
68 };
69
70 struct platform_device *nvmap_pdev;
71 EXPORT_SYMBOL(nvmap_pdev);
72 struct nvmap_device *nvmap_dev;
73 EXPORT_SYMBOL(nvmap_dev);
74 struct nvmap_stats nvmap_stats;
75 EXPORT_SYMBOL(nvmap_stats);
76
77 static struct backing_dev_info nvmap_bdi = {
78         .ra_pages       = 0,
79         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
80                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
81 };
82
83 static struct device_dma_parameters nvmap_dma_parameters = {
84         .max_segment_size = UINT_MAX,
85 };
86
87 static int nvmap_open(struct inode *inode, struct file *filp);
88 static int nvmap_release(struct inode *inode, struct file *filp);
89 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
90 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
91 static void nvmap_vma_close(struct vm_area_struct *vma);
92 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
93
94 static const struct file_operations nvmap_user_fops = {
95         .owner          = THIS_MODULE,
96         .open           = nvmap_open,
97         .release        = nvmap_release,
98         .unlocked_ioctl = nvmap_ioctl,
99 #ifdef CONFIG_COMPAT
100         .compat_ioctl = nvmap_ioctl,
101 #endif
102         .mmap           = nvmap_map,
103 };
104
105 static struct vm_operations_struct nvmap_vma_ops = {
106         .open           = nvmap_vma_open,
107         .close          = nvmap_vma_close,
108         .fault          = nvmap_vma_fault,
109 };
110
111 int is_nvmap_vma(struct vm_area_struct *vma)
112 {
113         return vma->vm_ops == &nvmap_vma_ops;
114 }
115
116 struct device *nvmap_client_to_device(struct nvmap_client *client)
117 {
118         if (!client)
119                 return NULL;
120         return nvmap_dev->dev_user.this_device;
121 }
122
123 /* allocates a PTE for the caller's use; returns the PTE pointer or
124  * a negative errno. not safe from IRQs */
125 pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
126 {
127         unsigned long bit;
128
129         spin_lock(&dev->ptelock);
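        /* scan for a free PTE slot starting just past the last one handed
         * out, wrapping back to the start of the bitmap if needed */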
130         bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
131         if (bit == NVMAP_NUM_PTES) {
132                 bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
133                 if (bit == dev->lastpte)
134                         bit = NVMAP_NUM_PTES;
135         }
136
137         if (bit == NVMAP_NUM_PTES) {
138                 spin_unlock(&dev->ptelock);
139                 return ERR_PTR(-ENOMEM);
140         }
141
142         dev->lastpte = bit;
143         set_bit(bit, dev->ptebits);
144         spin_unlock(&dev->ptelock);
145
146         *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
147         return &(dev->ptes[bit]);
148 }
149
150 /* allocates a PTE for the caller's use; returns the PTE pointer or
151  * a negative errno. must be called from sleepable contexts */
152 pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
153 {
154         int ret;
155         pte_t **pte;
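        /* sleep until nvmap_free_pte() releases a slot and wakes pte_wait */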
156         ret = wait_event_interruptible(dev->pte_wait,
157                         !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
158
159         if (ret == -ERESTARTSYS)
160                 return ERR_PTR(-EINTR);
161
162         return pte;
163 }
164
165 /* frees a PTE */
166 void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
167 {
168         unsigned long addr;
169         unsigned int bit = pte - dev->ptes;
170
171         if (WARN_ON(bit >= NVMAP_NUM_PTES))
172                 return;
173
174         addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
175         set_pte_at(&init_mm, addr, *pte, 0);
176
177         spin_lock(&dev->ptelock);
178         clear_bit(bit, dev->ptebits);
179         spin_unlock(&dev->ptelock);
180         wake_up(&dev->pte_wait);
181 }
182
183 /* get pte for the virtual address */
184 pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr)
185 {
186         unsigned int bit;
187
188         BUG_ON(vaddr < (unsigned long)dev->vm_rgn->addr);
189         bit = (vaddr - (unsigned long)dev->vm_rgn->addr) >> PAGE_SHIFT;
190         BUG_ON(bit >= NVMAP_NUM_PTES);
191         return &(dev->ptes[bit]);
192 }
193
194 /*
195  * Verifies that the passed handle is valid for the given client and, if so,
196  * returns the client's reference to that handle.
197  *
198  * Note: the caller must hold the client's ref lock.
199  */
200 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
201                                                  struct nvmap_handle *h)
202 {
203         struct rb_node *n = c->handle_refs.rb_node;
204
205         while (n) {
206                 struct nvmap_handle_ref *ref;
207                 ref = rb_entry(n, struct nvmap_handle_ref, node);
208                 if (ref->handle == h)
209                         return ref;
210                 else if ((uintptr_t)h > (uintptr_t)ref->handle)
211                         n = n->rb_right;
212                 else
213                         n = n->rb_left;
214         }
215
216         return NULL;
217 }
218
219 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
220                                    struct nvmap_heap_block *b)
221 {
222         struct nvmap_heap *h = nvmap_block_to_heap(b);
223         struct nvmap_carveout_node *n;
224         int i;
225
226         for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
227                 n = &nvmap_dev->heaps[i];
228                 if (n->carveout == h)
229                         return n->heap_bit;
230         }
231         return 0;
232 }
233
234 /*
235  * This routine flushes carveout memory from the cache.
236  * Why is a cache flush needed for carveouts? Consider a piece of carveout
237  * memory that is allocated as cached and then released. If the same memory
238  * is later allocated for an uncached request without being flushed from the
239  * cache, the client might pass it to a H/W engine, which could start
240  * modifying it. Because the memory was cached earlier, part of it may still
241  * reside in the cache. When the CPU later reads or writes other memory,
242  * those stale cache lines may be written back to main memory and cause
243  * corruption if that happens after the H/W engine has written its data.
244  *
245  * However, blindly flushing the memory on every carveout allocation is
246  * redundant.
247  *
248  * To optimize carveout buffer cache flushes, the following strategy is used:
249  *
250  * The whole carveout is flushed from the cache during its initialization.
251  * During allocation, carveout buffers are not flushed from the cache.
252  * During deallocation, carveout buffers are flushed if they were allocated
253  * as cached; if they were allocated as uncached/write-combined, no cache
254  * flush is needed. Draining the store buffers is enough.
255  */
256 int nvmap_flush_heap_block(struct nvmap_client *client,
257         struct nvmap_heap_block *block, size_t len, unsigned int prot)
258 {
259         pte_t **pte;
260         void *addr;
261         uintptr_t kaddr;
262         phys_addr_t phys = block->base;
263         phys_addr_t end = block->base + len;
264
265         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
266                 goto out;
267
268 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
269         if (len >= cache_maint_inner_threshold) {
270                 inner_flush_cache_all();
271                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
272                         outer_flush_range(block->base, block->base + len);
273                 goto out;
274         }
275 #endif
276
277         pte = nvmap_alloc_pte(nvmap_dev, &addr);
278         if (IS_ERR(pte))
279                 return PTR_ERR(pte);
280
281         kaddr = (uintptr_t)addr;
282
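        /* walk the block one page at a time: map each physical page into the
         * reserved PTE window, flush its cache lines, then advance */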
283         while (phys < end) {
284                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
285                 unsigned long pfn = __phys_to_pfn(phys);
286                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
287
288                 next = min(next, end);
289                 set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, PG_PROT_KERNEL));
290                 nvmap_flush_tlb_kernel_page(kaddr);
291                 FLUSH_DCACHE_AREA(base, next - phys);
292                 phys = next;
293         }
294
295         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
296                 outer_flush_range(block->base, block->base + len);
297
298         nvmap_free_pte(nvmap_dev, pte);
299 out:
300         wmb();
301         return 0;
302 }
303
304 static
305 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
306                                               struct nvmap_handle *handle,
307                                               unsigned long type)
308 {
309         struct nvmap_carveout_node *co_heap;
310         struct nvmap_device *dev = nvmap_dev;
311         int i;
312
313         for (i = 0; i < dev->nr_carveouts; i++) {
314                 struct nvmap_heap_block *block;
315                 co_heap = &dev->heaps[i];
316
317                 if (!(co_heap->heap_bit & type))
318                         continue;
319
320                 block = nvmap_heap_alloc(co_heap->carveout, handle);
321                 if (block)
322                         return block;
323         }
324         return NULL;
325 }
326
327 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
328                                               struct nvmap_handle *handle,
329                                               unsigned long type)
330 {
331         return do_nvmap_carveout_alloc(client, handle, type);
332 }
333
334 /* remove a handle from the device's tree of all handles; called
335  * when freeing handles. */
336 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
337 {
338         spin_lock(&dev->handle_lock);
339
340         /* re-test inside the spinlock if the handle really has no clients;
341          * only remove the handle if it is unreferenced */
342         if (atomic_add_return(0, &h->ref) > 0) {
343                 spin_unlock(&dev->handle_lock);
344                 return -EBUSY;
345         }
346         smp_rmb();
347         BUG_ON(atomic_read(&h->ref) < 0);
348         BUG_ON(atomic_read(&h->pin) != 0);
349
350         nvmap_lru_del(h);
351         rb_erase(&h->node, &dev->handles);
352
353         spin_unlock(&dev->handle_lock);
354         return 0;
355 }
356
357 /* adds a newly-created handle to the device master tree */
358 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
359 {
360         struct rb_node **p;
361         struct rb_node *parent = NULL;
362
363         spin_lock(&dev->handle_lock);
364         p = &dev->handles.rb_node;
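        /* the tree is keyed by the handle's kernel virtual address */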
365         while (*p) {
366                 struct nvmap_handle *b;
367
368                 parent = *p;
369                 b = rb_entry(parent, struct nvmap_handle, node);
370                 if (h > b)
371                         p = &parent->rb_right;
372                 else
373                         p = &parent->rb_left;
374         }
375         rb_link_node(&h->node, parent, p);
376         rb_insert_color(&h->node, &dev->handles);
377         nvmap_lru_add(h);
378         spin_unlock(&dev->handle_lock);
379 }
380
381 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
382                                            const char *name)
383 {
384         struct nvmap_client *client;
385         struct task_struct *task;
386
387         if (WARN_ON(!dev))
388                 return NULL;
389
390         client = kzalloc(sizeof(*client), GFP_KERNEL);
391         if (!client)
392                 return NULL;
393
394         client->name = name;
395         client->kernel_client = true;
396         client->handle_refs = RB_ROOT;
397
398         get_task_struct(current->group_leader);
399         task_lock(current->group_leader);
400         /* don't bother to store task struct for kernel threads,
401            they can't be killed anyway */
402         if (current->flags & PF_KTHREAD) {
403                 put_task_struct(current->group_leader);
404                 task = NULL;
405         } else {
406                 task = current->group_leader;
407         }
408         task_unlock(current->group_leader);
409         client->task = task;
410
411         mutex_init(&client->ref_lock);
412         atomic_set(&client->count, 1);
413
414         mutex_lock(&dev->clients_lock);
415         list_add(&client->list, &dev->clients);
416         mutex_unlock(&dev->clients_lock);
417         return client;
418 }
419
420 static void destroy_client(struct nvmap_client *client)
421 {
422         struct rb_node *n;
423
424         if (!client)
425                 return;
426
427         mutex_lock(&nvmap_dev->clients_lock);
428         list_del(&client->list);
429         mutex_unlock(&nvmap_dev->clients_lock);
430
431         while ((n = rb_first(&client->handle_refs))) {
432                 struct nvmap_handle_ref *ref;
433                 int pins, dupes;
434
435                 ref = rb_entry(n, struct nvmap_handle_ref, node);
436
437                 smp_rmb();
438                 pins = atomic_read(&ref->pin);
439
440                 while (pins--)
441                         __nvmap_unpin(ref);
442
443                 if (ref->handle->owner == client) {
444                         ref->handle->owner = NULL;
445                         ref->handle->owner_ref = NULL;
446                 }
447
448                 dma_buf_put(ref->handle->dmabuf);
449                 rb_erase(&ref->node, &client->handle_refs);
450                 atomic_dec(&ref->handle->share_count);
451
452                 dupes = atomic_read(&ref->dupes);
453                 while (dupes--)
454                         nvmap_handle_put(ref->handle);
455
456                 kfree(ref);
457         }
458
459         if (client->task)
460                 put_task_struct(client->task);
461
462         kfree(client);
463 }
464
465 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
466 {
467         if (!virt_addr_valid(client))
468                 return NULL;
469
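        /* take a reference only while the count is non-zero, i.e. the client
         * is not already being torn down by nvmap_client_put() */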
470         if (!atomic_add_unless(&client->count, 1, 0))
471                 return NULL;
472
473         return client;
474 }
475
476 void nvmap_client_put(struct nvmap_client *client)
477 {
478         if (!client)
479                 return;
480
481         if (!atomic_dec_return(&client->count))
482                 destroy_client(client);
483 }
484
485 static int nvmap_open(struct inode *inode, struct file *filp)
486 {
487         struct miscdevice *miscdev = filp->private_data;
488         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
489         struct nvmap_client *priv;
490         int ret;
491         __attribute__((unused)) struct rlimit old_rlim, new_rlim;
492
493         ret = nonseekable_open(inode, filp);
494         if (unlikely(ret))
495                 return ret;
496
497         BUG_ON(dev != nvmap_dev);
498         priv = __nvmap_create_client(dev, "user");
499         if (!priv)
500                 return -ENOMEM;
501         trace_nvmap_open(priv, priv->name);
502
503         priv->kernel_client = false;
504
505         filp->f_mapping->backing_dev_info = &nvmap_bdi;
506
507         filp->private_data = priv;
508         return 0;
509 }
510
511 static int nvmap_release(struct inode *inode, struct file *filp)
512 {
513         struct nvmap_client *priv = filp->private_data;
514
515         trace_nvmap_release(priv, priv->name);
516         nvmap_client_put(priv);
517         return 0;
518 }
519
520 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
521 {
522         struct nvmap_vma_priv *priv;
523
524         h = nvmap_handle_get(h);
525         if (!h)
526                 return -EINVAL;
527
528         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
529         if (!priv)
530                 return -ENOMEM;
531         priv->handle = h;
532
533         vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
534                           VM_DONTDUMP | VM_DONTCOPY |
535                           (h->heap_pgalloc ? 0 : VM_PFNMAP);
536         vma->vm_ops = &nvmap_vma_ops;
537         BUG_ON(vma->vm_private_data != NULL);
538         vma->vm_private_data = priv;
539         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
540         nvmap_vma_open(vma);
541         return 0;
542 }
543
544 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
545 {
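        /* no handle is attached at mmap time; vm_private_data is populated
         * later by __nvmap_map(), and nvmap_vma_fault() returns SIGBUS for a
         * vma that has no handle attached */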
546         BUG_ON(vma->vm_private_data != NULL);
547         vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
548                           VM_DONTDUMP | VM_DONTCOPY);
549         vma->vm_ops = &nvmap_vma_ops;
550         return 0;
551 }
552
553 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
554 {
555         int err = 0;
556         void __user *uarg = (void __user *)arg;
557
558         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
559                 return -ENOTTY;
560
561         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
562                 return -ENOTTY;
563
564         if (_IOC_DIR(cmd) & _IOC_READ)
565                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
566         if (_IOC_DIR(cmd) & _IOC_WRITE)
567                 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
568
569         if (err)
570                 return -EFAULT;
571
572         switch (cmd) {
573         case NVMAP_IOC_CREATE:
574         case NVMAP_IOC_FROM_FD:
575                 err = nvmap_ioctl_create(filp, cmd, uarg);
576                 break;
577
578         case NVMAP_IOC_FROM_ID:
579         case NVMAP_IOC_GET_ID:
580                 pr_warn_once("nvmap: unsupported FROM_ID/GET_ID IOCTLs used.\n");
581                 return -ENOTTY;
582
583         case NVMAP_IOC_GET_FD:
584                 err = nvmap_ioctl_getfd(filp, uarg);
585                 break;
586
587 #ifdef CONFIG_COMPAT
588         case NVMAP_IOC_PARAM_32:
589                 err = nvmap_ioctl_get_param(filp, uarg, true);
590                 break;
591 #endif
592
593         case NVMAP_IOC_PARAM:
594                 err = nvmap_ioctl_get_param(filp, uarg, false);
595                 break;
596
597 #ifdef CONFIG_COMPAT
598         case NVMAP_IOC_UNPIN_MULT_32:
599         case NVMAP_IOC_PIN_MULT_32:
600                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
601                         uarg, true);
602                 break;
603 #endif
604
605         case NVMAP_IOC_UNPIN_MULT:
606         case NVMAP_IOC_PIN_MULT:
607                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
608                         uarg, false);
609                 break;
610
611         case NVMAP_IOC_ALLOC:
612                 err = nvmap_ioctl_alloc(filp, uarg);
613                 break;
614
615         case NVMAP_IOC_ALLOC_KIND:
616                 err = nvmap_ioctl_alloc_kind(filp, uarg);
617                 break;
618
619         case NVMAP_IOC_FREE:
620                 err = nvmap_ioctl_free(filp, arg);
621                 break;
622
623 #ifdef CONFIG_COMPAT
624         case NVMAP_IOC_MMAP_32:
625                 err = nvmap_map_into_caller_ptr(filp, uarg, true);
626                 break;
627 #endif
628
629         case NVMAP_IOC_MMAP:
630                 err = nvmap_map_into_caller_ptr(filp, uarg, false);
631                 break;
632
633 #ifdef CONFIG_COMPAT
634         case NVMAP_IOC_WRITE_32:
635         case NVMAP_IOC_READ_32:
636                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
637                         uarg, true);
638                 break;
639 #endif
640
641         case NVMAP_IOC_WRITE:
642         case NVMAP_IOC_READ:
643                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
644                         false);
645                 break;
646
647 #ifdef CONFIG_COMPAT
648         case NVMAP_IOC_CACHE_32:
649                 err = nvmap_ioctl_cache_maint(filp, uarg, true);
650                 break;
651 #endif
652
653         case NVMAP_IOC_CACHE:
654                 err = nvmap_ioctl_cache_maint(filp, uarg, false);
655                 break;
656
657         case NVMAP_IOC_CACHE_LIST:
658         case NVMAP_IOC_RESERVE:
659                 err = nvmap_ioctl_cache_maint_list(filp, uarg,
660                                                    cmd == NVMAP_IOC_RESERVE);
661                 break;
662
663         case NVMAP_IOC_SHARE:
664                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
665                 break;
666
667         default:
668                 return -ENOTTY;
669         }
670         return err;
671 }
672
673 /* to ensure that the backing store for the VMA isn't freed while a fork'd
674  * reference still exists, nvmap_vma_open increments the reference count on
675  * the handle, and nvmap_vma_close decrements it. alternatively, we could
676  * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
677 */
678 void nvmap_vma_open(struct vm_area_struct *vma)
679 {
680         struct nvmap_vma_priv *priv;
681         struct nvmap_handle *h;
682         struct nvmap_vma_list *vma_list, *tmp;
683         struct list_head *tmp_head = NULL;
684         pid_t current_pid = current->pid;
685         bool vma_pos_found = false;
686
687         priv = vma->vm_private_data;
688         BUG_ON(!priv);
689         BUG_ON(!priv->handle);
690
691         atomic_inc(&priv->count);
692         h = priv->handle;
693         nvmap_umaps_inc(h);
694
695         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
696         if (vma_list) {
697                 mutex_lock(&h->lock);
698                 tmp_head = &h->vmas;
699
700                 /* insert vma into handle's vmas list in the increasing order of
701                  * handle offsets
702                  */
703                 list_for_each_entry(tmp, &h->vmas, list) {
704                         BUG_ON(tmp->vma == vma);
705
706                         if (!vma_pos_found && (current_pid == tmp->pid)) {
707                                 if (vma->vm_pgoff < tmp->vma->vm_pgoff) {
708                                         tmp_head = &tmp->list;
709                                         vma_pos_found = true;
710                                 } else {
711                                         tmp_head = tmp->list.next;
712                                 }
713                         }
714                 }
715
716                 vma_list->vma = vma;
717                 vma_list->pid = current_pid;
718                 list_add_tail(&vma_list->list, tmp_head);
719                 mutex_unlock(&h->lock);
720         } else {
721                 WARN(1, "vma not tracked");
722         }
723 }
724
725 static void nvmap_vma_close(struct vm_area_struct *vma)
726 {
727         struct nvmap_vma_priv *priv = vma->vm_private_data;
728         struct nvmap_vma_list *vma_list;
729         struct nvmap_handle *h;
730         bool vma_found = false;
731
732         if (!priv)
733                 return;
734
735         BUG_ON(!priv->handle);
736
737         h = priv->handle;
738         mutex_lock(&h->lock);
739         list_for_each_entry(vma_list, &h->vmas, list) {
740                 if (vma_list->vma != vma)
741                         continue;
742                 list_del(&vma_list->list);
743                 kfree(vma_list);
744                 vma_found = true;
745                 break;
746         }
747         BUG_ON(!vma_found);
748         mutex_unlock(&h->lock);
749
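        /* drop this vma's reference; the last close frees the private data
         * and releases the handle reference taken in __nvmap_map() */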
750         if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
751                 if (priv->handle)
752                         nvmap_handle_put(priv->handle);
753                 vma->vm_private_data = NULL;
754                 kfree(priv);
755         }
756         nvmap_umaps_dec(h);
757 }
758
759 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
760 {
761         struct page *page;
762         struct nvmap_vma_priv *priv;
763         unsigned long offs;
764
765         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
766         priv = vma->vm_private_data;
767         if (!priv || !priv->handle || !priv->handle->alloc)
768                 return VM_FAULT_SIGBUS;
769
770         offs += priv->offs;
771         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
772          * offset from the original VMA */
773         offs += (vma->vm_pgoff << PAGE_SHIFT);
774
775         if (offs >= priv->handle->size)
776                 return VM_FAULT_SIGBUS;
777
778         if (!priv->handle->heap_pgalloc) {
779                 unsigned long pfn;
780                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
781                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
782                 if (!pfn_valid(pfn)) {
783                         vm_insert_pfn(vma,
784                                 (unsigned long)vmf->virtual_address, pfn);
785                         return VM_FAULT_NOPAGE;
786                 }
787                 /* CMA memory would get here */
788                 page = pfn_to_page(pfn);
789         } else {
790                 offs >>= PAGE_SHIFT;
791                 if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
792                         return VM_FAULT_SIGBUS;
793                 page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
794                 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
795         }
796
797         if (page)
798                 get_page(page);
799         vmf->page = page;
800         return (page) ? 0 : VM_FAULT_SIGBUS;
801 }
802
803 #define DEBUGFS_OPEN_FOPS(name) \
804 static int nvmap_debug_##name##_open(struct inode *inode, \
805                                             struct file *file) \
806 { \
807         return single_open(file, nvmap_debug_##name##_show, \
808                             inode->i_private); \
809 } \
810 \
811 static const struct file_operations debug_##name##_fops = { \
812         .open = nvmap_debug_##name##_open, \
813         .read = seq_read, \
814         .llseek = seq_lseek, \
815         .release = single_release, \
816 }
817
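/* convert a byte count to kibibytes for the debugfs reports below */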
818 #define K(x) ((x) >> 10)
819
820 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
821 {
822         char task_comm[TASK_COMM_LEN];
823         if (!client->task) {
824                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
825                 return;
826         }
827         get_task_comm(task_comm, client->task);
828         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
829                    client->task->pid);
830 }
831
832 static void allocations_stringify(struct nvmap_client *client,
833                                   struct seq_file *s, u32 heap_type)
834 {
835         struct rb_node *n;
836
837         nvmap_ref_lock(client);
838         n = rb_first(&client->handle_refs);
839         for (; n != NULL; n = rb_next(n)) {
840                 struct nvmap_handle_ref *ref =
841                         rb_entry(n, struct nvmap_handle_ref, node);
842                 struct nvmap_handle *handle = ref->handle;
843                 if (handle->alloc && handle->heap_type == heap_type) {
844                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
845                                            (handle->carveout->base);
846                         seq_printf(s,
847                                 "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %6u %6u\n",
848                                 "", "",
849                                 (unsigned long long)base, K(handle->size),
850                                 handle->userflags,
851                                 atomic_read(&handle->ref),
852                                 atomic_read(&ref->dupes),
853                                 atomic_read(&ref->pin),
854                                 atomic_read(&handle->kmap_count),
855                                 atomic_read(&handle->umap_count),
856                                 atomic_read(&handle->share_count));
857                 }
858         }
859         nvmap_ref_unlock(client);
860 }
861
862 /* compute the total amount of the handle's physical memory that is mapped
863  * into the client's virtual address space. Remember that the vmas list is
864  * sorted in ascending order of handle offsets.
865  * NOTE: this function must be called with the handle's lock mutex held.
866  */
867 static void nvmap_get_client_handle_mss(struct nvmap_client *client,
868                                 struct nvmap_handle *handle, u64 *total)
869 {
870         struct nvmap_vma_list *vma_list = NULL;
871         struct vm_area_struct *vma = NULL;
872         u64 end_offset = 0, vma_start_offset, vma_size;
873         int64_t overlap_size;
874
875         *total = 0;
876         list_for_each_entry(vma_list, &handle->vmas, list) {
877
878                 if (client->task->pid == vma_list->pid) {
879                         vma = vma_list->vma;
880                         vma_size = vma->vm_end - vma->vm_start;
881
882                         vma_start_offset = vma->vm_pgoff << PAGE_SHIFT;
883                         if (end_offset < vma_start_offset + vma_size) {
884                                 *total += vma_size;
885
886                                 overlap_size = end_offset - vma_start_offset;
887                                 if (overlap_size > 0)
888                                         *total -= overlap_size;
889                                 end_offset = vma_start_offset + vma_size;
890                         }
891                 }
892         }
893 }
894
895 static void maps_stringify(struct nvmap_client *client,
896                                 struct seq_file *s, u32 heap_type)
897 {
898         struct rb_node *n;
899         struct nvmap_vma_list *vma_list = NULL;
900         struct vm_area_struct *vma = NULL;
901         u64 total_mapped_size, vma_size;
902
903         nvmap_ref_lock(client);
904         n = rb_first(&client->handle_refs);
905         for (; n != NULL; n = rb_next(n)) {
906                 struct nvmap_handle_ref *ref =
907                         rb_entry(n, struct nvmap_handle_ref, node);
908                 struct nvmap_handle *handle = ref->handle;
909                 if (handle->alloc && handle->heap_type == heap_type) {
910                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
911                                            (handle->carveout->base);
912                         seq_printf(s,
913                                 "%-18s %-18s %8llx %10zuK %8x %6u %16p "
914                                 "%12s %12s ",
915                                 "", "",
916                                 (unsigned long long)base, K(handle->size),
917                                 handle->userflags,
918                                 atomic_read(&handle->share_count),
919                                 handle, "", "");
920
921                         mutex_lock(&handle->lock);
922                         nvmap_get_client_handle_mss(client, handle,
923                                                         &total_mapped_size);
924                         seq_printf(s, "%6lluK\n", K(total_mapped_size));
925
926                         list_for_each_entry(vma_list, &handle->vmas, list) {
927
928                                 if (vma_list->pid == client->task->pid) {
929                                         vma = vma_list->vma;
930                                         vma_size = vma->vm_end - vma->vm_start;
931                                         seq_printf(s,
932                                           "%-18s %-18s %8s %11s %8s %6s %16s "
933                                           "%-12lx-%12lx %6lluK\n",
934                                           "", "", "", "", "", "", "",
935                                           vma->vm_start, vma->vm_end,
936                                           K(vma_size));
937                                 }
938                         }
939                         mutex_unlock(&handle->lock);
940                 }
941         }
942         nvmap_ref_unlock(client);
943 }
944
945 static void nvmap_get_client_mss(struct nvmap_client *client,
946                                  u64 *total, u32 heap_type)
947 {
948         struct rb_node *n;
949
950         *total = 0;
951         nvmap_ref_lock(client);
952         n = rb_first(&client->handle_refs);
953         for (; n != NULL; n = rb_next(n)) {
954                 struct nvmap_handle_ref *ref =
955                         rb_entry(n, struct nvmap_handle_ref, node);
956                 struct nvmap_handle *handle = ref->handle;
957                 if (handle->alloc && handle->heap_type == heap_type)
958                         *total += handle->size /
959                                   atomic_read(&handle->share_count);
960         }
961         nvmap_ref_unlock(client);
962 }
963
964 static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
965                                       u64 *total, u32 heap_type)
966 {
967         int i;
968         struct rb_node *n;
969         struct nvmap_device *dev = nvmap_dev;
970
971         *total = 0;
972         if (pss)
973                 *pss = 0;
974         if (non_pss)
975                 *non_pss = 0;
976         if (!dev)
977                 return;
978         spin_lock(&dev->handle_lock);
979         n = rb_first(&dev->handles);
980         for (; n != NULL; n = rb_next(n)) {
981                 struct nvmap_handle *h =
982                         rb_entry(n, struct nvmap_handle, node);
983
984                 if (!h || !h->alloc || h->heap_type != heap_type)
985                         continue;
986                 if (!non_pss) {
987                         *total += h->size;
988                         continue;
989                 }
990
991                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
992                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
993                         int mapcount = page_mapcount(page);
994                         if (!mapcount)
995                                 *non_pss += PAGE_SIZE;
996                         *total += PAGE_SIZE;
997                 }
998         }
999         if (pss && non_pss)
1000                 *pss = *total - *non_pss;
1001         spin_unlock(&dev->handle_lock);
1002 }
1003
1004 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
1005 {
1006         u64 total;
1007         struct nvmap_client *client;
1008         u32 heap_type = (u32)(uintptr_t)s->private;
1009
1010         mutex_lock(&nvmap_dev->clients_lock);
1011         seq_printf(s, "%-18s %18s %8s %11s\n",
1012                 "CLIENT", "PROCESS", "PID", "SIZE");
1013         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
1014                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
1015                         "DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
1016         list_for_each_entry(client, &nvmap_dev->clients, list) {
1017                 u64 client_total;
1018                 client_stringify(client, s);
1019                 nvmap_get_client_mss(client, &client_total, heap_type);
1020                 seq_printf(s, " %10lluK\n", K(client_total));
1021                 allocations_stringify(client, s, heap_type);
1022                 seq_puts(s, "\n");
1023         }
1024         mutex_unlock(&nvmap_dev->clients_lock);
1025         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1026         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
1027         return 0;
1028 }
1029
1030 DEBUGFS_OPEN_FOPS(allocations);
1031
1032 static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
1033 {
1034         u64 total;
1035         struct nvmap_client *client;
1036         u32 heap_type = (u32)(uintptr_t)s->private;
1037
1038         mutex_lock(&nvmap_dev->clients_lock);
1039         seq_printf(s, "%-18s %18s %8s %11s\n",
1040                 "CLIENT", "PROCESS", "PID", "SIZE");
1041         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %9s %21s %18s\n",
1042                 "", "", "BASE", "SIZE", "FLAGS", "SHARE", "UID",
1043                 "MAPS", "MAPSIZE");
1044
1045         list_for_each_entry(client, &nvmap_dev->clients, list) {
1046                 u64 client_total;
1047                 client_stringify(client, s);
1048                 nvmap_get_client_mss(client, &client_total, heap_type);
1049                 seq_printf(s, " %10lluK\n", K(client_total));
1050                 maps_stringify(client, s, heap_type);
1051                 seq_puts(s, "\n");
1052         }
1053         mutex_unlock(&nvmap_dev->clients_lock);
1054
1055         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1056         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
1057         return 0;
1058 }
1059
1060 DEBUGFS_OPEN_FOPS(maps);
1061
1062 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
1063 {
1064         u64 total;
1065         struct nvmap_client *client;
1066         ulong heap_type = (ulong)s->private;
1067
1068         mutex_lock(&nvmap_dev->clients_lock);
1069         seq_printf(s, "%-18s %18s %8s %11s\n",
1070                 "CLIENT", "PROCESS", "PID", "SIZE");
1071         list_for_each_entry(client, &nvmap_dev->clients, list) {
1072                 u64 client_total;
1073                 client_stringify(client, s);
1074                 nvmap_get_client_mss(client, &client_total, heap_type);
1075                 seq_printf(s, " %10lluK\n", K(client_total));
1076         }
1077         mutex_unlock(&nvmap_dev->clients_lock);
1078         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1079         seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
1080         return 0;
1081 }
1082
1083 DEBUGFS_OPEN_FOPS(clients);
1084
1085 #define PRINT_MEM_STATS_NOTE(x) \
1086 { \
1087         seq_printf(s, "Note: total memory is a precise account of pages " \
1088                 "allocated by NvMap.\nIt does not match the sum of all " \
1089                 "clients' \"%s\" because shared memory\nis accounted in " \
1090                 "full in each client's \"%s\" that shares it.\n", #x, #x); \
1091 }
1092
1093 static int nvmap_debug_lru_allocations_show(struct seq_file *s, void *unused)
1094 {
1095         struct nvmap_handle *h;
1096         int total_handles = 0, migratable_handles = 0;
1097         size_t total_size = 0, migratable_size = 0;
1098
1099         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %8s\n",
1100                         "", "", "", "", "", "",
1101                         "", "PINS", "KMAPS", "UMAPS", "UID");
1102         spin_lock(&nvmap_dev->lru_lock);
1103         list_for_each_entry(h, &nvmap_dev->lru_handles, lru) {
1104                 total_handles++;
1105                 total_size += h->size;
1106                 if (!atomic_read(&h->pin) && !atomic_read(&h->kmap_count)) {
1107                         migratable_handles++;
1108                         migratable_size += h->size;
1109                 }
1110                 seq_printf(s, "%-18s %18s %8s %10zuK %8s %6s %6s %6u %6u "
1111                         "%6u %8p\n", "", "", "", K(h->size), "", "",
1112                         "", atomic_read(&h->pin),
1113                             atomic_read(&h->kmap_count),
1114                             atomic_read(&h->umap_count),
1115                             h);
1116         }
1117         seq_printf(s, "total_handles = %d, migratable_handles = %d,"
1118                 "total_size=%zuK, migratable_size=%zuK\n",
1119                 total_handles, migratable_handles,
1120                 K(total_size), K(migratable_size));
1121         spin_unlock(&nvmap_dev->lru_lock);
1122         PRINT_MEM_STATS_NOTE(SIZE);
1123         return 0;
1124 }
1125
1126 DEBUGFS_OPEN_FOPS(lru_allocations);
1127
1128 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
1129                                    u64 *non_pss, u64 *total)
1130 {
1131         int i;
1132         struct rb_node *n;
1133
1134         *pss = *non_pss = *total = 0;
1135         nvmap_ref_lock(client);
1136         n = rb_first(&client->handle_refs);
1137         for (; n != NULL; n = rb_next(n)) {
1138                 struct nvmap_handle_ref *ref =
1139                         rb_entry(n, struct nvmap_handle_ref, node);
1140                 struct nvmap_handle *h = ref->handle;
1141
1142                 if (!h || !h->alloc || !h->heap_pgalloc)
1143                         continue;
1144
1145                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
1146                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
1147                         int mapcount = page_mapcount(page);
1148                         if (!mapcount)
1149                                 *non_pss += PAGE_SIZE;
1150                         *total += PAGE_SIZE;
1151                 }
1152                 *pss = *total - *non_pss;
1153         }
1154         nvmap_ref_unlock(client);
1155 }
1156
1157 static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
1158 {
1159         u64 pss, non_pss, total;
1160         struct nvmap_client *client;
1161         struct nvmap_device *dev = s->private;
1162         u64 total_memory, total_pss, total_non_pss;
1163
1164         mutex_lock(&dev->clients_lock);
1165         seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
1166                 "CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
1167         list_for_each_entry(client, &dev->clients, list) {
1168                 client_stringify(client, s);
1169                 nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
1170                 seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
1171                         K(non_pss), K(total));
1172         }
1173         mutex_unlock(&dev->clients_lock);
1174
1175         nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory,
1176                 NVMAP_HEAP_IOVMM);
1177         seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
1178                 "total", "", "", K(total_pss),
1179                 K(total_non_pss), K(total_memory));
1180         PRINT_MEM_STATS_NOTE(TOTAL);
1181         return 0;
1182 }
1183
1184 DEBUGFS_OPEN_FOPS(iovmm_procrank);
1185
1186 ulong nvmap_iovmm_get_used_pages(void)
1187 {
1188         u64 total;
1189
1190         nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
1191         return total >> PAGE_SHIFT;
1192 }
1193
1194 static int nvmap_stats_reset(void *data, u64 val)
1195 {
1196         int i;
1197
1198         if (val) {
1199                 atomic64_set(&nvmap_stats.collect, 0);
1200                 for (i = 0; i < NS_NUM; i++) {
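                        /* NS_TOTAL is maintained even when collection is
                         * disabled (see nvmap_stats_inc()), so it is
                         * deliberately not cleared here */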
1201                         if (i == NS_TOTAL)
1202                                 continue;
1203                         atomic64_set(&nvmap_stats.stats[i], 0);
1204                 }
1205         }
1206         return 0;
1207 }
1208
1209 static int nvmap_stats_get(void *data, u64 *val)
1210 {
1211         atomic64_t *ptr = data;
1212
1213         *val = atomic64_read(ptr);
1214         return 0;
1215 }
1216
1217 static int nvmap_stats_set(void *data, u64 val)
1218 {
1219         atomic64_t *ptr = data;
1220
1221         atomic64_set(ptr, val);
1222         return 0;
1223 }
1224
1225 DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
1226 DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");
1227
1228 static void nvmap_stats_init(struct dentry *nvmap_debug_root)
1229 {
1230         struct dentry *stats_root;
1231
1232 #define CREATE_DF(x, y) \
1233         debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);
1234
1235         stats_root = debugfs_create_dir("stats", nvmap_debug_root);
1236         if (!IS_ERR_OR_NULL(stats_root)) {
1237                 CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
1238                 CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
1239                 CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
1240                 CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
1241                 CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
1242                 CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
1243                 CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
1244                 CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
1245                 CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
1246                 CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
1247                 CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
1248                 CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
1249                 CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);
1250
1251                 debugfs_create_file("collect", S_IRUGO | S_IWUSR,
1252                         stats_root, &nvmap_stats.collect, &stats_fops);
1253                 debugfs_create_file("reset", S_IWUSR,
1254                         stats_root, NULL, &reset_stats_fops);
1255         }
1256
1257 #undef CREATE_DF
1258 }
1259
1260 void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
1261 {
1262         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1263                 atomic64_add(size, &nvmap_stats.stats[stat]);
1264 }
1265
1266 void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
1267 {
1268         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1269                 atomic64_sub(size, &nvmap_stats.stats[stat]);
1270 }
1271
1272 u64 nvmap_stats_read(enum nvmap_stats_t stat)
1273 {
1274         return atomic64_read(&nvmap_stats.stats[stat]);
1275 }
1276
1277 static int nvmap_probe(struct platform_device *pdev)
1278 {
1279         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1280         struct nvmap_device *dev;
1281         struct dentry *nvmap_debug_root;
1282         unsigned int i;
1283         int e;
1284
1285         if (!plat) {
1286                 dev_err(&pdev->dev, "no platform data?\n");
1287                 return -ENODEV;
1288         }
1289
1290         /*
1291          * The DMA mapping API uses these parameters to decide how to map the
1292          * passed buffers. If the maximum physical segment size is set to a
1293          * value smaller than the size of the buffer, the buffer will be mapped
1294          * as separate IO virtual address ranges.
1295          */
1296         pdev->dev.dma_parms = &nvmap_dma_parameters;
1297
1298         if (WARN_ON(nvmap_dev != NULL)) {
1299                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1300                 return -ENODEV;
1301         }
1302
1303         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1304         if (!dev) {
1305                 dev_err(&pdev->dev, "out of memory for device\n");
1306                 return -ENOMEM;
1307         }
1308
1309         nvmap_dev = dev;
1310
1311         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1312         dev->dev_user.name = "nvmap";
1313         dev->dev_user.fops = &nvmap_user_fops;
1314         dev->dev_user.parent = &pdev->dev;
1315
1316         dev->handles = RB_ROOT;
1317
1318         init_waitqueue_head(&dev->pte_wait);
1319
1320 #ifdef CONFIG_NVMAP_PAGE_POOLS
1321         e = nvmap_page_pool_init(dev);
1322         if (e)
1323                 goto fail;
1324 #endif
1325
1326         dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE, NULL);
1327         if (!dev->vm_rgn) {
1328                 e = -ENOMEM;
1329                 dev_err(&pdev->dev, "couldn't allocate remapping region\n");
1330                 goto fail;
1331         }
1332
1333         spin_lock_init(&dev->ptelock);
1334         spin_lock_init(&dev->handle_lock);
1335         INIT_LIST_HEAD(&dev->clients);
1336         mutex_init(&dev->clients_lock);
1337         INIT_LIST_HEAD(&dev->lru_handles);
1338         spin_lock_init(&dev->lru_lock);
1339
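        /* pre-allocate the kernel page tables (pud/pmd/pte) backing the
         * remapping region so nvmap_alloc_pte() only has to install a PTE
         * entry at run time */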
1340         for (i = 0; i < NVMAP_NUM_PTES; i++) {
1341                 unsigned long addr;
1342                 pgd_t *pgd;
1343                 pud_t *pud;
1344                 pmd_t *pmd;
1345
1346                 addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
1347                 pgd = pgd_offset_k(addr);
1348                 pud = pud_alloc(&init_mm, pgd, addr);
1349                 if (!pud) {
1350                         e = -ENOMEM;
1351                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1352                         goto fail;
1353                 }
1354                 pmd = pmd_alloc(&init_mm, pud, addr);
1355                 if (!pmd) {
1356                         e = -ENOMEM;
1357                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1358                         goto fail;
1359                 }
1360                 dev->ptes[i] = pte_alloc_kernel(pmd, addr);
1361                 if (!dev->ptes[i]) {
1362                         e = -ENOMEM;
1363                         dev_err(&pdev->dev, "couldn't allocate page tables\n");
1364                         goto fail;
1365                 }
1366         }
1367
1368         e = misc_register(&dev->dev_user);
1369         if (e) {
1370                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1371                         dev->dev_user.name);
1372                 goto fail;
1373         }
1374
1375         dev->nr_carveouts = 0;
1376         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1377                              plat->nr_carveouts, GFP_KERNEL);
1378         if (!dev->heaps) {
1379                 e = -ENOMEM;
1380                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1381                 goto fail;
1382         }
1383
1384         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1385         if (IS_ERR_OR_NULL(nvmap_debug_root))
1386                 dev_err(&pdev->dev, "couldn't create debug files\n");
1387
1388         debugfs_create_u32("max_handle_count", S_IRUGO,
1389                         nvmap_debug_root, &nvmap_max_handle_count);
1390
1391         for (i = 0; i < plat->nr_carveouts; i++) {
1392                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1393                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1394                 node->base = round_up(co->base, PAGE_SIZE);
1395                 node->size = round_down(co->size -
1396                                         (node->base - co->base), PAGE_SIZE);
1397                 if (!co->size)
1398                         continue;
1399
1400                 node->carveout = nvmap_heap_create(
1401                                 dev->dev_user.this_device, co,
1402                                 node->base, node->size, node);
1403
1404                 if (!node->carveout) {
1405                         e = -ENOMEM;
1406                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1407                         goto fail_heaps;
1408                 }
1409                 node->index = dev->nr_carveouts;
1410                 dev->nr_carveouts++;
1411                 spin_lock_init(&node->clients_lock);
1412                 INIT_LIST_HEAD(&node->clients);
1413                 node->heap_bit = co->usage_mask;
1414
1415                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1416                         struct dentry *heap_root =
1417                                 debugfs_create_dir(co->name, nvmap_debug_root);
1418                         if (!IS_ERR_OR_NULL(heap_root)) {
1419                                 debugfs_create_file("clients", S_IRUGO,
1420                                         heap_root,
1421                                         (void *)(uintptr_t)node->heap_bit,
1422                                         &debug_clients_fops);
1423                                 debugfs_create_file("allocations", S_IRUGO,
1424                                         heap_root,
1425                                         (void *)(uintptr_t)node->heap_bit,
1426                                         &debug_allocations_fops);
1427                                 debugfs_create_file("maps", S_IRUGO,
1428                                         heap_root,
1429                                         (void *)(uintptr_t)node->heap_bit,
1430                                         &debug_maps_fops);
1431                         }
1432                 }
1433         }
1434         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1435                 struct dentry *iovmm_root =
1436                         debugfs_create_dir("iovmm", nvmap_debug_root);
1437                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1438                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1439                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1440                                 &debug_clients_fops);
1441                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1442                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1443                                 &debug_allocations_fops);
1444                         debugfs_create_file("maps", S_IRUGO, iovmm_root,
1445                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1446                                 &debug_maps_fops);
1447                         debugfs_create_file("procrank", S_IRUGO, iovmm_root,
1448                                 dev, &debug_iovmm_procrank_fops);
1449 #ifdef CONFIG_NVMAP_PAGE_POOLS
1450                         debugfs_create_u32("page_pool_available_pages",
1451                                            S_IRUGO, iovmm_root,
1452                                            &dev->pool.count);
1453 #ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
1454                         debugfs_create_u32("page_pool_alloc_ind",
1455                                            S_IRUGO, iovmm_root,
1456                                            &dev->pool.alloc);
1457                         debugfs_create_u32("page_pool_fill_ind",
1458                                            S_IRUGO, iovmm_root,
1459                                            &dev->pool.fill);
1460                         debugfs_create_u64("page_pool_allocs",
1461                                            S_IRUGO, iovmm_root,
1462                                            &dev->pool.allocs);
1463                         debugfs_create_u64("page_pool_fills",
1464                                            S_IRUGO, iovmm_root,
1465                                            &dev->pool.fills);
1466                         debugfs_create_u64("page_pool_hits",
1467                                            S_IRUGO, iovmm_root,
1468                                            &dev->pool.hits);
1469                         debugfs_create_u64("page_pool_misses",
1470                                            S_IRUGO, iovmm_root,
1471                                            &dev->pool.misses);
1472 #endif
1473 #endif
1474                 }
1475 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
1476                 debugfs_create_size_t("cache_maint_inner_threshold",
1477                                       S_IRUSR | S_IWUSR,
1478                                       nvmap_debug_root,
1479                                       &cache_maint_inner_threshold);
1480
1481                 /* cortex-a9 */
1482                 if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
1483                         cache_maint_inner_threshold = SZ_32K;
1484                 pr_info("nvmap: inner cache maint threshold=%zu\n",
1485                         cache_maint_inner_threshold);
1486 #endif
1487 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
1488                 debugfs_create_size_t("cache_maint_outer_threshold",
1489                                       S_IRUSR | S_IWUSR,
1490                                       nvmap_debug_root,
1491                                       &cache_maint_outer_threshold);
1492                 pr_info("nvmap: outer cache maint threshold=%zu\n",
1493                         cache_maint_outer_threshold);
1494 #endif
1495         }
1496
1497         nvmap_stats_init(nvmap_debug_root);
1498         platform_set_drvdata(pdev, dev);
1499         nvmap_pdev = pdev;
1500         nvmap_dev = dev;
1501
1502         nvmap_dmabuf_debugfs_init(nvmap_debug_root);
1503         e = nvmap_dmabuf_stash_init();
1504         if (e)
1505                 goto fail_heaps;
1506
1507         return 0;
1508 fail_heaps:
1509         for (i = 0; i < dev->nr_carveouts; i++) {
1510                 struct nvmap_carveout_node *node = &dev->heaps[i];
1511                 nvmap_heap_destroy(node->carveout);
1512         }
1513 fail:
1514 #ifdef CONFIG_NVMAP_PAGE_POOLS
1515         nvmap_page_pool_fini(nvmap_dev);
1516 #endif
1517         kfree(dev->heaps);
1518         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1519                 misc_deregister(&dev->dev_user);
1520         if (dev->vm_rgn)
1521                 free_vm_area(dev->vm_rgn);
1522         kfree(dev);
1523         nvmap_dev = NULL;
1524         return e;
1525 }
1526
1527 static int nvmap_remove(struct platform_device *pdev)
1528 {
1529         struct nvmap_device *dev = platform_get_drvdata(pdev);
1530         struct rb_node *n;
1531         struct nvmap_handle *h;
1532         int i;
1533
1534         misc_deregister(&dev->dev_user);
1535
1536         while ((n = rb_first(&dev->handles))) {
1537                 h = rb_entry(n, struct nvmap_handle, node);
1538                 rb_erase(&h->node, &dev->handles);
1539                 kfree(h);
1540         }
1541
1542         for (i = 0; i < dev->nr_carveouts; i++) {
1543                 struct nvmap_carveout_node *node = &dev->heaps[i];
1544                 nvmap_heap_destroy(node->carveout);
1545         }
1546         kfree(dev->heaps);
1547
1548         free_vm_area(dev->vm_rgn);
1549         kfree(dev);
1550         nvmap_dev = NULL;
1551         return 0;
1552 }
1553
1554 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1555 {
1556         return 0;
1557 }
1558
1559 static int nvmap_resume(struct platform_device *pdev)
1560 {
1561         return 0;
1562 }
1563
1564 static struct platform_driver nvmap_driver = {
1565         .probe          = nvmap_probe,
1566         .remove         = nvmap_remove,
1567         .suspend        = nvmap_suspend,
1568         .resume         = nvmap_resume,
1569
1570         .driver = {
1571                 .name   = "tegra-nvmap",
1572                 .owner  = THIS_MODULE,
1573         },
1574 };
1575
1576 static int __init nvmap_init_driver(void)
1577 {
1578         int e;
1579
1580         nvmap_dev = NULL;
1581
1582         e = nvmap_heap_init();
1583         if (e)
1584                 goto fail;
1585
1586         e = platform_driver_register(&nvmap_driver);
1587         if (e) {
1588                 nvmap_heap_deinit();
1589                 goto fail;
1590         }
1591
1592 fail:
1593         return e;
1594 }
1595 fs_initcall(nvmap_init_driver);
1596
1597 static void __exit nvmap_exit_driver(void)
1598 {
1599         platform_driver_unregister(&nvmap_driver);
1600         nvmap_heap_deinit();
1601         nvmap_dev = NULL;
1602 }
1603 module_exit(nvmap_exit_driver);