1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/device.h>
30 #include <linux/oom.h>
31 #include <linux/platform_device.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/nvmap.h>
38 #include <linux/module.h>
39 #include <linux/resource.h>
40 #include <linux/security.h>
41 #include <linux/stat.h>
42 #include <linux/kthread.h>
43
44 #include <asm/cputype.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/nvmap.h>
48
49 #include "nvmap_priv.h"
50 #include "nvmap_ioctl.h"
51
52 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
53
54 /* this is basically the L2 cache size */
55 #ifdef CONFIG_DENVER_CPU
56 size_t cache_maint_inner_threshold = SZ_2M * 8;
57 #else
58 size_t cache_maint_inner_threshold = SZ_2M;
59 #endif
60
61 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
62 size_t cache_maint_outer_threshold = SZ_1M;
63 #endif
64
65 struct nvmap_carveout_node {
66         unsigned int            heap_bit;
67         struct nvmap_heap       *carveout;
68         int                     index;
69         struct list_head        clients;
70         spinlock_t              clients_lock;
71         phys_addr_t                     base;
72         size_t                  size;
73 };
74
75 struct nvmap_device *nvmap_dev;
76 struct nvmap_stats nvmap_stats;
77
78 static struct backing_dev_info nvmap_bdi = {
79         .ra_pages       = 0,
80         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
81                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
82 };
83
84 static struct device_dma_parameters nvmap_dma_parameters = {
85         .max_segment_size = UINT_MAX,
86 };
87
88 static int nvmap_open(struct inode *inode, struct file *filp);
89 static int nvmap_release(struct inode *inode, struct file *filp);
90 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
91 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
92 static void nvmap_vma_close(struct vm_area_struct *vma);
93 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
94
95 static const struct file_operations nvmap_user_fops = {
96         .owner          = THIS_MODULE,
97         .open           = nvmap_open,
98         .release        = nvmap_release,
99         .unlocked_ioctl = nvmap_ioctl,
100 #ifdef CONFIG_COMPAT
101         .compat_ioctl = nvmap_ioctl,
102 #endif
103         .mmap           = nvmap_map,
104 };
105
106 static struct vm_operations_struct nvmap_vma_ops = {
107         .open           = nvmap_vma_open,
108         .close          = nvmap_vma_close,
109         .fault          = nvmap_vma_fault,
110 };
111
112 int is_nvmap_vma(struct vm_area_struct *vma)
113 {
114         return vma->vm_ops == &nvmap_vma_ops;
115 }
116
117 /*
118  * Looks up the passed client's reference to the given handle and returns
119  * it, or NULL if the client holds no reference to that handle.
120  *
121  * Note: the caller must hold the client's ref lock.
122  */
123 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
124                                                  struct nvmap_handle *h)
125 {
126         struct rb_node *n = c->handle_refs.rb_node;
127
128         while (n) {
129                 struct nvmap_handle_ref *ref;
130                 ref = rb_entry(n, struct nvmap_handle_ref, node);
131                 if (ref->handle == h)
132                         return ref;
133                 else if ((uintptr_t)h > (uintptr_t)ref->handle)
134                         n = n->rb_right;
135                 else
136                         n = n->rb_left;
137         }
138
139         return NULL;
140 }
141
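/*
 * Return the heap bit of the carveout heap that owns block @b, or 0 if the
 * block does not belong to any of the registered carveout heaps.
 */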
142 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
143                                    struct nvmap_heap_block *b)
144 {
145         struct nvmap_heap *h = nvmap_block_to_heap(b);
146         struct nvmap_carveout_node *n;
147         int i;
148
149         for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
150                 n = &nvmap_dev->heaps[i];
151                 if (n->carveout == h)
152                         return n->heap_bit;
153         }
154         return 0;
155 }
156
157 /*
158  * This routine flushes carveout memory from the cache.
159  * Why is a cache flush needed for carveout memory? Consider the case where
160  * a piece of carveout is allocated as cached and then released. If the same
161  * memory is later allocated for an uncached request without being flushed
162  * from the cache, the client might pass it to a H/W engine, which starts
163  * modifying it. Because the memory was cached earlier, part of it may still
164  * be sitting in the cache. When the CPU later reads or writes other memory,
165  * those stale cache lines can be written back to main memory and corrupt
166  * the buffer if that happens after the H/W has stored its data.
167  *
168  * However, blindly flushing the memory on every carveout allocation is
169  * redundant.
170  *
171  * To optimize carveout buffer cache flushes, the following strategy is used:
172  *
173  * The whole carveout is flushed from the cache during its initialization.
174  * During allocation, carveout buffers are not flushed from the cache.
175  * During deallocation, carveout buffers are flushed if they were allocated
176  * as cached; if they were allocated as uncached/write-combined, no cache
177  * flush is needed and draining the store buffers is enough.
178  */
179 int nvmap_flush_heap_block(struct nvmap_client *client,
180         struct nvmap_heap_block *block, size_t len, unsigned int prot)
181 {
182         ulong kaddr;
183         phys_addr_t phys = block->base;
184         phys_addr_t end = block->base + len;
185         struct vm_struct *area = NULL;
186
187         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
188                 goto out;
189
190 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
191         if (len >= cache_maint_inner_threshold) {
192                 inner_flush_cache_all();
193                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
194                         outer_flush_range(block->base, block->base + len);
195                 goto out;
196         }
197 #endif
198
199         area = alloc_vm_area(PAGE_SIZE, NULL);
200         if (!area)
201                 return -ENOMEM;
202
203         kaddr = (ulong)area->addr;
204
205         while (phys < end) {
206                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
207                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
208
209                 next = min(next, end);
210                 ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
211                         phys, PG_PROT_KERNEL);
212                 FLUSH_DCACHE_AREA(base, next - phys);
213                 phys = next;
214                 unmap_kernel_range(kaddr, PAGE_SIZE);
215         }
216
217         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
218                 outer_flush_range(block->base, block->base + len);
219
220         free_vm_area(area);
221 out:
222         wmb();
223         return 0;
224 }
225
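/*
 * Walk the registered carveout heaps and try to allocate @handle from the
 * first heap whose heap_bit matches the requested @type mask. Returns the
 * allocated block, or NULL if no matching heap could satisfy the request.
 */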
226 static
227 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
228                                               struct nvmap_handle *handle,
229                                               unsigned long type)
230 {
231         struct nvmap_carveout_node *co_heap;
232         struct nvmap_device *dev = nvmap_dev;
233         int i;
234
235         for (i = 0; i < dev->nr_carveouts; i++) {
236                 struct nvmap_heap_block *block;
237                 co_heap = &dev->heaps[i];
238
239                 if (!(co_heap->heap_bit & type))
240                         continue;
241
242                 block = nvmap_heap_alloc(co_heap->carveout, handle);
243                 if (block)
244                         return block;
245         }
246         return NULL;
247 }
248
249 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
250                                               struct nvmap_handle *handle,
251                                               unsigned long type)
252 {
253         return do_nvmap_carveout_alloc(client, handle, type);
254 }
255
256 /* remove a handle from the device's tree of all handles; called
257  * when freeing handles. */
258 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
259 {
260         spin_lock(&dev->handle_lock);
261
262         /* re-test inside the spinlock if the handle really has no clients;
263          * only remove the handle if it is unreferenced */
264         if (atomic_add_return(0, &h->ref) > 0) {
265                 spin_unlock(&dev->handle_lock);
266                 return -EBUSY;
267         }
268         smp_rmb();
269         BUG_ON(atomic_read(&h->ref) < 0);
270         BUG_ON(atomic_read(&h->pin) != 0);
271
272         rb_erase(&h->node, &dev->handles);
273
274         spin_unlock(&dev->handle_lock);
275         return 0;
276 }
277
278 /* adds a newly-created handle to the device master tree */
279 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
280 {
281         struct rb_node **p;
282         struct rb_node *parent = NULL;
283
284         spin_lock(&dev->handle_lock);
285         p = &dev->handles.rb_node;
286         while (*p) {
287                 struct nvmap_handle *b;
288
289                 parent = *p;
290                 b = rb_entry(parent, struct nvmap_handle, node);
291                 if (h > b)
292                         p = &parent->rb_right;
293                 else
294                         p = &parent->rb_left;
295         }
296         rb_link_node(&h->node, parent, p);
297         rb_insert_color(&h->node, &dev->handles);
298         spin_unlock(&dev->handle_lock);
299 }
300
301 /* Validates that a handle is present in the device master tree and, if so,
302  * takes a reference on it; returns NULL if the handle is not found. */
303 struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
304 {
305         struct nvmap_handle *h = NULL;
306         struct rb_node *n;
307
308         spin_lock(&nvmap_dev->handle_lock);
309
310         n = nvmap_dev->handles.rb_node;
311
312         while (n) {
313                 h = rb_entry(n, struct nvmap_handle, node);
314                 if (h == id) {
315                         h = nvmap_handle_get(h);
316                         spin_unlock(&nvmap_dev->handle_lock);
317                         return h;
318                 }
319                 if (id > h)
320                         n = n->rb_right;
321                 else
322                         n = n->rb_left;
323         }
324         spin_unlock(&nvmap_dev->handle_lock);
325         return NULL;
326 }
327
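/*
 * Allocate and register a new client on @dev. For user-space callers the
 * group leader's task_struct is pinned so the owning process can be reported
 * in debugfs; kernel threads are not tracked. The client starts with a
 * reference count of one and is added to the device's client list.
 */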
328 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
329                                            const char *name)
330 {
331         struct nvmap_client *client;
332         struct task_struct *task;
333
334         if (WARN_ON(!dev))
335                 return NULL;
336
337         client = kzalloc(sizeof(*client), GFP_KERNEL);
338         if (!client)
339                 return NULL;
340
341         client->name = name;
342         client->kernel_client = true;
343         client->handle_refs = RB_ROOT;
344
345         get_task_struct(current->group_leader);
346         task_lock(current->group_leader);
347         /* don't bother to store task struct for kernel threads,
348            they can't be killed anyway */
349         if (current->flags & PF_KTHREAD) {
350                 put_task_struct(current->group_leader);
351                 task = NULL;
352         } else {
353                 task = current->group_leader;
354         }
355         task_unlock(current->group_leader);
356         client->task = task;
357
358         mutex_init(&client->ref_lock);
359         atomic_set(&client->count, 1);
360
361         spin_lock(&dev->clients_lock);
362         list_add(&client->list, &dev->clients);
363         spin_unlock(&dev->clients_lock);
364         return client;
365 }
366
367 static void destroy_client(struct nvmap_client *client)
368 {
369         struct rb_node *n;
370
371         if (!client)
372                 return;
373
374         spin_lock(&nvmap_dev->clients_lock);
375         list_del(&client->list);
376         spin_unlock(&nvmap_dev->clients_lock);
377
378         while ((n = rb_first(&client->handle_refs))) {
379                 struct nvmap_handle_ref *ref;
380                 int pins, dupes;
381
382                 ref = rb_entry(n, struct nvmap_handle_ref, node);
383
384                 smp_rmb();
385                 pins = atomic_read(&ref->pin);
386
387                 while (pins--)
388                         __nvmap_unpin(ref);
389
390                 if (ref->handle->owner == client)
391                         ref->handle->owner = NULL;
392
393                 dma_buf_put(ref->handle->dmabuf);
394                 rb_erase(&ref->node, &client->handle_refs);
395                 atomic_dec(&ref->handle->share_count);
396
397                 dupes = atomic_read(&ref->dupes);
398                 while (dupes--)
399                         nvmap_handle_put(ref->handle);
400
401                 kfree(ref);
402         }
403
404         if (client->task)
405                 put_task_struct(client->task);
406
407         kfree(client);
408 }
409
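/*
 * Take an additional reference on @client. Returns NULL if the pointer is
 * not a valid kernel virtual address or if the reference count has already
 * dropped to zero (the client is being destroyed). nvmap_client_put() drops
 * a reference and destroys the client when the last one is released.
 */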
410 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
411 {
412         if (!virt_addr_valid(client))
413                 return NULL;
414
415         if (!atomic_add_unless(&client->count, 1, 0))
416                 return NULL;
417
418         return client;
419 }
420
421 void nvmap_client_put(struct nvmap_client *client)
422 {
423         if (!client)
424                 return;
425
426         if (!atomic_dec_return(&client->count))
427                 destroy_client(client);
428 }
429
430 static int nvmap_open(struct inode *inode, struct file *filp)
431 {
432         struct miscdevice *miscdev = filp->private_data;
433         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
434         struct nvmap_client *priv;
435         int ret;
436         __attribute__((unused)) struct rlimit old_rlim, new_rlim;
437
438         ret = nonseekable_open(inode, filp);
439         if (unlikely(ret))
440                 return ret;
441
442         BUG_ON(dev != nvmap_dev);
443         priv = __nvmap_create_client(dev, "user");
444         if (!priv)
445                 return -ENOMEM;
446         trace_nvmap_open(priv, priv->name);
447
448         priv->kernel_client = false;
449
450         filp->f_mapping->backing_dev_info = &nvmap_bdi;
451
452         filp->private_data = priv;
453         return 0;
454 }
455
456 static int nvmap_release(struct inode *inode, struct file *filp)
457 {
458         struct nvmap_client *priv = filp->private_data;
459
460         trace_nvmap_release(priv, priv->name);
461         nvmap_client_put(priv);
462         return 0;
463 }
464
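/*
 * Back a user VMA with handle @h: take a reference on the handle, attach a
 * private tracking structure, install nvmap_vma_ops and derive the page
 * protection from the handle's cache attributes. Carveout-backed handles
 * (no heap_pgalloc) are additionally marked VM_PFNMAP.
 */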
465 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
466 {
467         struct nvmap_vma_priv *priv;
468
469         h = nvmap_handle_get(h);
470         if (!h)
471                 return -EINVAL;
472
473         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
474         if (!priv)
475                 return -ENOMEM;
476         priv->handle = h;
477
478         vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
479                           VM_DONTDUMP | VM_DONTCOPY |
480                           (h->heap_pgalloc ? 0 : VM_PFNMAP);
481         vma->vm_ops = &nvmap_vma_ops;
482         BUG_ON(vma->vm_private_data != NULL);
483         vma->vm_private_data = priv;
484         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
485         nvmap_vma_open(vma);
486         return 0;
487 }
488
489 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
490 {
491         BUG_ON(vma->vm_private_data != NULL);
492         vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
493                           VM_DONTDUMP | VM_DONTCOPY);
494         vma->vm_ops = &nvmap_vma_ops;
495         return 0;
496 }
497
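/*
 * Rough user-space usage sketch for the ioctl interface below (illustrative
 * only; the exact argument structures are defined in the uapi header
 * <linux/nvmap.h> and are not reproduced here):
 *
 *   fd = open("/dev/nvmap", O_RDWR);
 *   ioctl(fd, NVMAP_IOC_CREATE, &create);   create a handle of a given size
 *   ioctl(fd, NVMAP_IOC_ALLOC, &alloc);     back it with memory from a heap
 *   ioctl(fd, NVMAP_IOC_MMAP, &args);       set up for mmap() into the process
 *   ioctl(fd, NVMAP_IOC_FREE, handle);      drop the reference when done
 */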
498 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
499 {
500         int err = 0;
501         void __user *uarg = (void __user *)arg;
502
503         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
504                 return -ENOTTY;
505
506         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
507                 return -ENOTTY;
508
509         if (_IOC_DIR(cmd) & _IOC_READ)
510                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
511         if (_IOC_DIR(cmd) & _IOC_WRITE)
512                 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
513
514         if (err)
515                 return -EFAULT;
516
517         switch (cmd) {
518         case NVMAP_IOC_CREATE:
519         case NVMAP_IOC_FROM_FD:
520                 err = nvmap_ioctl_create(filp, cmd, uarg);
521                 break;
522
523         case NVMAP_IOC_FROM_ID:
524         case NVMAP_IOC_GET_ID:
525                 pr_warn_once("nvmap: unsupported FROM_ID/GET_ID IOCTLs used.\n");
526                 return -ENOTTY;
527
528         case NVMAP_IOC_GET_FD:
529                 err = nvmap_ioctl_getfd(filp, uarg);
530                 break;
531
532 #ifdef CONFIG_COMPAT
533         case NVMAP_IOC_PARAM_32:
534                 err = nvmap_ioctl_get_param(filp, uarg, true);
535                 break;
536 #endif
537
538         case NVMAP_IOC_PARAM:
539                 err = nvmap_ioctl_get_param(filp, uarg, false);
540                 break;
541
542 #ifdef CONFIG_COMPAT
543         case NVMAP_IOC_UNPIN_MULT_32:
544         case NVMAP_IOC_PIN_MULT_32:
545                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
546                         uarg, true);
547                 break;
548 #endif
549
550         case NVMAP_IOC_UNPIN_MULT:
551         case NVMAP_IOC_PIN_MULT:
552                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
553                         uarg, false);
554                 break;
555
556         case NVMAP_IOC_ALLOC:
557                 err = nvmap_ioctl_alloc(filp, uarg);
558                 break;
559
560         case NVMAP_IOC_ALLOC_KIND:
561                 err = nvmap_ioctl_alloc_kind(filp, uarg);
562                 break;
563
564         case NVMAP_IOC_FREE:
565                 err = nvmap_ioctl_free(filp, arg);
566                 break;
567
568 #ifdef CONFIG_COMPAT
569         case NVMAP_IOC_MMAP_32:
570                 err = nvmap_map_into_caller_ptr(filp, uarg, true);
571                 break;
572 #endif
573
574         case NVMAP_IOC_MMAP:
575                 err = nvmap_map_into_caller_ptr(filp, uarg, false);
576                 break;
577
578 #ifdef CONFIG_COMPAT
579         case NVMAP_IOC_WRITE_32:
580         case NVMAP_IOC_READ_32:
581                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
582                         uarg, true);
583                 break;
584 #endif
585
586         case NVMAP_IOC_WRITE:
587         case NVMAP_IOC_READ:
588                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
589                         false);
590                 break;
591
592 #ifdef CONFIG_COMPAT
593         case NVMAP_IOC_CACHE_32:
594                 err = nvmap_ioctl_cache_maint(filp, uarg, true);
595                 break;
596 #endif
597
598         case NVMAP_IOC_CACHE:
599                 err = nvmap_ioctl_cache_maint(filp, uarg, false);
600                 break;
601
602         case NVMAP_IOC_CACHE_LIST:
603         case NVMAP_IOC_RESERVE:
604                 err = nvmap_ioctl_cache_maint_list(filp, uarg,
605                                                    cmd == NVMAP_IOC_RESERVE);
606                 break;
607
608         case NVMAP_IOC_SHARE:
609                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
610                 break;
611
612         default:
613                 return -ENOTTY;
614         }
615         return err;
616 }
617
618 /* To ensure that the backing store for the VMA isn't freed while a fork'd
619  * reference still exists, nvmap_vma_open increments the reference count on
620  * the handle, and nvmap_vma_close decrements it. Alternatively, we could
621  * disallow copying of the VMA, or behave like pmem and zap the pages. FIXME.
622  */
623 void nvmap_vma_open(struct vm_area_struct *vma)
624 {
625         struct nvmap_vma_priv *priv;
626         struct nvmap_handle *h;
627         struct nvmap_vma_list *vma_list, *tmp;
628         struct list_head *tmp_head = NULL;
629         pid_t current_pid = current->pid;
630         bool vma_pos_found = false;
631
632         priv = vma->vm_private_data;
633         BUG_ON(!priv);
634         BUG_ON(!priv->handle);
635
636         atomic_inc(&priv->count);
637         h = priv->handle;
638
639         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
640         if (vma_list) {
641                 mutex_lock(&h->lock);
642                 tmp_head = &h->vmas;
643
644                 /* insert the vma into the handle's vmas list in increasing
645                  * order of handle offset
646                  */
647                 list_for_each_entry(tmp, &h->vmas, list) {
648                         /* if the vma already exists in the list, just increment its refcount */
649                         if (tmp->vma == vma) {
650                                 atomic_inc(&tmp->ref);
651                                 kfree(vma_list);
652                                 goto unlock;
653                         }
654
655                         if (!vma_pos_found && (current_pid == tmp->pid)) {
656                                 if (vma->vm_pgoff < tmp->vma->vm_pgoff) {
657                                         tmp_head = &tmp->list;
658                                         vma_pos_found = true;
659                                 } else {
660                                         tmp_head = tmp->list.next;
661                                 }
662                         }
663                 }
664
665                 vma_list->vma = vma;
666                 vma_list->pid = current_pid;
667                 atomic_set(&vma_list->ref, 1);
668                 list_add_tail(&vma_list->list, tmp_head);
669 unlock:
670                 mutex_unlock(&h->lock);
671         } else {
672                 WARN(1, "vma not tracked");
673         }
674 }
675
676 static void nvmap_vma_close(struct vm_area_struct *vma)
677 {
678         struct nvmap_vma_priv *priv = vma->vm_private_data;
679         struct nvmap_vma_list *vma_list;
680         struct nvmap_handle *h;
681         bool vma_found = false;
682
683         if (!priv)
684                 return;
685
686         BUG_ON(!priv->handle);
687
688         h = priv->handle;
689         mutex_lock(&h->lock);
690         list_for_each_entry(vma_list, &h->vmas, list) {
691                 if (vma_list->vma != vma)
692                         continue;
693                 if (atomic_dec_return(&vma_list->ref) == 0) {
694                         list_del(&vma_list->list);
695                         kfree(vma_list);
696                 }
697                 vma_found = true;
698                 break;
699         }
700         BUG_ON(!vma_found);
701         mutex_unlock(&h->lock);
702
703         if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
704                 if (priv->handle)
705                         nvmap_handle_put(priv->handle);
706                 vma->vm_private_data = NULL;
707                 kfree(priv);
708         }
709 }
710
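/*
 * Page-fault handler for nvmap mappings. Faults past the end of the handle,
 * on unallocated handles or on reserved pages raise SIGBUS. Carveout-backed
 * handles whose PFN has no struct page are mapped with vm_insert_pfn()
 * (VM_FAULT_NOPAGE); otherwise (CMA or page-allocated handles) the backing
 * page is returned and, for page-allocated handles, marked dirty.
 */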
711 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
712 {
713         struct page *page;
714         struct nvmap_vma_priv *priv;
715         unsigned long offs;
716
717         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
718         priv = vma->vm_private_data;
719         if (!priv || !priv->handle || !priv->handle->alloc)
720                 return VM_FAULT_SIGBUS;
721
722         offs += priv->offs;
723         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
724          * offset from the original VMA */
725         offs += (vma->vm_pgoff << PAGE_SHIFT);
726
727         if (offs >= priv->handle->size)
728                 return VM_FAULT_SIGBUS;
729
730         if (!priv->handle->heap_pgalloc) {
731                 unsigned long pfn;
732                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
733                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
734                 if (!pfn_valid(pfn)) {
735                         vm_insert_pfn(vma,
736                                 (unsigned long)vmf->virtual_address, pfn);
737                         return VM_FAULT_NOPAGE;
738                 }
739                 /* CMA memory would get here */
740                 page = pfn_to_page(pfn);
741         } else {
742                 offs >>= PAGE_SHIFT;
743                 if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
744                         return VM_FAULT_SIGBUS;
745                 page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
746                 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
747         }
748
749         if (page)
750                 get_page(page);
751         vmf->page = page;
752         return (page) ? 0 : VM_FAULT_SIGBUS;
753 }
754
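/*
 * Generate the single_open()-based open handler and file_operations that
 * back each of the debugfs "show" routines defined below.
 */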
755 #define DEBUGFS_OPEN_FOPS(name) \
756 static int nvmap_debug_##name##_open(struct inode *inode, \
757                                             struct file *file) \
758 { \
759         return single_open(file, nvmap_debug_##name##_show, \
760                             inode->i_private); \
761 } \
762 \
763 static const struct file_operations debug_##name##_fops = { \
764         .open = nvmap_debug_##name##_open, \
765         .read = seq_read, \
766         .llseek = seq_lseek, \
767         .release = single_release, \
768 }
769
770 #define K(x) ((x) >> 10)
771
772 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
773 {
774         char task_comm[TASK_COMM_LEN];
775         if (!client->task) {
776                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
777                 return;
778         }
779         get_task_comm(task_comm, client->task);
780         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
781                    client->task->pid);
782 }
783
784 static void allocations_stringify(struct nvmap_client *client,
785                                   struct seq_file *s, u32 heap_type)
786 {
787         struct rb_node *n;
788
789         nvmap_ref_lock(client);
790         n = rb_first(&client->handle_refs);
791         for (; n != NULL; n = rb_next(n)) {
792                 struct nvmap_handle_ref *ref =
793                         rb_entry(n, struct nvmap_handle_ref, node);
794                 struct nvmap_handle *handle = ref->handle;
795                 if (handle->alloc && handle->heap_type == heap_type) {
796                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
797                                            (handle->carveout->base);
798                         seq_printf(s,
799                                 "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
800                                 "", "",
801                                 (unsigned long long)base, K(handle->size),
802                                 handle->userflags,
803                                 atomic_read(&handle->ref),
804                                 atomic_read(&ref->dupes),
805                                 atomic_read(&ref->pin),
806                                 atomic_read(&handle->share_count),
807                                 handle);
808                 }
809         }
810         nvmap_ref_unlock(client);
811 }
812
813 /* Compute the total amount of handle physical memory that is mapped
814  * into the client's virtual address space. Remember that the vmas list is
815  * sorted in ascending order of handle offsets.
816  * NOTE: This function must be called with the handle's lock mutex held.
817  */
818 static void nvmap_get_client_handle_mss(struct nvmap_client *client,
819                                 struct nvmap_handle *handle, u64 *total)
820 {
821         struct nvmap_vma_list *vma_list = NULL;
822         struct vm_area_struct *vma = NULL;
823         u64 end_offset = 0, vma_start_offset, vma_size;
824         int64_t overlap_size;
825
826         *total = 0;
827         list_for_each_entry(vma_list, &handle->vmas, list) {
828
829                 if (client->task->pid == vma_list->pid) {
830                         vma = vma_list->vma;
831                         vma_size = vma->vm_end - vma->vm_start;
832
833                         vma_start_offset = vma->vm_pgoff << PAGE_SHIFT;
834                         if (end_offset < vma_start_offset + vma_size) {
835                                 *total += vma_size;
836
837                                 overlap_size = end_offset - vma_start_offset;
838                                 if (overlap_size > 0)
839                                         *total -= overlap_size;
840                                 end_offset = vma_start_offset + vma_size;
841                         }
842                 }
843         }
844 }
845
846 static void maps_stringify(struct nvmap_client *client,
847                                 struct seq_file *s, u32 heap_type)
848 {
849         struct rb_node *n;
850         struct nvmap_vma_list *vma_list = NULL;
851         struct vm_area_struct *vma = NULL;
852         u64 total_mapped_size, vma_size;
853
854         nvmap_ref_lock(client);
855         n = rb_first(&client->handle_refs);
856         for (; n != NULL; n = rb_next(n)) {
857                 struct nvmap_handle_ref *ref =
858                         rb_entry(n, struct nvmap_handle_ref, node);
859                 struct nvmap_handle *handle = ref->handle;
860                 if (handle->alloc && handle->heap_type == heap_type) {
861                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
862                                            (handle->carveout->base);
863                         seq_printf(s,
864                                 "%-18s %-18s %8llx %10zuK %8x %6u %16p "
865                                 "%12s %12s ",
866                                 "", "",
867                                 (unsigned long long)base, K(handle->size),
868                                 handle->userflags,
869                                 atomic_read(&handle->share_count),
870                                 handle, "", "");
871
872                         mutex_lock(&handle->lock);
873                         nvmap_get_client_handle_mss(client, handle,
874                                                         &total_mapped_size);
875                         seq_printf(s, "%6lluK\n", K(total_mapped_size));
876
877                         list_for_each_entry(vma_list, &handle->vmas, list) {
878
879                                 if (vma_list->pid == client->task->pid) {
880                                         vma = vma_list->vma;
881                                         vma_size = vma->vm_end - vma->vm_start;
882                                         seq_printf(s,
883                                           "%-18s %-18s %8s %11s %8s %6s %16s "
884                                           "%-12lx-%12lx %6lluK\n",
885                                           "", "", "", "", "", "", "",
886                                           vma->vm_start, vma->vm_end,
887                                           K(vma_size));
888                                 }
889                         }
890                         mutex_unlock(&handle->lock);
891                 }
892         }
893         nvmap_ref_unlock(client);
894 }
895
896 static void nvmap_get_client_mss(struct nvmap_client *client,
897                                  u64 *total, u32 heap_type)
898 {
899         struct rb_node *n;
900
901         *total = 0;
902         nvmap_ref_lock(client);
903         n = rb_first(&client->handle_refs);
904         for (; n != NULL; n = rb_next(n)) {
905                 struct nvmap_handle_ref *ref =
906                         rb_entry(n, struct nvmap_handle_ref, node);
907                 struct nvmap_handle *handle = ref->handle;
908                 if (handle->alloc && handle->heap_type == heap_type)
909                         *total += handle->size /
910                                   atomic_read(&handle->share_count);
911         }
912         nvmap_ref_unlock(client);
913 }
914
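/*
 * Compute the total size of all allocated handles of @heap_type in the
 * device tree. When @non_pss is supplied, the backing pages are walked as
 * well (only meaningful for page-allocated/IOVMM handles): pages with no
 * user mapping are counted as non-PSS and PSS is the remainder.
 */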
915 static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
916                                       u64 *total, u32 heap_type)
917 {
918         int i;
919         struct rb_node *n;
920         struct nvmap_device *dev = nvmap_dev;
921
922         *total = 0;
923         if (pss)
924                 *pss = 0;
925         if (non_pss)
926                 *non_pss = 0;
927         if (!dev)
928                 return;
929         spin_lock(&dev->handle_lock);
930         n = rb_first(&dev->handles);
931         for (; n != NULL; n = rb_next(n)) {
932                 struct nvmap_handle *h =
933                         rb_entry(n, struct nvmap_handle, node);
934
935                 if (!h || !h->alloc || h->heap_type != heap_type)
936                         continue;
937                 if (!non_pss) {
938                         *total += h->size;
939                         continue;
940                 }
941
942                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
943                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
944                         int mapcount = page_mapcount(page);
945                         if (!mapcount)
946                                 *non_pss += PAGE_SIZE;
947                         *total += PAGE_SIZE;
948                 }
949         }
950         if (pss && non_pss)
951                 *pss = *total - *non_pss;
952         spin_unlock(&dev->handle_lock);
953 }
954
955 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
956 {
957         u64 total;
958         struct nvmap_client *client;
959         u32 heap_type = (u32)(uintptr_t)s->private;
960
961         spin_lock(&nvmap_dev->clients_lock);
962         seq_printf(s, "%-18s %18s %8s %11s\n",
963                 "CLIENT", "PROCESS", "PID", "SIZE");
964         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
965                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
966                         "DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
967         list_for_each_entry(client, &nvmap_dev->clients, list) {
968                 u64 client_total;
969                 client_stringify(client, s);
970                 nvmap_get_client_mss(client, &client_total, heap_type);
971                 seq_printf(s, " %10lluK\n", K(client_total));
972                 allocations_stringify(client, s, heap_type);
973                 seq_printf(s, "\n");
974         }
975         spin_unlock(&nvmap_dev->clients_lock);
976         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
977         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
978         return 0;
979 }
980
981 DEBUGFS_OPEN_FOPS(allocations);
982
983 static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
984 {
985         u64 total;
986         struct nvmap_client *client;
987         u32 heap_type = (u32)(uintptr_t)s->private;
988
989         spin_lock(&nvmap_dev->clients_lock);
990         seq_printf(s, "%-18s %18s %8s %11s\n",
991                 "CLIENT", "PROCESS", "PID", "SIZE");
992         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %9s %21s %18s\n",
993                 "", "", "BASE", "SIZE", "FLAGS", "SHARE", "UID",
994                 "MAPS", "MAPSIZE");
995
996         list_for_each_entry(client, &nvmap_dev->clients, list) {
997                 u64 client_total;
998                 client_stringify(client, s);
999                 nvmap_get_client_mss(client, &client_total, heap_type);
1000                 seq_printf(s, " %10lluK\n", K(client_total));
1001                 maps_stringify(client, s, heap_type);
1002                 seq_printf(s, "\n");
1003         }
1004         spin_unlock(&nvmap_dev->clients_lock);
1005
1006         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1007         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
1008         return 0;
1009 }
1010
1011 DEBUGFS_OPEN_FOPS(maps);
1012
1013 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
1014 {
1015         u64 total;
1016         struct nvmap_client *client;
1017         ulong heap_type = (ulong)s->private;
1018
1019         spin_lock(&nvmap_dev->clients_lock);
1020         seq_printf(s, "%-18s %18s %8s %11s\n",
1021                 "CLIENT", "PROCESS", "PID", "SIZE");
1022         list_for_each_entry(client, &nvmap_dev->clients, list) {
1023                 u64 client_total;
1024                 client_stringify(client, s);
1025                 nvmap_get_client_mss(client, &client_total, heap_type);
1026                 seq_printf(s, " %10lluK\n", K(client_total));
1027         }
1028         spin_unlock(&nvmap_dev->clients_lock);
1029         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1030         seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
1031         return 0;
1032 }
1033
1034 DEBUGFS_OPEN_FOPS(clients);
1035
1036 #define PRINT_MEM_STATS_NOTE(x) \
1037 do { \
1038         seq_printf(s, "Note: total memory is a precise account of pages " \
1039                 "allocated by NvMap.\nIt doesn't match the sum of all " \
1040                 "clients' \"%s\" because shared memory\nis accounted in " \
1041                 "full in each client's \"%s\" that shares it.\n", #x, #x); \
1042 } while (0)
1043
1044 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
1045                                    u64 *non_pss, u64 *total)
1046 {
1047         int i;
1048         struct rb_node *n;
1049
1050         *pss = *non_pss = *total = 0;
1051         nvmap_ref_lock(client);
1052         n = rb_first(&client->handle_refs);
1053         for (; n != NULL; n = rb_next(n)) {
1054                 struct nvmap_handle_ref *ref =
1055                         rb_entry(n, struct nvmap_handle_ref, node);
1056                 struct nvmap_handle *h = ref->handle;
1057
1058                 if (!h || !h->alloc || !h->heap_pgalloc)
1059                         continue;
1060
1061                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
1062                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
1063                         int mapcount = page_mapcount(page);
1064                         if (!mapcount)
1065                                 *non_pss += PAGE_SIZE;
1066                         *total += PAGE_SIZE;
1067                 }
1068                 *pss = *total - *non_pss;
1069         }
1070         nvmap_ref_unlock(client);
1071 }
1072
1073 static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
1074 {
1075         u64 pss, non_pss, total;
1076         struct nvmap_client *client;
1077         struct nvmap_device *dev = s->private;
1078         u64 total_memory, total_pss, total_non_pss;
1079
1080         spin_lock(&dev->clients_lock);
1081         seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
1082                 "CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
1083         list_for_each_entry(client, &dev->clients, list) {
1084                 client_stringify(client, s);
1085                 nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
1086                 seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
1087                         K(non_pss), K(total));
1088         }
1089         spin_unlock(&dev->clients_lock);
1090
1091         nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory, NVMAP_HEAP_IOVMM);
1092         seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
1093                 "total", "", "", K(total_pss),
1094                 K(total_non_pss), K(total_memory));
1095         PRINT_MEM_STATS_NOTE(TOTAL);
1096         return 0;
1097 }
1098
1099 DEBUGFS_OPEN_FOPS(iovmm_procrank);
1100
1101 ulong nvmap_iovmm_get_used_pages(void)
1102 {
1103         u64 total;
1104
1105         nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
1106         return total >> PAGE_SHIFT;
1107 }
1108
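/*
 * debugfs accessors for the global nvmap statistics: stats_fops exposes each
 * counter for read/write, and writing a non-zero value through
 * reset_stats_fops clears the collect flag and zeroes every counter except
 * the running total (NS_TOTAL).
 */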
1109 static int nvmap_stats_reset(void *data, u64 val)
1110 {
1111         int i;
1112
1113         if (val) {
1114                 atomic64_set(&nvmap_stats.collect, 0);
1115                 for (i = 0; i < NS_NUM; i++) {
1116                         if (i == NS_TOTAL)
1117                                 continue;
1118                         atomic64_set(&nvmap_stats.stats[i], 0);
1119                 }
1120         }
1121         return 0;
1122 }
1123
1124 static int nvmap_stats_get(void *data, u64 *val)
1125 {
1126         atomic64_t *ptr = data;
1127
1128         *val = atomic64_read(ptr);
1129         return 0;
1130 }
1131
1132 static int nvmap_stats_set(void *data, u64 val)
1133 {
1134         atomic64_t *ptr = data;
1135
1136         atomic64_set(ptr, val);
1137         return 0;
1138 }
1139
1140 DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
1141 DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");
1142
1143 static void nvmap_stats_init(struct dentry *nvmap_debug_root)
1144 {
1145         struct dentry *stats_root;
1146
1147 #define CREATE_DF(x, y) \
1148         debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);
1149
1150         stats_root = debugfs_create_dir("stats", nvmap_debug_root);
1151         if (!IS_ERR_OR_NULL(stats_root)) {
1152                 CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
1153                 CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
1154                 CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
1155                 CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
1156                 CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
1157                 CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
1158                 CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
1159                 CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
1160                 CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
1161                 CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
1162                 CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
1163                 CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
1164                 CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);
1165
1166                 debugfs_create_file("collect", S_IRUGO | S_IWUSR,
1167                         stats_root, &nvmap_stats.collect, &stats_fops);
1168                 debugfs_create_file("reset", S_IWUSR,
1169                         stats_root, NULL, &reset_stats_fops);
1170         }
1171
1172 #undef CREATE_DF
1173 }
1174
1175 void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
1176 {
1177         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1178                 atomic64_add(size, &nvmap_stats.stats[stat]);
1179 }
1180
1181 void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
1182 {
1183         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1184                 atomic64_sub(size, &nvmap_stats.stats[stat]);
1185 }
1186
1187 u64 nvmap_stats_read(enum nvmap_stats_t stat)
1188 {
1189         return atomic64_read(&nvmap_stats.stats[stat]);
1190 }
1191
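/*
 * Platform driver probe: registers the /dev/nvmap misc device, creates a
 * heap for each carveout described in the platform data, and exposes
 * per-heap, IOVMM and global statistics through debugfs.
 */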
1192 static int nvmap_probe(struct platform_device *pdev)
1193 {
1194         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1195         struct nvmap_device *dev;
1196         struct dentry *nvmap_debug_root;
1197         unsigned int i;
1198         int e;
1199
1200         if (!plat) {
1201                 dev_err(&pdev->dev, "no platform data?\n");
1202                 return -ENODEV;
1203         }
1204
1205         /*
1206          * The DMA mapping API uses these parameters to decide how to map the
1207          * passed buffers. If the maximum physical segment size is set smaller
1208          * than the size of the buffer, then the buffer will be mapped as
1209          * separate IO virtual address ranges.
1210          */
1211         pdev->dev.dma_parms = &nvmap_dma_parameters;
1212
1213         if (WARN_ON(nvmap_dev != NULL)) {
1214                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1215                 return -ENODEV;
1216         }
1217
1218         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1219         if (!dev) {
1220                 dev_err(&pdev->dev, "out of memory for device\n");
1221                 return -ENOMEM;
1222         }
1223
1224         nvmap_dev = dev;
1225
1226         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1227         dev->dev_user.name = "nvmap";
1228         dev->dev_user.fops = &nvmap_user_fops;
1229         dev->dev_user.parent = &pdev->dev;
1230
1231         dev->handles = RB_ROOT;
1232
1233 #ifdef CONFIG_NVMAP_PAGE_POOLS
1234         e = nvmap_page_pool_init(dev);
1235         if (e)
1236                 goto fail;
1237 #endif
1238
1239         spin_lock_init(&dev->handle_lock);
1240         INIT_LIST_HEAD(&dev->clients);
1241         spin_lock_init(&dev->clients_lock);
1242
1243         e = misc_register(&dev->dev_user);
1244         if (e) {
1245                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1246                         dev->dev_user.name);
1247                 goto fail;
1248         }
1249
1250         dev->nr_carveouts = 0;
1251         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1252                              plat->nr_carveouts, GFP_KERNEL);
1253         if (!dev->heaps) {
1254                 e = -ENOMEM;
1255                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1256                 goto fail;
1257         }
1258
1259         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1260         if (IS_ERR_OR_NULL(nvmap_debug_root))
1261                 dev_err(&pdev->dev, "couldn't create debug files\n");
1262
1263         debugfs_create_u32("max_handle_count", S_IRUGO,
1264                         nvmap_debug_root, &nvmap_max_handle_count);
1265
1266         for (i = 0; i < plat->nr_carveouts; i++) {
1267                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1268                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1269                 node->base = round_up(co->base, PAGE_SIZE);
1270                 node->size = round_down(co->size -
1271                                         (node->base - co->base), PAGE_SIZE);
1272                 if (!co->size)
1273                         continue;
1274
1275                 node->carveout = nvmap_heap_create(
1276                                 dev->dev_user.this_device, co,
1277                                 node->base, node->size, node);
1278
1279                 if (!node->carveout) {
1280                         e = -ENOMEM;
1281                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1282                         goto fail_heaps;
1283                 }
1284                 node->index = dev->nr_carveouts;
1285                 dev->nr_carveouts++;
1286                 spin_lock_init(&node->clients_lock);
1287                 INIT_LIST_HEAD(&node->clients);
1288                 node->heap_bit = co->usage_mask;
1289
1290                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1291                         struct dentry *heap_root =
1292                                 debugfs_create_dir(co->name, nvmap_debug_root);
1293                         if (!IS_ERR_OR_NULL(heap_root)) {
1294                                 debugfs_create_file("clients", S_IRUGO,
1295                                         heap_root,
1296                                         (void *)(uintptr_t)node->heap_bit,
1297                                         &debug_clients_fops);
1298                                 debugfs_create_file("allocations", S_IRUGO,
1299                                         heap_root,
1300                                         (void *)(uintptr_t)node->heap_bit,
1301                                         &debug_allocations_fops);
1302                                 debugfs_create_file("maps", S_IRUGO,
1303                                         heap_root,
1304                                         (void *)(uintptr_t)node->heap_bit,
1305                                         &debug_maps_fops);
1306                                 nvmap_heap_debugfs_init(heap_root,
1307                                                         node->carveout);
1308                         }
1309                 }
1310         }
1311         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1312                 struct dentry *iovmm_root =
1313                         debugfs_create_dir("iovmm", nvmap_debug_root);
1314                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1315                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1316                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1317                                 &debug_clients_fops);
1318                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1319                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1320                                 &debug_allocations_fops);
1321                         debugfs_create_file("maps", S_IRUGO, iovmm_root,
1322                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1323                                 &debug_maps_fops);
1324                         debugfs_create_file("procrank", S_IRUGO, iovmm_root,
1325                                 dev, &debug_iovmm_procrank_fops);
1326                 }
1327 #ifdef CONFIG_NVMAP_PAGE_POOLS
1328                 nvmap_page_pool_debugfs_init(nvmap_debug_root);
1329 #endif
1330 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
1331                 debugfs_create_size_t("cache_maint_inner_threshold",
1332                                       S_IRUSR | S_IWUSR,
1333                                       nvmap_debug_root,
1334                                       &cache_maint_inner_threshold);
1335
1336                 /* cortex-a9 */
1337                 if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
1338                         cache_maint_inner_threshold = SZ_32K;
1339                 pr_info("nvmap: inner cache maint threshold=%zd\n",
1340                         cache_maint_inner_threshold);
1341 #endif
1342 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
1343                 debugfs_create_size_t("cache_maint_outer_threshold",
1344                                       S_IRUSR | S_IWUSR,
1345                                       nvmap_debug_root,
1346                                       &cache_maint_outer_threshold);
1347                 pr_info("nvmap: outer cache maint threshold=%zd\n",
1348                         cache_maint_outer_threshold);
1349 #endif
1350         }
1351
1352         nvmap_stats_init(nvmap_debug_root);
1353         platform_set_drvdata(pdev, dev);
1354
1355         nvmap_dmabuf_debugfs_init(nvmap_debug_root);
1356         e = nvmap_dmabuf_stash_init();
1357         if (e)
1358                 goto fail_heaps;
1359
1360         return 0;
1361 fail_heaps:
1362         for (i = 0; i < dev->nr_carveouts; i++) {
1363                 struct nvmap_carveout_node *node = &dev->heaps[i];
1364                 nvmap_heap_destroy(node->carveout);
1365         }
1366 fail:
1367 #ifdef CONFIG_NVMAP_PAGE_POOLS
1368         nvmap_page_pool_fini(nvmap_dev);
1369 #endif
1370         kfree(dev->heaps);
1371         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1372                 misc_deregister(&dev->dev_user);
1373         kfree(dev);
1374         nvmap_dev = NULL;
1375         return e;
1376 }
1377
1378 static int nvmap_remove(struct platform_device *pdev)
1379 {
1380         struct nvmap_device *dev = platform_get_drvdata(pdev);
1381         struct rb_node *n;
1382         struct nvmap_handle *h;
1383         int i;
1384
1385         misc_deregister(&dev->dev_user);
1386
1387         while ((n = rb_first(&dev->handles))) {
1388                 h = rb_entry(n, struct nvmap_handle, node);
1389                 rb_erase(&h->node, &dev->handles);
1390                 kfree(h);
1391         }
1392
1393         for (i = 0; i < dev->nr_carveouts; i++) {
1394                 struct nvmap_carveout_node *node = &dev->heaps[i];
1395                 nvmap_heap_destroy(node->carveout);
1396         }
1397         kfree(dev->heaps);
1398
1399         kfree(dev);
1400         nvmap_dev = NULL;
1401         return 0;
1402 }
1403
1404 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1405 {
1406         return 0;
1407 }
1408
1409 static int nvmap_resume(struct platform_device *pdev)
1410 {
1411         return 0;
1412 }
1413
1414 static struct platform_driver nvmap_driver = {
1415         .probe          = nvmap_probe,
1416         .remove         = nvmap_remove,
1417         .suspend        = nvmap_suspend,
1418         .resume         = nvmap_resume,
1419
1420         .driver = {
1421                 .name   = "tegra-nvmap",
1422                 .owner  = THIS_MODULE,
1423         },
1424 };
1425
1426 static int __init nvmap_init_driver(void)
1427 {
1428         int e;
1429
1430         nvmap_dev = NULL;
1431
1432         e = nvmap_heap_init();
1433         if (e)
1434                 goto fail;
1435
1436         e = platform_driver_register(&nvmap_driver);
1437         if (e) {
1438                 nvmap_heap_deinit();
1439                 goto fail;
1440         }
1441
1442 fail:
1443         return e;
1444 }
1445 fs_initcall(nvmap_init_driver);
1446
1447 static void __exit nvmap_exit_driver(void)
1448 {
1449         platform_driver_unregister(&nvmap_driver);
1450         nvmap_heap_deinit();
1451         nvmap_dev = NULL;
1452 }
1453 module_exit(nvmap_exit_driver);