1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/device.h>
30 #include <linux/oom.h>
31 #include <linux/platform_device.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/nvmap.h>
38 #include <linux/module.h>
39 #include <linux/resource.h>
40 #include <linux/security.h>
41 #include <linux/stat.h>
42 #include <linux/kthread.h>
43
44 #include <asm/cputype.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/nvmap.h>
48
49 #include "nvmap_priv.h"
50 #include "nvmap_ioctl.h"
51
52 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
53
54 /* roughly the L2 cache size; buffers at least this big use a full cache flush */
55 #ifdef CONFIG_DENVER_CPU
56 size_t cache_maint_inner_threshold = SZ_2M * 8;
57 #else
58 size_t cache_maint_inner_threshold = SZ_2M;
59 #endif
60
61 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
62 size_t cache_maint_outer_threshold = SZ_1M;
63 #endif
64
65 struct nvmap_carveout_node {
66         unsigned int            heap_bit;
67         struct nvmap_heap       *carveout;
68         int                     index;
69         struct list_head        clients;
70         spinlock_t              clients_lock;
71         phys_addr_t             base;
72         size_t                  size;
73 };
74
75 struct nvmap_device *nvmap_dev;
76 struct nvmap_stats nvmap_stats;
77
78 static struct backing_dev_info nvmap_bdi = {
79         .ra_pages       = 0,
80         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
81                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
82 };
83
84 static struct device_dma_parameters nvmap_dma_parameters = {
85         .max_segment_size = UINT_MAX,
86 };
87
88 static int nvmap_open(struct inode *inode, struct file *filp);
89 static int nvmap_release(struct inode *inode, struct file *filp);
90 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
91 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
92 static void nvmap_vma_close(struct vm_area_struct *vma);
93 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
94
95 static const struct file_operations nvmap_user_fops = {
96         .owner          = THIS_MODULE,
97         .open           = nvmap_open,
98         .release        = nvmap_release,
99         .unlocked_ioctl = nvmap_ioctl,
100 #ifdef CONFIG_COMPAT
101         .compat_ioctl   = nvmap_ioctl,
102 #endif
103         .mmap           = nvmap_map,
104 };
105
106 static struct vm_operations_struct nvmap_vma_ops = {
107         .open           = nvmap_vma_open,
108         .close          = nvmap_vma_close,
109         .fault          = nvmap_vma_fault,
110 };
111
112 int is_nvmap_vma(struct vm_area_struct *vma)
113 {
114         return vma->vm_ops == &nvmap_vma_ops;
115 }
116
117 /*
118  * Verifies that the passed handle is valid for this client and returns the
119  * client's reference to it, or NULL if the client holds no such reference.
120  *
121  * Note: the caller must hold the client's ref lock.
122  */
123 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
124                                                  struct nvmap_handle *h)
125 {
126         struct rb_node *n = c->handle_refs.rb_node;
127
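        /* handle refs are kept in an rb-tree ordered by handle pointer value */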
128         while (n) {
129                 struct nvmap_handle_ref *ref;
130                 ref = rb_entry(n, struct nvmap_handle_ref, node);
131                 if (ref->handle == h)
132                         return ref;
133                 else if ((uintptr_t)h > (uintptr_t)ref->handle)
134                         n = n->rb_right;
135                 else
136                         n = n->rb_left;
137         }
138
139         return NULL;
140 }
141
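/*
 * Return the heap bit of the carveout heap that contains block @b,
 * or 0 if the block does not belong to any registered carveout.
 */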
142 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
143                                    struct nvmap_heap_block *b)
144 {
145         struct nvmap_heap *h = nvmap_block_to_heap(b);
146         struct nvmap_carveout_node *n;
147         int i;
148
149         for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
150                 n = &nvmap_dev->heaps[i];
151                 if (n->carveout == h)
152                         return n->heap_bit;
153         }
154         return 0;
155 }
156
157 /*
158  * This routine is used to flush carveout memory from the cache.
159  * Why is a cache flush needed for carveout memory? Consider the case where a
160  * piece of carveout is allocated as cached and then released. Later the same
161  * memory is allocated for an uncached request without being flushed from the
162  * cache. The client might then pass it to a H/W engine, which starts modifying
163  * the memory. Because it was cached earlier, some portion of it may still be
164  * in the cache. When the CPU later reads or writes other memory, that cached
165  * portion can get written back to main memory and cause corruption if this
166  * happens after the H/W engine has written its data.
167  *
168  * But flushing the memory blindly on each carveout allocation is redundant.
169  *
170  * In order to optimize carveout buffer cache flushes, the following
171  * strategy is used.
172  *
173  * The whole carveout is flushed from the cache during its initialization.
174  * During allocation, carveout buffers are not flushed from the cache.
175  * During deallocation, carveout buffers are flushed if they were allocated as
176  * cached; if they were allocated as uncached/writecombined, no cache flush is
177  * needed and just draining the store buffers is enough.
178  */
179 int nvmap_flush_heap_block(struct nvmap_client *client,
180         struct nvmap_heap_block *block, size_t len, unsigned int prot)
181 {
182         ulong kaddr;
183         phys_addr_t phys = block->base;
184         phys_addr_t end = block->base + len;
185         struct vm_struct *area = NULL;
186
187         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
188                 goto out;
189
190 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
191         if (len >= cache_maint_inner_threshold) {
192                 inner_flush_cache_all();
193                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
194                         outer_flush_range(block->base, block->base + len);
195                 goto out;
196         }
197 #endif
198
199         area = alloc_vm_area(PAGE_SIZE, NULL);
200         if (!area)
201                 return -ENOMEM;
202
203         kaddr = (ulong)area->addr;
204
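        /*
         * Flush the block one page at a time: map each physical page into the
         * temporary VM area, flush it by virtual address, then unmap it, so a
         * block of any size needs only a single PAGE_SIZE virtual window.
         */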
205         while (phys < end) {
206                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
207                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
208
209                 next = min(next, end);
210                 ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
211                         phys, PG_PROT_KERNEL);
212                 FLUSH_DCACHE_AREA(base, next - phys);
213                 phys = next;
214                 unmap_kernel_range(kaddr, PAGE_SIZE);
215         }
216
217         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
218                 outer_flush_range(block->base, block->base + len);
219
220         free_vm_area(area);
221 out:
222         wmb();
223         return 0;
224 }
225
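/*
 * Walk the registered carveouts in order and return a block from the first
 * heap whose heap_bit matches the requested type mask, or NULL on failure.
 */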
226 static
227 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
228                                               struct nvmap_handle *handle,
229                                               unsigned long type)
230 {
231         struct nvmap_carveout_node *co_heap;
232         struct nvmap_device *dev = nvmap_dev;
233         int i;
234
235         for (i = 0; i < dev->nr_carveouts; i++) {
236                 struct nvmap_heap_block *block;
237                 co_heap = &dev->heaps[i];
238
239                 if (!(co_heap->heap_bit & type))
240                         continue;
241
242                 block = nvmap_heap_alloc(co_heap->carveout, handle);
243                 if (block)
244                         return block;
245         }
246         return NULL;
247 }
248
249 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
250                                               struct nvmap_handle *handle,
251                                               unsigned long type)
252 {
253         return do_nvmap_carveout_alloc(client, handle, type);
254 }
255
256 /* remove a handle from the device's tree of all handles; called
257  * when freeing handles. */
258 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
259 {
260         spin_lock(&dev->handle_lock);
261
262         /* re-test inside the spinlock if the handle really has no clients;
263          * only remove the handle if it is unreferenced */
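        /* atomic_add_return(0, ...) reads the refcount with barrier semantics */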
264         if (atomic_add_return(0, &h->ref) > 0) {
265                 spin_unlock(&dev->handle_lock);
266                 return -EBUSY;
267         }
268         smp_rmb();
269         BUG_ON(atomic_read(&h->ref) < 0);
270         BUG_ON(atomic_read(&h->pin) != 0);
271
272         rb_erase(&h->node, &dev->handles);
273
274         spin_unlock(&dev->handle_lock);
275         return 0;
276 }
277
278 /* adds a newly-created handle to the device master tree */
279 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
280 {
281         struct rb_node **p;
282         struct rb_node *parent = NULL;
283
284         spin_lock(&dev->handle_lock);
285         p = &dev->handles.rb_node;
286         while (*p) {
287                 struct nvmap_handle *b;
288
289                 parent = *p;
290                 b = rb_entry(parent, struct nvmap_handle, node);
291                 if (h > b)
292                         p = &parent->rb_right;
293                 else
294                         p = &parent->rb_left;
295         }
296         rb_link_node(&h->node, parent, p);
297         rb_insert_color(&h->node, &dev->handles);
298         spin_unlock(&dev->handle_lock);
299 }
300
301 /* Validates that a handle is in the device master tree and, if so, takes
302  * and returns a reference to it; returns NULL otherwise. */
303 struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
304 {
305         struct nvmap_handle *h = NULL;
306         struct rb_node *n;
307
308         spin_lock(&nvmap_dev->handle_lock);
309
310         n = nvmap_dev->handles.rb_node;
311
312         while (n) {
313                 h = rb_entry(n, struct nvmap_handle, node);
314                 if (h == id) {
315                         h = nvmap_handle_get(h);
316                         spin_unlock(&nvmap_dev->handle_lock);
317                         return h;
318                 }
319                 if (id > h)
320                         n = n->rb_right;
321                 else
322                         n = n->rb_left;
323         }
324         spin_unlock(&nvmap_dev->handle_lock);
325         return NULL;
326 }
327
328 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
329                                            const char *name)
330 {
331         struct nvmap_client *client;
332         struct task_struct *task;
333
334         if (WARN_ON(!dev))
335                 return NULL;
336
337         client = kzalloc(sizeof(*client), GFP_KERNEL);
338         if (!client)
339                 return NULL;
340
341         client->name = name;
342         client->kernel_client = true;
343         client->handle_refs = RB_ROOT;
344
345         get_task_struct(current->group_leader);
346         task_lock(current->group_leader);
347         /* don't bother to store task struct for kernel threads,
348            they can't be killed anyway */
349         if (current->flags & PF_KTHREAD) {
350                 put_task_struct(current->group_leader);
351                 task = NULL;
352         } else {
353                 task = current->group_leader;
354         }
355         task_unlock(current->group_leader);
356         client->task = task;
357
358         mutex_init(&client->ref_lock);
359         atomic_set(&client->count, 1);
360
361         spin_lock(&dev->clients_lock);
362         list_add(&client->list, &dev->clients);
363         spin_unlock(&dev->clients_lock);
364         return client;
365 }
366
367 static void destroy_client(struct nvmap_client *client)
368 {
369         struct rb_node *n;
370
371         if (!client)
372                 return;
373
374         spin_lock(&nvmap_dev->clients_lock);
375         list_del(&client->list);
376         spin_unlock(&nvmap_dev->clients_lock);
377
378         while ((n = rb_first(&client->handle_refs))) {
379                 struct nvmap_handle_ref *ref;
380                 int pins, dupes;
381
382                 ref = rb_entry(n, struct nvmap_handle_ref, node);
383
384                 smp_rmb();
385                 pins = atomic_read(&ref->pin);
386
387                 while (pins--)
388                         __nvmap_unpin(ref);
389
390                 if (ref->handle->owner == client)
391                         ref->handle->owner = NULL;
392
393                 dma_buf_put(ref->handle->dmabuf);
394                 rb_erase(&ref->node, &client->handle_refs);
395                 atomic_dec(&ref->handle->share_count);
396
397                 dupes = atomic_read(&ref->dupes);
398                 while (dupes--)
399                         nvmap_handle_put(ref->handle);
400
401                 kfree(ref);
402         }
403
404         if (client->task)
405                 put_task_struct(client->task);
406
407         kfree(client);
408 }
409
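/*
 * Take a reference on @client unless its refcount has already dropped to
 * zero (client is being destroyed), in which case NULL is returned.
 */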
410 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
411 {
412         if (!virt_addr_valid(client))
413                 return NULL;
414
415         if (!atomic_add_unless(&client->count, 1, 0))
416                 return NULL;
417
418         return client;
419 }
420
421 void nvmap_client_put(struct nvmap_client *client)
422 {
423         if (!client)
424                 return;
425
426         if (!atomic_dec_return(&client->count))
427                 destroy_client(client);
428 }
429
430 static int nvmap_open(struct inode *inode, struct file *filp)
431 {
432         struct miscdevice *miscdev = filp->private_data;
433         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
434         struct nvmap_client *priv;
435         int ret;
436         __attribute__((unused)) struct rlimit old_rlim, new_rlim;
437
438         ret = nonseekable_open(inode, filp);
439         if (unlikely(ret))
440                 return ret;
441
442         BUG_ON(dev != nvmap_dev);
443         priv = __nvmap_create_client(dev, "user");
444         if (!priv)
445                 return -ENOMEM;
446         trace_nvmap_open(priv, priv->name);
447
448         priv->kernel_client = false;
449
450         filp->f_mapping->backing_dev_info = &nvmap_bdi;
451
452         filp->private_data = priv;
453         return 0;
454 }
455
456 static int nvmap_release(struct inode *inode, struct file *filp)
457 {
458         struct nvmap_client *priv = filp->private_data;
459
460         trace_nvmap_release(priv, priv->name);
461         nvmap_client_put(priv);
462         return 0;
463 }
464
465 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
466 {
467         struct nvmap_vma_priv *priv;
468
469         h = nvmap_handle_get(h);
470         if (!h)
471                 return -EINVAL;
472
473         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
474         if (!priv)
475                 return -ENOMEM;
476         priv->handle = h;
477
478         vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
479                           VM_DONTDUMP | VM_DONTCOPY |
480                           (h->heap_pgalloc ? 0 : VM_PFNMAP);
481         vma->vm_ops = &nvmap_vma_ops;
482         BUG_ON(vma->vm_private_data != NULL);
483         vma->vm_private_data = priv;
484         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
485         nvmap_vma_open(vma);
486         return 0;
487 }
488
489 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
490 {
491         BUG_ON(vma->vm_private_data != NULL);
492         vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
493                           VM_DONTDUMP | VM_DONTCOPY);
494         vma->vm_ops = &nvmap_vma_ops;
495         return 0;
496 }
497
498 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
499 {
500         int err = 0;
501         void __user *uarg = (void __user *)arg;
502
503         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
504                 return -ENOTTY;
505
506         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
507                 return -ENOTTY;
508
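        /*
         * _IOC_READ means the kernel writes the result back to user memory,
         * so it must be writable; _IOC_WRITE means the kernel reads from it,
         * so it must be readable.
         */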
509         if (_IOC_DIR(cmd) & _IOC_READ)
510                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
511         if (_IOC_DIR(cmd) & _IOC_WRITE)
512                 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
513
514         if (err)
515                 return -EFAULT;
516
517         switch (cmd) {
518         case NVMAP_IOC_CREATE:
519         case NVMAP_IOC_FROM_ID:
520         case NVMAP_IOC_FROM_FD:
521                 err = nvmap_ioctl_create(filp, cmd, uarg);
522                 break;
523
524         case NVMAP_IOC_GET_ID:
525                 err = nvmap_ioctl_getid(filp, uarg);
526                 break;
527
528         case NVMAP_IOC_GET_FD:
529                 err = nvmap_ioctl_getfd(filp, uarg);
530                 break;
531
532 #ifdef CONFIG_COMPAT
533         case NVMAP_IOC_PARAM_32:
534                 err = nvmap_ioctl_get_param(filp, uarg, true);
535                 break;
536 #endif
537
538         case NVMAP_IOC_PARAM:
539                 err = nvmap_ioctl_get_param(filp, uarg, false);
540                 break;
541
542 #ifdef CONFIG_COMPAT
543         case NVMAP_IOC_UNPIN_MULT_32:
544         case NVMAP_IOC_PIN_MULT_32:
545                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
546                         uarg, true);
547                 break;
548 #endif
549
550         case NVMAP_IOC_UNPIN_MULT:
551         case NVMAP_IOC_PIN_MULT:
552                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
553                         uarg, false);
554                 break;
555
556         case NVMAP_IOC_ALLOC:
557                 err = nvmap_ioctl_alloc(filp, uarg);
558                 break;
559
560         case NVMAP_IOC_ALLOC_KIND:
561                 err = nvmap_ioctl_alloc_kind(filp, uarg);
562                 break;
563
564         case NVMAP_IOC_FREE:
565                 err = nvmap_ioctl_free(filp, arg);
566                 break;
567
568 #ifdef CONFIG_COMPAT
569         case NVMAP_IOC_MMAP_32:
570                 err = nvmap_map_into_caller_ptr(filp, uarg, true);
571                 break;
572 #endif
573
574         case NVMAP_IOC_MMAP:
575                 err = nvmap_map_into_caller_ptr(filp, uarg, false);
576                 break;
577
578 #ifdef CONFIG_COMPAT
579         case NVMAP_IOC_WRITE_32:
580         case NVMAP_IOC_READ_32:
581                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
582                         uarg, true);
583                 break;
584 #endif
585
586         case NVMAP_IOC_WRITE:
587         case NVMAP_IOC_READ:
588                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
589                         false);
590                 break;
591
592 #ifdef CONFIG_COMPAT
593         case NVMAP_IOC_CACHE_32:
594                 err = nvmap_ioctl_cache_maint(filp, uarg, true);
595                 break;
596 #endif
597
598         case NVMAP_IOC_CACHE:
599                 err = nvmap_ioctl_cache_maint(filp, uarg, false);
600                 break;
601
602         case NVMAP_IOC_CACHE_LIST:
603         case NVMAP_IOC_RESERVE:
604                 err = nvmap_ioctl_cache_maint_list(filp, uarg,
605                                                    cmd == NVMAP_IOC_RESERVE);
606                 break;
607
608         case NVMAP_IOC_SHARE:
609                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
610                 break;
611
612         default:
613                 return -ENOTTY;
614         }
615         return err;
616 }
617
618 /* To ensure that the backing store for the VMA isn't freed while a fork'd
619  * reference still exists, nvmap_vma_open increments the reference count on
620  * the vma private data (which pins the handle), and nvmap_vma_close drops it.
621  * Alternatively, we could disallow copying of the vma, or behave like pmem
622  * and zap the pages. FIXME. */
623 void nvmap_vma_open(struct vm_area_struct *vma)
624 {
625         struct nvmap_vma_priv *priv;
626         struct nvmap_handle *h;
627         struct nvmap_vma_list *vma_list, *tmp;
628         struct list_head *tmp_head = NULL;
629         pid_t current_pid = current->pid;
630         bool vma_pos_found = false;
631
632         priv = vma->vm_private_data;
633         BUG_ON(!priv);
634         BUG_ON(!priv->handle);
635
636         atomic_inc(&priv->count);
637         h = priv->handle;
638
639         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
640         if (vma_list) {
641                 mutex_lock(&h->lock);
642                 tmp_head = &h->vmas;
643
644                 /* insert vma into handle's vmas list in the increasing order of
645                  * handle offsets
646                  */
647                 list_for_each_entry(tmp, &h->vmas, list) {
648                         BUG_ON(tmp->vma == vma);
649
650                         if (!vma_pos_found && (current_pid == tmp->pid)) {
651                                 if (vma->vm_pgoff < tmp->vma->vm_pgoff) {
652                                         tmp_head = &tmp->list;
653                                         vma_pos_found = true;
654                                 } else {
655                                         tmp_head = tmp->list.next;
656                                 }
657                         }
658                 }
659
660                 vma_list->vma = vma;
661                 vma_list->pid = current_pid;
662                 list_add_tail(&vma_list->list, tmp_head);
663                 mutex_unlock(&h->lock);
664         } else {
665                 WARN(1, "vma not tracked");
666         }
667 }
668
669 static void nvmap_vma_close(struct vm_area_struct *vma)
670 {
671         struct nvmap_vma_priv *priv = vma->vm_private_data;
672         struct nvmap_vma_list *vma_list;
673         struct nvmap_handle *h;
674         bool vma_found = false;
675
676         if (!priv)
677                 return;
678
679         BUG_ON(!priv->handle);
680
681         h = priv->handle;
682         mutex_lock(&h->lock);
683         list_for_each_entry(vma_list, &h->vmas, list) {
684                 if (vma_list->vma != vma)
685                         continue;
686                 list_del(&vma_list->list);
687                 kfree(vma_list);
688                 vma_found = true;
689                 break;
690         }
691         BUG_ON(!vma_found);
692         mutex_unlock(&h->lock);
693
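        /*
         * Drop this VMA's reference on the private data; if the previous
         * count was 1 this was the last mapping, so release the handle
         * reference and free the private data.
         */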
694         if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
695                 if (priv->handle)
696                         nvmap_handle_put(priv->handle);
697                 vma->vm_private_data = NULL;
698                 kfree(priv);
699         }
700 }
701
702 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
703 {
704         struct page *page;
705         struct nvmap_vma_priv *priv;
706         unsigned long offs;
707
708         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
709         priv = vma->vm_private_data;
710         if (!priv || !priv->handle || !priv->handle->alloc)
711                 return VM_FAULT_SIGBUS;
712
713         offs += priv->offs;
714         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
715          * offset from the original VMA */
716         offs += (vma->vm_pgoff << PAGE_SHIFT);
717
718         if (offs >= priv->handle->size)
719                 return VM_FAULT_SIGBUS;
720
721         if (!priv->handle->heap_pgalloc) {
722                 unsigned long pfn;
723                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
724                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
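                /*
                 * Carveout memory with no struct page backing: insert the PFN
                 * directly and report NOPAGE; CMA-backed carveouts fall
                 * through to the struct page path below.
                 */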
725                 if (!pfn_valid(pfn)) {
726                         vm_insert_pfn(vma,
727                                 (unsigned long)vmf->virtual_address, pfn);
728                         return VM_FAULT_NOPAGE;
729                 }
730                 /* CMA memory would get here */
731                 page = pfn_to_page(pfn);
732         } else {
733                 offs >>= PAGE_SHIFT;
734                 if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
735                         return VM_FAULT_SIGBUS;
736                 page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
737                 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
738         }
739
740         if (page)
741                 get_page(page);
742         vmf->page = page;
743         return (page) ? 0 : VM_FAULT_SIGBUS;
744 }
745
746 #define DEBUGFS_OPEN_FOPS(name) \
747 static int nvmap_debug_##name##_open(struct inode *inode, \
748                                             struct file *file) \
749 { \
750         return single_open(file, nvmap_debug_##name##_show, \
751                             inode->i_private); \
752 } \
753 \
754 static const struct file_operations debug_##name##_fops = { \
755         .open = nvmap_debug_##name##_open, \
756         .read = seq_read, \
757         .llseek = seq_lseek, \
758         .release = single_release, \
759 }
760
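/* scale byte counts down to kilobytes for debugfs output */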
761 #define K(x) (x >> 10)
762
763 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
764 {
765         char task_comm[TASK_COMM_LEN];
766         if (!client->task) {
767                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
768                 return;
769         }
770         get_task_comm(task_comm, client->task);
771         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
772                    client->task->pid);
773 }
774
775 static void allocations_stringify(struct nvmap_client *client,
776                                   struct seq_file *s, u32 heap_type)
777 {
778         struct rb_node *n;
779
780         nvmap_ref_lock(client);
781         n = rb_first(&client->handle_refs);
782         for (; n != NULL; n = rb_next(n)) {
783                 struct nvmap_handle_ref *ref =
784                         rb_entry(n, struct nvmap_handle_ref, node);
785                 struct nvmap_handle *handle = ref->handle;
786                 if (handle->alloc && handle->heap_type == heap_type) {
787                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
788                                            (handle->carveout->base);
789                         seq_printf(s,
790                                 "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
791                                 "", "",
792                                 (unsigned long long)base, K(handle->size),
793                                 handle->userflags,
794                                 atomic_read(&handle->ref),
795                                 atomic_read(&ref->dupes),
796                                 atomic_read(&ref->pin),
797                                 atomic_read(&handle->share_count),
798                                 handle);
799                 }
800         }
801         nvmap_ref_unlock(client);
802 }
803
804 /* Compute the total amount of the handle's physical memory that is mapped
805  * into the client's virtual address space. Remember that the vmas list is
806  * sorted in ascending order of handle offsets.
807  * NOTE: This function must be called while holding the handle's lock mutex.
808  */
809 static void nvmap_get_client_handle_mss(struct nvmap_client *client,
810                                 struct nvmap_handle *handle, u64 *total)
811 {
812         struct nvmap_vma_list *vma_list = NULL;
813         struct vm_area_struct *vma = NULL;
814         u64 end_offset = 0, vma_start_offset, vma_size;
815         int64_t overlap_size;
816
817         *total = 0;
818         list_for_each_entry(vma_list, &handle->vmas, list) {
819
820                 if (client->task->pid == vma_list->pid) {
821                         vma = vma_list->vma;
822                         vma_size = vma->vm_end - vma->vm_start;
823
824                         vma_start_offset = vma->vm_pgoff << PAGE_SHIFT;
825                         if (end_offset < vma_start_offset + vma_size) {
826                                 *total += vma_size;
827
828                                 overlap_size = end_offset - vma_start_offset;
829                                 if (overlap_size > 0)
830                                         *total -= overlap_size;
831                                 end_offset = vma_start_offset + vma_size;
832                         }
833                 }
834         }
835 }
836
837 static void maps_stringify(struct nvmap_client *client,
838                                 struct seq_file *s, u32 heap_type)
839 {
840         struct rb_node *n;
841         struct nvmap_vma_list *vma_list = NULL;
842         struct vm_area_struct *vma = NULL;
843         u64 total_mapped_size, vma_size;
844
845         nvmap_ref_lock(client);
846         n = rb_first(&client->handle_refs);
847         for (; n != NULL; n = rb_next(n)) {
848                 struct nvmap_handle_ref *ref =
849                         rb_entry(n, struct nvmap_handle_ref, node);
850                 struct nvmap_handle *handle = ref->handle;
851                 if (handle->alloc && handle->heap_type == heap_type) {
852                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
853                                            (handle->carveout->base);
854                         seq_printf(s,
855                                 "%-18s %-18s %8llx %10zuK %8x %6u %16p "
856                                 "%12s %12s ",
857                                 "", "",
858                                 (unsigned long long)base, K(handle->size),
859                                 handle->userflags,
860                                 atomic_read(&handle->share_count),
861                                 handle, "", "");
862
863                         mutex_lock(&handle->lock);
864                         nvmap_get_client_handle_mss(client, handle,
865                                                         &total_mapped_size);
866                         seq_printf(s, "%6lluK\n", K(total_mapped_size));
867
868                         list_for_each_entry(vma_list, &handle->vmas, list) {
869
870                                 if (vma_list->pid == client->task->pid) {
871                                         vma = vma_list->vma;
872                                         vma_size = vma->vm_end - vma->vm_start;
873                                         seq_printf(s,
874                                           "%-18s %-18s %8s %11s %8s %6s %16s "
875                                           "%-12lx-%12lx %6lluK\n",
876                                           "", "", "", "", "", "", "",
877                                           vma->vm_start, vma->vm_end,
878                                           K(vma_size));
879                                 }
880                         }
881                         mutex_unlock(&handle->lock);
882                 }
883         }
884         nvmap_ref_unlock(client);
885 }
886
887 static void nvmap_get_client_mss(struct nvmap_client *client,
888                                  u64 *total, u32 heap_type)
889 {
890         struct rb_node *n;
891
892         *total = 0;
893         nvmap_ref_lock(client);
894         n = rb_first(&client->handle_refs);
895         for (; n != NULL; n = rb_next(n)) {
896                 struct nvmap_handle_ref *ref =
897                         rb_entry(n, struct nvmap_handle_ref, node);
898                 struct nvmap_handle *handle = ref->handle;
899                 if (handle->alloc && handle->heap_type == heap_type)
900                         *total += handle->size /
901                                   atomic_read(&handle->share_count);
902         }
903         nvmap_ref_unlock(client);
904 }
905
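/*
 * Sum the sizes of all allocated handles of @heap_type across the device.
 * When @non_pss is supplied, the walk is done per page and pages with no
 * user-space mapping are accumulated into *non_pss; *pss is then
 * *total - *non_pss.
 */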
906 static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
907                                       u64 *total, u32 heap_type)
908 {
909         int i;
910         struct rb_node *n;
911         struct nvmap_device *dev = nvmap_dev;
912
913         *total = 0;
914         if (pss)
915                 *pss = 0;
916         if (non_pss)
917                 *non_pss = 0;
918         if (!dev)
919                 return;
920         spin_lock(&dev->handle_lock);
921         n = rb_first(&dev->handles);
922         for (; n != NULL; n = rb_next(n)) {
923                 struct nvmap_handle *h =
924                         rb_entry(n, struct nvmap_handle, node);
925
926                 if (!h || !h->alloc || h->heap_type != heap_type)
927                         continue;
928                 if (!non_pss) {
929                         *total += h->size;
930                         continue;
931                 }
932
933                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
934                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
935                         int mapcount = page_mapcount(page);
936                         if (!mapcount)
937                                 *non_pss += PAGE_SIZE;
938                         *total += PAGE_SIZE;
939                 }
940         }
941         if (pss && non_pss)
942                 *pss = *total - *non_pss;
943         spin_unlock(&dev->handle_lock);
944 }
945
946 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
947 {
948         u64 total;
949         struct nvmap_client *client;
950         u32 heap_type = (u32)(uintptr_t)s->private;
951
952         spin_lock(&nvmap_dev->clients_lock);
953         seq_printf(s, "%-18s %18s %8s %11s\n",
954                 "CLIENT", "PROCESS", "PID", "SIZE");
955         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
956                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
957                         "DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
958         list_for_each_entry(client, &nvmap_dev->clients, list) {
959                 u64 client_total;
960                 client_stringify(client, s);
961                 nvmap_get_client_mss(client, &client_total, heap_type);
962                 seq_printf(s, " %10lluK\n", K(client_total));
963                 allocations_stringify(client, s, heap_type);
964                 seq_printf(s, "\n");
965         }
966         spin_unlock(&nvmap_dev->clients_lock);
967         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
968         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
969         return 0;
970 }
971
972 DEBUGFS_OPEN_FOPS(allocations);
973
974 static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
975 {
976         u64 total;
977         struct nvmap_client *client;
978         u32 heap_type = (u32)(uintptr_t)s->private;
979
980         spin_lock(&nvmap_dev->clients_lock);
981         seq_printf(s, "%-18s %18s %8s %11s\n",
982                 "CLIENT", "PROCESS", "PID", "SIZE");
983         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %9s %21s %18s\n",
984                 "", "", "BASE", "SIZE", "FLAGS", "SHARE", "UID",
985                 "MAPS", "MAPSIZE");
986
987         list_for_each_entry(client, &nvmap_dev->clients, list) {
988                 u64 client_total;
989                 client_stringify(client, s);
990                 nvmap_get_client_mss(client, &client_total, heap_type);
991                 seq_printf(s, " %10lluK\n", K(client_total));
992                 maps_stringify(client, s, heap_type);
993                 seq_printf(s, "\n");
994         }
995         spin_unlock(&nvmap_dev->clients_lock);
996
997         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
998         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
999         return 0;
1000 }
1001
1002 DEBUGFS_OPEN_FOPS(maps);
1003
1004 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
1005 {
1006         u64 total;
1007         struct nvmap_client *client;
1008         ulong heap_type = (ulong)s->private;
1009
1010         spin_lock(&nvmap_dev->clients_lock);
1011         seq_printf(s, "%-18s %18s %8s %11s\n",
1012                 "CLIENT", "PROCESS", "PID", "SIZE");
1013         list_for_each_entry(client, &nvmap_dev->clients, list) {
1014                 u64 client_total;
1015                 client_stringify(client, s);
1016                 nvmap_get_client_mss(client, &client_total, heap_type);
1017                 seq_printf(s, " %10lluK\n", K(client_total));
1018         }
1019         spin_unlock(&nvmap_dev->clients_lock);
1020         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
1021         seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
1022         return 0;
1023 }
1024
1025 DEBUGFS_OPEN_FOPS(clients);
1026
1027 #define PRINT_MEM_STATS_NOTE(x) \
1028 do { \
1029         seq_printf(s, "Note: total memory is a precise account of pages " \
1030                 "allocated by NvMap.\nIt doesn't match the sum of all " \
1031                 "clients' \"%s\" because shared memory \nis accounted in " \
1032                 "full in each client's \"%s\" that shares it.\n", #x, #x); \
1033 } while (0)
1034
1035 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
1036                                    u64 *non_pss, u64 *total)
1037 {
1038         int i;
1039         struct rb_node *n;
1040
1041         *pss = *non_pss = *total = 0;
1042         nvmap_ref_lock(client);
1043         n = rb_first(&client->handle_refs);
1044         for (; n != NULL; n = rb_next(n)) {
1045                 struct nvmap_handle_ref *ref =
1046                         rb_entry(n, struct nvmap_handle_ref, node);
1047                 struct nvmap_handle *h = ref->handle;
1048
1049                 if (!h || !h->alloc || !h->heap_pgalloc)
1050                         continue;
1051
1052                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
1053                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
1054                         int mapcount = page_mapcount(page);
1055                         if (!mapcount)
1056                                 *non_pss += PAGE_SIZE;
1057                         *total += PAGE_SIZE;
1058                 }
1059                 *pss = *total - *non_pss;
1060         }
1061         nvmap_ref_unlock(client);
1062 }
1063
1064 static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
1065 {
1066         u64 pss, non_pss, total;
1067         struct nvmap_client *client;
1068         struct nvmap_device *dev = s->private;
1069         u64 total_memory, total_pss, total_non_pss;
1070
1071         spin_lock(&dev->clients_lock);
1072         seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
1073                 "CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
1074         list_for_each_entry(client, &dev->clients, list) {
1075                 client_stringify(client, s);
1076                 nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
1077                 seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
1078                         K(non_pss), K(total));
1079         }
1080         spin_unlock(&dev->clients_lock);
1081
1082         nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory, NVMAP_HEAP_IOVMM);
1083         seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
1084                 "total", "", "", K(total_pss),
1085                 K(total_non_pss), K(total_memory));
1086         PRINT_MEM_STATS_NOTE(TOTAL);
1087         return 0;
1088 }
1089
1090 DEBUGFS_OPEN_FOPS(iovmm_procrank);
1091
1092 ulong nvmap_iovmm_get_used_pages(void)
1093 {
1094         u64 total;
1095
1096         nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
1097         return total >> PAGE_SHIFT;
1098 }
1099
1100 static int nvmap_stats_reset(void *data, u64 val)
1101 {
1102         int i;
1103
1104         if (val) {
1105                 atomic64_set(&nvmap_stats.collect, 0);
1106                 for (i = 0; i < NS_NUM; i++) {
1107                         if (i == NS_TOTAL)
1108                                 continue;
1109                         atomic64_set(&nvmap_stats.stats[i], 0);
1110                 }
1111         }
1112         return 0;
1113 }
1114
1115 static int nvmap_stats_get(void *data, u64 *val)
1116 {
1117         atomic64_t *ptr = data;
1118
1119         *val = atomic64_read(ptr);
1120         return 0;
1121 }
1122
1123 static int nvmap_stats_set(void *data, u64 val)
1124 {
1125         atomic64_t *ptr = data;
1126
1127         atomic64_set(ptr, val);
1128         return 0;
1129 }
1130
1131 DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
1132 DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");
1133
1134 static void nvmap_stats_init(struct dentry *nvmap_debug_root)
1135 {
1136         struct dentry *stats_root;
1137
1138 #define CREATE_DF(x, y) \
1139         debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);
1140
1141         stats_root = debugfs_create_dir("stats", nvmap_debug_root);
1142         if (!IS_ERR_OR_NULL(stats_root)) {
1143                 CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
1144                 CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
1145                 CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
1146                 CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
1147                 CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
1148                 CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
1149                 CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
1150                 CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
1151                 CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
1152                 CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
1153                 CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
1154                 CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
1155                 CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);
1156
1157                 debugfs_create_file("collect", S_IRUGO | S_IWUSR,
1158                         stats_root, &nvmap_stats.collect, &stats_fops);
1159                 debugfs_create_file("reset", S_IWUSR,
1160                         stats_root, NULL, &reset_stats_fops);
1161         }
1162
1163 #undef CREATE_DF
1164 }
1165
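/*
 * Per-category statistics are accumulated only while collection is enabled
 * through the debugfs "collect" file; the NS_TOTAL counter is always kept.
 */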
1166 void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
1167 {
1168         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1169                 atomic64_add(size, &nvmap_stats.stats[stat]);
1170 }
1171
1172 void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
1173 {
1174         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1175                 atomic64_sub(size, &nvmap_stats.stats[stat]);
1176 }
1177
1178 u64 nvmap_stats_read(enum nvmap_stats_t stat)
1179 {
1180         return atomic64_read(&nvmap_stats.stats[stat]);
1181 }
1182
1183 static int nvmap_probe(struct platform_device *pdev)
1184 {
1185         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1186         struct nvmap_device *dev;
1187         struct dentry *nvmap_debug_root;
1188         unsigned int i;
1189         int e;
1190
1191         if (!plat) {
1192                 dev_err(&pdev->dev, "no platform data?\n");
1193                 return -ENODEV;
1194         }
1195
1196         /*
1197          * The DMA mapping API uses these parameters to decide how to map the
1198          * passed buffers. If the maximum physical segment size is set to
1199          * smaller than the size of the buffer, then the buffer will be mapped
1200          * as separate IO virtual address ranges.
1201          */
1202         pdev->dev.dma_parms = &nvmap_dma_parameters;
1203
1204         if (WARN_ON(nvmap_dev != NULL)) {
1205                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1206                 return -ENODEV;
1207         }
1208
1209         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1210         if (!dev) {
1211                 dev_err(&pdev->dev, "out of memory for device\n");
1212                 return -ENOMEM;
1213         }
1214
1215         nvmap_dev = dev;
1216
1217         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1218         dev->dev_user.name = "nvmap";
1219         dev->dev_user.fops = &nvmap_user_fops;
1220         dev->dev_user.parent = &pdev->dev;
1221
1222         dev->handles = RB_ROOT;
1223
1224 #ifdef CONFIG_NVMAP_PAGE_POOLS
1225         e = nvmap_page_pool_init(dev);
1226         if (e)
1227                 goto fail;
1228 #endif
1229
1230         spin_lock_init(&dev->handle_lock);
1231         INIT_LIST_HEAD(&dev->clients);
1232         spin_lock_init(&dev->clients_lock);
1233
1234         e = misc_register(&dev->dev_user);
1235         if (e) {
1236                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1237                         dev->dev_user.name);
1238                 goto fail;
1239         }
1240
1241         dev->nr_carveouts = 0;
1242         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1243                              plat->nr_carveouts, GFP_KERNEL);
1244         if (!dev->heaps) {
1245                 e = -ENOMEM;
1246                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1247                 goto fail;
1248         }
1249
1250         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1251         if (IS_ERR_OR_NULL(nvmap_debug_root))
1252                 dev_err(&pdev->dev, "couldn't create debug files\n");
1253
1254         debugfs_create_u32("max_handle_count", S_IRUGO,
1255                         nvmap_debug_root, &nvmap_max_handle_count);
1256
1257         for (i = 0; i < plat->nr_carveouts; i++) {
1258                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1259                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1260                 node->base = round_up(co->base, PAGE_SIZE);
1261                 node->size = round_down(co->size -
1262                                         (node->base - co->base), PAGE_SIZE);
1263                 if (!co->size)
1264                         continue;
1265
1266                 node->carveout = nvmap_heap_create(
1267                                 dev->dev_user.this_device, co,
1268                                 node->base, node->size, node);
1269
1270                 if (!node->carveout) {
1271                         e = -ENOMEM;
1272                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1273                         goto fail_heaps;
1274                 }
1275                 node->index = dev->nr_carveouts;
1276                 dev->nr_carveouts++;
1277                 spin_lock_init(&node->clients_lock);
1278                 INIT_LIST_HEAD(&node->clients);
1279                 node->heap_bit = co->usage_mask;
1280
1281                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1282                         struct dentry *heap_root =
1283                                 debugfs_create_dir(co->name, nvmap_debug_root);
1284                         if (!IS_ERR_OR_NULL(heap_root)) {
1285                                 debugfs_create_file("clients", S_IRUGO,
1286                                         heap_root,
1287                                         (void *)(uintptr_t)node->heap_bit,
1288                                         &debug_clients_fops);
1289                                 debugfs_create_file("allocations", S_IRUGO,
1290                                         heap_root,
1291                                         (void *)(uintptr_t)node->heap_bit,
1292                                         &debug_allocations_fops);
1293                                 debugfs_create_file("maps", S_IRUGO,
1294                                         heap_root,
1295                                         (void *)(uintptr_t)node->heap_bit,
1296                                         &debug_maps_fops);
1297                                 nvmap_heap_debugfs_init(heap_root,
1298                                                         node->carveout);
1299                         }
1300                 }
1301         }
1302         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1303                 struct dentry *iovmm_root =
1304                         debugfs_create_dir("iovmm", nvmap_debug_root);
1305                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1306                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1307                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1308                                 &debug_clients_fops);
1309                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1310                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1311                                 &debug_allocations_fops);
1312                         debugfs_create_file("maps", S_IRUGO, iovmm_root,
1313                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1314                                 &debug_maps_fops);
1315                         debugfs_create_file("procrank", S_IRUGO, iovmm_root,
1316                                 dev, &debug_iovmm_procrank_fops);
1317                 }
1318 #ifdef CONFIG_NVMAP_PAGE_POOLS
1319                 nvmap_page_pool_debugfs_init(nvmap_debug_root);
1320 #endif
1321 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
1322                 debugfs_create_size_t("cache_maint_inner_threshold",
1323                                       S_IRUSR | S_IWUSR,
1324                                       nvmap_debug_root,
1325                                       &cache_maint_inner_threshold);
1326
1327                 /* use a smaller threshold on Cortex-A9 (MIDR part 0xc09) */
1328                 if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
1329                         cache_maint_inner_threshold = SZ_32K;
1330                 pr_info("nvmap: inner cache maint threshold=%zu\n",
1331                         cache_maint_inner_threshold);
1332 #endif
1333 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
1334                 debugfs_create_size_t("cache_maint_outer_threshold",
1335                                       S_IRUSR | S_IWUSR,
1336                                       nvmap_debug_root,
1337                                       &cache_maint_outer_threshold);
1338                 pr_info("nvmap: outer cache maint threshold=%zu\n",
1339                         cache_maint_outer_threshold);
1340 #endif
1341         }
1342
1343         nvmap_stats_init(nvmap_debug_root);
1344         platform_set_drvdata(pdev, dev);
1345
1346         nvmap_dmabuf_debugfs_init(nvmap_debug_root);
1347         e = nvmap_dmabuf_stash_init();
1348         if (e)
1349                 goto fail_heaps;
1350
1351         return 0;
1352 fail_heaps:
1353         for (i = 0; i < dev->nr_carveouts; i++) {
1354                 struct nvmap_carveout_node *node = &dev->heaps[i];
1355                 nvmap_heap_destroy(node->carveout);
1356         }
1357 fail:
1358 #ifdef CONFIG_NVMAP_PAGE_POOLS
1359         nvmap_page_pool_fini(nvmap_dev);
1360 #endif
1361         kfree(dev->heaps);
1362         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1363                 misc_deregister(&dev->dev_user);
1364         kfree(dev);
1365         nvmap_dev = NULL;
1366         return e;
1367 }
1368
1369 static int nvmap_remove(struct platform_device *pdev)
1370 {
1371         struct nvmap_device *dev = platform_get_drvdata(pdev);
1372         struct rb_node *n;
1373         struct nvmap_handle *h;
1374         int i;
1375
1376         misc_deregister(&dev->dev_user);
1377
1378         while ((n = rb_first(&dev->handles))) {
1379                 h = rb_entry(n, struct nvmap_handle, node);
1380                 rb_erase(&h->node, &dev->handles);
1381                 kfree(h);
1382         }
1383
1384         for (i = 0; i < dev->nr_carveouts; i++) {
1385                 struct nvmap_carveout_node *node = &dev->heaps[i];
1386                 nvmap_heap_destroy(node->carveout);
1387         }
1388         kfree(dev->heaps);
1389
1390         kfree(dev);
1391         nvmap_dev = NULL;
1392         return 0;
1393 }
1394
1395 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1396 {
1397         return 0;
1398 }
1399
1400 static int nvmap_resume(struct platform_device *pdev)
1401 {
1402         return 0;
1403 }
1404
1405 static struct platform_driver nvmap_driver = {
1406         .probe          = nvmap_probe,
1407         .remove         = nvmap_remove,
1408         .suspend        = nvmap_suspend,
1409         .resume         = nvmap_resume,
1410
1411         .driver = {
1412                 .name   = "tegra-nvmap",
1413                 .owner  = THIS_MODULE,
1414         },
1415 };
1416
1417 static int __init nvmap_init_driver(void)
1418 {
1419         int e;
1420
1421         nvmap_dev = NULL;
1422
1423         e = nvmap_heap_init();
1424         if (e)
1425                 goto fail;
1426
1427         e = platform_driver_register(&nvmap_driver);
1428         if (e) {
1429                 nvmap_heap_deinit();
1430                 goto fail;
1431         }
1432
1433 fail:
1434         return e;
1435 }
1436 fs_initcall(nvmap_init_driver);
1437
1438 static void __exit nvmap_exit_driver(void)
1439 {
1440         platform_driver_unregister(&nvmap_driver);
1441         nvmap_heap_deinit();
1442         nvmap_dev = NULL;
1443 }
1444 module_exit(nvmap_exit_driver);