1 /*
2  * drivers/video/tegra/nvmap/nvmap_dev.c
3  *
4  * User-space interface to nvmap
5  *
6  * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include <linux/backing-dev.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/io.h>
28 #include <linux/kernel.h>
29 #include <linux/device.h>
30 #include <linux/oom.h>
31 #include <linux/platform_device.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/uaccess.h>
36 #include <linux/vmalloc.h>
37 #include <linux/nvmap.h>
38 #include <linux/module.h>
39 #include <linux/resource.h>
40 #include <linux/security.h>
41 #include <linux/stat.h>
42 #include <linux/kthread.h>
43
44 #include <asm/cputype.h>
45
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/nvmap.h>
48
49 #include "nvmap_priv.h"
50 #include "nvmap_ioctl.h"
51
52 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
53
54 /* this is basically the L2 cache size */
55 #ifdef CONFIG_DENVER_CPU
56 size_t cache_maint_inner_threshold = SZ_2M * 8;
57 #else
58 size_t cache_maint_inner_threshold = SZ_2M;
59 #endif
60
61 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
62 size_t cache_maint_outer_threshold = SZ_1M;
63 #endif
64
65 struct nvmap_carveout_node {
66         unsigned int            heap_bit;
67         struct nvmap_heap       *carveout;
68         int                     index;
69         struct list_head        clients;
70         spinlock_t              clients_lock;
71         phys_addr_t                     base;
72         size_t                  size;
73 };
74
75 struct nvmap_device *nvmap_dev;
76 struct nvmap_stats nvmap_stats;
77
78 static struct backing_dev_info nvmap_bdi = {
79         .ra_pages       = 0,
80         .capabilities   = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
81                            BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
82 };
83
84 static struct device_dma_parameters nvmap_dma_parameters = {
85         .max_segment_size = UINT_MAX,
86 };
87
88 static int nvmap_open(struct inode *inode, struct file *filp);
89 static int nvmap_release(struct inode *inode, struct file *filp);
90 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
91 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
92 static void nvmap_vma_close(struct vm_area_struct *vma);
93 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
94
95 static const struct file_operations nvmap_user_fops = {
96         .owner          = THIS_MODULE,
97         .open           = nvmap_open,
98         .release        = nvmap_release,
99         .unlocked_ioctl = nvmap_ioctl,
100 #ifdef CONFIG_COMPAT
101         .compat_ioctl = nvmap_ioctl,
102 #endif
103         .mmap           = nvmap_map,
104 };
105
106 static struct vm_operations_struct nvmap_vma_ops = {
107         .open           = nvmap_vma_open,
108         .close          = nvmap_vma_close,
109         .fault          = nvmap_vma_fault,
110 };
111
112 int is_nvmap_vma(struct vm_area_struct *vma)
113 {
114         return vma->vm_ops == &nvmap_vma_ops;
115 }
116
117 /*
118  * Verify that the passed handle is valid for this client and return the
119  * client's reference to it, or NULL if the client holds no such reference.
120  *
121  * Note: the caller must hold the client's ref lock.
122  */
123 struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
124                                                  struct nvmap_handle *h)
125 {
126         struct rb_node *n = c->handle_refs.rb_node;
127
128         while (n) {
129                 struct nvmap_handle_ref *ref;
130                 ref = rb_entry(n, struct nvmap_handle_ref, node);
131                 if (ref->handle == h)
132                         return ref;
133                 else if ((uintptr_t)h > (uintptr_t)ref->handle)
134                         n = n->rb_right;
135                 else
136                         n = n->rb_left;
137         }
138
139         return NULL;
140 }
141
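/*
 * Return the heap bit of the carveout heap that contains block @b, or 0 if
 * the block does not belong to any registered carveout.
 */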
142 unsigned long nvmap_carveout_usage(struct nvmap_client *c,
143                                    struct nvmap_heap_block *b)
144 {
145         struct nvmap_heap *h = nvmap_block_to_heap(b);
146         struct nvmap_carveout_node *n;
147         int i;
148
149         for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
150                 n = &nvmap_dev->heaps[i];
151                 if (n->carveout == h)
152                         return n->heap_bit;
153         }
154         return 0;
155 }
156
157 /*
158  * This routine flushes carveout memory from the cache.
159  *
160  * Why is a cache flush needed for carveout memory? Consider the case where a
161  * piece of carveout is allocated as cached and then released. If the same
162  * memory is later allocated for an uncached request without first being
163  * flushed from the cache, the client might hand it to a H/W engine, which
164  * could start modifying it. Because the memory was cached earlier, portions
165  * of it may still sit in the cache. When the CPU later reads or writes other
166  * memory, those cached portions might get written back to main memory and
167  * corrupt data the H/W engine has already written there.
168  *
169  * Blindly flushing the memory on every carveout allocation is redundant, so
170  * the following strategy is used to optimize carveout cache flushes:
171  *
172  * - The whole carveout is flushed from the cache during its initialization.
173  * - During allocation, carveout buffers are not flushed from the cache.
174  * - During deallocation, carveout buffers are flushed if they were allocated
175  *   as cached. If they were allocated as uncached or write-combined, no
176  *   cache flush is needed; just draining the store buffers is enough
177  *   (see the wmb() at the end of nvmap_flush_heap_block()).
178  */
179 int nvmap_flush_heap_block(struct nvmap_client *client,
180         struct nvmap_heap_block *block, size_t len, unsigned int prot)
181 {
182         ulong kaddr;
183         phys_addr_t phys = block->base;
184         phys_addr_t end = block->base + len;
185         struct vm_struct *area = NULL;
186
187         if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
188                 goto out;
189
190 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
191         if (len >= cache_maint_inner_threshold) {
192                 inner_flush_cache_all();
193                 if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
194                         outer_flush_range(block->base, block->base + len);
195                 goto out;
196         }
197 #endif
198
199         area = alloc_vm_area(PAGE_SIZE, NULL);
200         if (!area)
201                 return -ENOMEM;
202
203         kaddr = (ulong)area->addr;
204
205         while (phys < end) {
206                 phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
207                 void *base = (void *)kaddr + (phys & ~PAGE_MASK);
208
209                 next = min(next, end);
210                 ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
211                         phys, PG_PROT_KERNEL);
212                 FLUSH_DCACHE_AREA(base, next - phys);
213                 phys = next;
214                 unmap_kernel_range(kaddr, PAGE_SIZE);
215         }
216
217         if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
218                 outer_flush_range(block->base, block->base + len);
219
220         free_vm_area(area);
221 out:
222         wmb();
223         return 0;
224 }
225
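/*
 * Account @len bytes of carveout usage to @client for carveout @node, adding
 * the client to the node's client list on its first allocation there.
 */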
226 void nvmap_carveout_commit_add(struct nvmap_client *client,
227                                struct nvmap_carveout_node *node,
228                                size_t len)
229 {
230         spin_lock(&node->clients_lock);
231         BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
232                client->carveout_commit[node->index].commit != 0);
233
234         client->carveout_commit[node->index].commit += len;
235         /* if this client isn't already on the list of nodes for this heap,
236            add it */
237         if (list_empty(&client->carveout_commit[node->index].list)) {
238                 list_add(&client->carveout_commit[node->index].list,
239                          &node->clients);
240         }
241         spin_unlock(&node->clients_lock);
242 }
243
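/*
 * Release @len bytes of carveout usage previously committed by @client to
 * @node; the client is removed from the node's client list once its commit
 * drops to zero.
 */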
244 void nvmap_carveout_commit_subtract(struct nvmap_client *client,
245                                     struct nvmap_carveout_node *node,
246                                     size_t len)
247 {
248         if (!client)
249                 return;
250
251         spin_lock(&node->clients_lock);
252         BUG_ON(client->carveout_commit[node->index].commit < len);
253         client->carveout_commit[node->index].commit -= len;
254         /* if no more allocation in this carveout for this node, delete it */
255         if (!client->carveout_commit[node->index].commit)
256                 list_del_init(&client->carveout_commit[node->index].list);
257         spin_unlock(&node->clients_lock);
258 }
259
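/*
 * Walk the registered carveouts that match the requested heap @type and
 * return the first block that can be allocated for @handle, or NULL if no
 * matching carveout can satisfy the request.
 */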
260 static
261 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
262                                               struct nvmap_handle *handle,
263                                               unsigned long type)
264 {
265         struct nvmap_carveout_node *co_heap;
266         struct nvmap_device *dev = nvmap_dev;
267         int i;
268
269         for (i = 0; i < dev->nr_carveouts; i++) {
270                 struct nvmap_heap_block *block;
271                 co_heap = &dev->heaps[i];
272
273                 if (!(co_heap->heap_bit & type))
274                         continue;
275
276                 block = nvmap_heap_alloc(co_heap->carveout, handle);
277                 if (block)
278                         return block;
279         }
280         return NULL;
281 }
282
283 struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
284                                               struct nvmap_handle *handle,
285                                               unsigned long type)
286 {
287         return do_nvmap_carveout_alloc(client, handle, type);
288 }
289
290 /* remove a handle from the device's tree of all handles; called
291  * when freeing handles. */
292 int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
293 {
294         spin_lock(&dev->handle_lock);
295
296         /* re-test inside the spinlock if the handle really has no clients;
297          * only remove the handle if it is unreferenced */
298         if (atomic_add_return(0, &h->ref) > 0) {
299                 spin_unlock(&dev->handle_lock);
300                 return -EBUSY;
301         }
302         smp_rmb();
303         BUG_ON(atomic_read(&h->ref) < 0);
304         BUG_ON(atomic_read(&h->pin) != 0);
305
306         rb_erase(&h->node, &dev->handles);
307
308         spin_unlock(&dev->handle_lock);
309         return 0;
310 }
311
312 /* adds a newly-created handle to the device master tree */
313 void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
314 {
315         struct rb_node **p;
316         struct rb_node *parent = NULL;
317
318         spin_lock(&dev->handle_lock);
319         p = &dev->handles.rb_node;
320         while (*p) {
321                 struct nvmap_handle *b;
322
323                 parent = *p;
324                 b = rb_entry(parent, struct nvmap_handle, node);
325                 if (h > b)
326                         p = &parent->rb_right;
327                 else
328                         p = &parent->rb_left;
329         }
330         rb_link_node(&h->node, parent, p);
331         rb_insert_color(&h->node, &dev->handles);
332         spin_unlock(&dev->handle_lock);
333 }
334
335 /* Validates that a handle is in the device master tree and, if it is,
336  * takes and returns a reference to it; returns NULL for unknown handles. */
337 struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
338 {
339         struct nvmap_handle *h = NULL;
340         struct rb_node *n;
341
342         spin_lock(&nvmap_dev->handle_lock);
343
344         n = nvmap_dev->handles.rb_node;
345
346         while (n) {
347                 h = rb_entry(n, struct nvmap_handle, node);
348                 if (h == id) {
349                         h = nvmap_handle_get(h);
350                         spin_unlock(&nvmap_dev->handle_lock);
351                         return h;
352                 }
353                 if (id > h)
354                         n = n->rb_right;
355                 else
356                         n = n->rb_left;
357         }
358         spin_unlock(&nvmap_dev->handle_lock);
359         return NULL;
360 }
361
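/*
 * Allocate and register a new client of @dev. The client starts with a
 * single reference, an empty handle tree and zeroed carveout commits; for
 * user-space callers the group leader's task_struct is pinned so the owning
 * process can be identified later.
 */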
362 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
363                                            const char *name)
364 {
365         struct nvmap_client *client;
366         struct task_struct *task;
367         int i;
368
369         if (WARN_ON(!dev))
370                 return NULL;
371
372         client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
373                          * dev->nr_carveouts), GFP_KERNEL);
374         if (!client)
375                 return NULL;
376
377         client->name = name;
378         client->kernel_client = true;
379         client->handle_refs = RB_ROOT;
380
381         for (i = 0; i < dev->nr_carveouts; i++) {
382                 INIT_LIST_HEAD(&client->carveout_commit[i].list);
383                 client->carveout_commit[i].commit = 0;
384         }
385
386         get_task_struct(current->group_leader);
387         task_lock(current->group_leader);
388         /* don't bother to store task struct for kernel threads,
389            they can't be killed anyway */
390         if (current->flags & PF_KTHREAD) {
391                 put_task_struct(current->group_leader);
392                 task = NULL;
393         } else {
394                 task = current->group_leader;
395         }
396         task_unlock(current->group_leader);
397         client->task = task;
398
399         mutex_init(&client->ref_lock);
400         atomic_set(&client->count, 1);
401
402         spin_lock(&dev->clients_lock);
403         list_add(&client->list, &dev->clients);
404         spin_unlock(&dev->clients_lock);
405         return client;
406 }
407
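/*
 * Tear down a client once its last reference is dropped: unpin and release
 * every handle reference it still holds, detach it from the carveout client
 * lists and free the client structure.
 */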
408 static void destroy_client(struct nvmap_client *client)
409 {
410         struct rb_node *n;
411         int i;
412
413         if (!client)
414                 return;
415
416         spin_lock(&nvmap_dev->clients_lock);
417         list_del(&client->list);
418         spin_unlock(&nvmap_dev->clients_lock);
419
420         while ((n = rb_first(&client->handle_refs))) {
421                 struct nvmap_handle_ref *ref;
422                 int pins, dupes;
423
424                 ref = rb_entry(n, struct nvmap_handle_ref, node);
425
426                 smp_rmb();
427                 pins = atomic_read(&ref->pin);
428
429                 while (pins--)
430                         __nvmap_unpin(ref);
431
432                 if (ref->handle->owner == client)
433                         ref->handle->owner = NULL;
434
435                 dma_buf_put(ref->handle->dmabuf);
436                 rb_erase(&ref->node, &client->handle_refs);
437                 atomic_dec(&ref->handle->share_count);
438
439                 dupes = atomic_read(&ref->dupes);
440                 while (dupes--)
441                         nvmap_handle_put(ref->handle);
442
443                 kfree(ref);
444         }
445
446         for (i = 0; i < nvmap_dev->nr_carveouts; i++)
447                 list_del(&client->carveout_commit[i].list);
448
449         if (client->task)
450                 put_task_struct(client->task);
451
452         kfree(client);
453 }
454
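/*
 * Take a reference on @client. Returns NULL if the pointer is not a valid
 * kernel address or if the last reference has already been dropped.
 */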
455 struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
456 {
457         if (!virt_addr_valid(client))
458                 return NULL;
459
460         if (!atomic_add_unless(&client->count, 1, 0))
461                 return NULL;
462
463         return client;
464 }
465
466 void nvmap_client_put(struct nvmap_client *client)
467 {
468         if (!client)
469                 return;
470
471         if (!atomic_dec_return(&client->count))
472                 destroy_client(client);
473 }
474
475 static int nvmap_open(struct inode *inode, struct file *filp)
476 {
477         struct miscdevice *miscdev = filp->private_data;
478         struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
479         struct nvmap_client *priv;
480         int ret;
481         __attribute__((unused)) struct rlimit old_rlim, new_rlim;
482
483         ret = nonseekable_open(inode, filp);
484         if (unlikely(ret))
485                 return ret;
486
487         BUG_ON(dev != nvmap_dev);
488         priv = __nvmap_create_client(dev, "user");
489         if (!priv)
490                 return -ENOMEM;
491         trace_nvmap_open(priv, priv->name);
492
493         priv->kernel_client = false;
494
495         filp->f_mapping->backing_dev_info = &nvmap_bdi;
496
497         filp->private_data = priv;
498         return 0;
499 }
500
501 static int nvmap_release(struct inode *inode, struct file *filp)
502 {
503         struct nvmap_client *priv = filp->private_data;
504
505         trace_nvmap_release(priv, priv->name);
506         nvmap_client_put(priv);
507         return 0;
508 }
509
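/*
 * Attach handle @h to @vma: take a reference on the handle, allocate the
 * per-VMA private data and install the nvmap vm_ops so that faults are
 * served from the handle's backing memory.
 */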
510 int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
511 {
512         struct nvmap_vma_priv *priv;
513
514         h = nvmap_handle_get(h);
515         if (!h)
516                 return -EINVAL;
517
518         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
519         if (!priv)
520                 return -ENOMEM;
521         priv->handle = h;
522
523         vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
524                           VM_DONTDUMP | VM_DONTCOPY |
525                           (h->heap_pgalloc ? 0 : VM_PFNMAP);
526         vma->vm_ops = &nvmap_vma_ops;
527         BUG_ON(vma->vm_private_data != NULL);
528         vma->vm_private_data = priv;
529         vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
530         nvmap_vma_open(vma);
531         return 0;
532 }
533
534 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
535 {
536         BUG_ON(vma->vm_private_data != NULL);
537         vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
538                           VM_DONTDUMP | VM_DONTCOPY);
539         vma->vm_ops = &nvmap_vma_ops;
540         return 0;
541 }
542
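/*
 * Main ioctl dispatcher: validate the command number and the user buffer,
 * then hand off to the corresponding nvmap_ioctl_* handler.
 */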
543 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
544 {
545         int err = 0;
546         void __user *uarg = (void __user *)arg;
547
548         if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
549                 return -ENOTTY;
550
551         if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
552                 return -ENOTTY;
553
554         if (_IOC_DIR(cmd) & _IOC_READ)
555                 err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
556         if (_IOC_DIR(cmd) & _IOC_WRITE)
557                 err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
558
559         if (err)
560                 return -EFAULT;
561
562         switch (cmd) {
563         case NVMAP_IOC_CREATE:
564         case NVMAP_IOC_FROM_ID:
565         case NVMAP_IOC_FROM_FD:
566                 err = nvmap_ioctl_create(filp, cmd, uarg);
567                 break;
568
569         case NVMAP_IOC_GET_ID:
570                 err = nvmap_ioctl_getid(filp, uarg);
571                 break;
572
573         case NVMAP_IOC_GET_FD:
574                 err = nvmap_ioctl_getfd(filp, uarg);
575                 break;
576
577 #ifdef CONFIG_COMPAT
578         case NVMAP_IOC_PARAM_32:
579                 err = nvmap_ioctl_get_param(filp, uarg, true);
580                 break;
581 #endif
582
583         case NVMAP_IOC_PARAM:
584                 err = nvmap_ioctl_get_param(filp, uarg, false);
585                 break;
586
587 #ifdef CONFIG_COMPAT
588         case NVMAP_IOC_UNPIN_MULT_32:
589         case NVMAP_IOC_PIN_MULT_32:
590                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
591                         uarg, true);
592                 break;
593 #endif
594
595         case NVMAP_IOC_UNPIN_MULT:
596         case NVMAP_IOC_PIN_MULT:
597                 err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
598                         uarg, false);
599                 break;
600
601         case NVMAP_IOC_ALLOC:
602                 err = nvmap_ioctl_alloc(filp, uarg);
603                 break;
604
605         case NVMAP_IOC_ALLOC_KIND:
606                 err = nvmap_ioctl_alloc_kind(filp, uarg);
607                 break;
608
609         case NVMAP_IOC_FREE:
610                 err = nvmap_ioctl_free(filp, arg);
611                 break;
612
613 #ifdef CONFIG_COMPAT
614         case NVMAP_IOC_MMAP_32:
615                 err = nvmap_map_into_caller_ptr(filp, uarg, true);
616                 break;
617 #endif
618
619         case NVMAP_IOC_MMAP:
620                 err = nvmap_map_into_caller_ptr(filp, uarg, false);
621                 break;
622
623 #ifdef CONFIG_COMPAT
624         case NVMAP_IOC_WRITE_32:
625         case NVMAP_IOC_READ_32:
626                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
627                         uarg, true);
628                 break;
629 #endif
630
631         case NVMAP_IOC_WRITE:
632         case NVMAP_IOC_READ:
633                 err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
634                         false);
635                 break;
636
637 #ifdef CONFIG_COMPAT
638         case NVMAP_IOC_CACHE_32:
639                 err = nvmap_ioctl_cache_maint(filp, uarg, true);
640                 break;
641 #endif
642
643         case NVMAP_IOC_CACHE:
644                 err = nvmap_ioctl_cache_maint(filp, uarg, false);
645                 break;
646
647         case NVMAP_IOC_CACHE_LIST:
648         case NVMAP_IOC_RESERVE:
649                 err = nvmap_ioctl_cache_maint_list(filp, uarg,
650                                                    cmd == NVMAP_IOC_RESERVE);
651                 break;
652
653         case NVMAP_IOC_SHARE:
654                 err = nvmap_ioctl_share_dmabuf(filp, uarg);
655                 break;
656
657         default:
658                 return -ENOTTY;
659         }
660         return err;
661 }
662
663 /* to ensure that the backing store for the VMA isn't freed while a fork'd
664  * reference still exists, nvmap_vma_open increments the reference count on
665  * the handle, and nvmap_vma_close decrements it. alternatively, we could
666  * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
667 */
668 void nvmap_vma_open(struct vm_area_struct *vma)
669 {
670         struct nvmap_vma_priv *priv;
671         struct nvmap_handle *h;
672         struct nvmap_vma_list *vma_list, *tmp;
673
674         priv = vma->vm_private_data;
675         BUG_ON(!priv);
676         BUG_ON(!priv->handle);
677
678         atomic_inc(&priv->count);
679         h = priv->handle;
680
681         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
682         if (vma_list) {
683                 mutex_lock(&h->lock);
684                 list_for_each_entry(tmp, &h->vmas, list)
685                         BUG_ON(tmp->vma == vma);
686
687                 vma_list->vma = vma;
688                 list_add(&vma_list->list, &h->vmas);
689                 mutex_unlock(&h->lock);
690         } else {
691                 WARN(1, "vma not tracked");
692         }
693 }
694
695 static void nvmap_vma_close(struct vm_area_struct *vma)
696 {
697         struct nvmap_vma_priv *priv = vma->vm_private_data;
698         struct nvmap_vma_list *vma_list;
699         struct nvmap_handle *h;
700         bool vma_found = false;
701
702         if (!priv)
703                 return;
704
705         BUG_ON(!priv->handle);
706
707         h = priv->handle;
708         mutex_lock(&h->lock);
709         list_for_each_entry(vma_list, &h->vmas, list) {
710                 if (vma_list->vma != vma)
711                         continue;
712                 list_del(&vma_list->list);
713                 kfree(vma_list);
714                 vma_found = true;
715                 break;
716         }
717         BUG_ON(!vma_found);
718         mutex_unlock(&h->lock);
719
720         if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
721                 if (priv->handle)
722                         nvmap_handle_put(priv->handle);
723                 vma->vm_private_data = NULL;
724                 kfree(priv);
725         }
726 }
727
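/*
 * Fault handler for nvmap mappings. For carveout-backed handles the faulting
 * offset is translated into a physical pfn; page-allocated handles return
 * the individual backing page and mark it dirty.
 */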
728 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
729 {
730         struct page *page;
731         struct nvmap_vma_priv *priv;
732         unsigned long offs;
733
734         offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
735         priv = vma->vm_private_data;
736         if (!priv || !priv->handle || !priv->handle->alloc)
737                 return VM_FAULT_SIGBUS;
738
739         offs += priv->offs;
740         /* if the VMA was split for some reason, vm_pgoff will be the VMA's
741          * offset from the original VMA */
742         offs += (vma->vm_pgoff << PAGE_SHIFT);
743
744         if (offs >= priv->handle->size)
745                 return VM_FAULT_SIGBUS;
746
747         if (!priv->handle->heap_pgalloc) {
748                 unsigned long pfn;
749                 BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
750                 pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
751                 if (!pfn_valid(pfn)) {
752                         vm_insert_pfn(vma,
753                                 (unsigned long)vmf->virtual_address, pfn);
754                         return VM_FAULT_NOPAGE;
755                 }
756                 /* CMA memory would get here */
757                 page = pfn_to_page(pfn);
758         } else {
759                 offs >>= PAGE_SHIFT;
760                 if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
761                         return VM_FAULT_SIGBUS;
762                 page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
763                 nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
764         }
765
766         if (page)
767                 get_page(page);
768         vmf->page = page;
769         return (page) ? 0 : VM_FAULT_SIGBUS;
770 }
771
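/*
 * Boilerplate generator for the read-only debugfs files below: each instance
 * wires a nvmap_debug_<name>_show() routine into a single_open() based
 * file_operations structure.
 */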
772 #define DEBUGFS_OPEN_FOPS(name) \
773 static int nvmap_debug_##name##_open(struct inode *inode, \
774                                             struct file *file) \
775 { \
776         return single_open(file, nvmap_debug_##name##_show, \
777                             inode->i_private); \
778 } \
779 \
780 static const struct file_operations debug_##name##_fops = { \
781         .open = nvmap_debug_##name##_open, \
782         .read = seq_read, \
783         .llseek = seq_lseek, \
784         .release = single_release, \
785 }
786
787 #define K(x) ((x) >> 10)
788
789 static void client_stringify(struct nvmap_client *client, struct seq_file *s)
790 {
791         char task_comm[TASK_COMM_LEN];
792         if (!client->task) {
793                 seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
794                 return;
795         }
796         get_task_comm(task_comm, client->task);
797         seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
798                    client->task->pid);
799 }
800
801 static void allocations_stringify(struct nvmap_client *client,
802                                   struct seq_file *s, u32 heap_type)
803 {
804         struct rb_node *n;
805
806         nvmap_ref_lock(client);
807         n = rb_first(&client->handle_refs);
808         for (; n != NULL; n = rb_next(n)) {
809                 struct nvmap_handle_ref *ref =
810                         rb_entry(n, struct nvmap_handle_ref, node);
811                 struct nvmap_handle *handle = ref->handle;
812                 if (handle->alloc && handle->heap_type == heap_type) {
813                         phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
814                                            (handle->carveout->base);
815                         seq_printf(s,
816                                 "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
817                                 "", "",
818                                 (unsigned long long)base, K(handle->size),
819                                 handle->userflags,
820                                 atomic_read(&handle->ref),
821                                 atomic_read(&ref->dupes),
822                                 atomic_read(&ref->pin),
823                                 atomic_read(&handle->share_count),
824                                 handle);
825                 }
826         }
827         nvmap_ref_unlock(client);
828 }
829
830 static void nvmap_get_client_mss(struct nvmap_client *client,
831                                  u64 *total, u32 heap_type)
832 {
833         struct rb_node *n;
834
835         *total = 0;
836         nvmap_ref_lock(client);
837         n = rb_first(&client->handle_refs);
838         for (; n != NULL; n = rb_next(n)) {
839                 struct nvmap_handle_ref *ref =
840                         rb_entry(n, struct nvmap_handle_ref, node);
841                 struct nvmap_handle *handle = ref->handle;
842                 if (handle->alloc && handle->heap_type == heap_type)
843                         *total += handle->size /
844                                   atomic_read(&handle->share_count);
845         }
846         nvmap_ref_unlock(client);
847 }
848
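/*
 * Sum the sizes of all handles of the given heap type across the whole
 * device. When @non_pss is supplied, every backing page is also classified
 * by its mapcount, which is only meaningful for page-allocated heaps.
 */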
849 static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
850                                       u64 *total, u32 heap_type)
851 {
852         int i;
853         struct rb_node *n;
854         struct nvmap_device *dev = nvmap_dev;
855
856         *total = 0;
857         if (pss)
858                 *pss = 0;
859         if (non_pss)
860                 *non_pss = 0;
861         if (!dev)
862                 return;
863         spin_lock(&dev->handle_lock);
864         n = rb_first(&dev->handles);
865         for (; n != NULL; n = rb_next(n)) {
866                 struct nvmap_handle *h =
867                         rb_entry(n, struct nvmap_handle, node);
868
869                 if (!h || !h->alloc || h->heap_type != heap_type)
870                         continue;
871                 if (!non_pss) {
872                         *total += h->size;
873                         continue;
874                 }
875
876                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
877                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
878                         int mapcount = page_mapcount(page);
879                         if (!mapcount)
880                                 *non_pss += PAGE_SIZE;
881                         *total += PAGE_SIZE;
882                 }
883         }
884         if (pss && non_pss)
885                 *pss = *total - *non_pss;
886         spin_unlock(&dev->handle_lock);
887 }
888
889 static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
890 {
891         u64 total;
892         struct nvmap_client *client;
893         u32 heap_type = (u32)(uintptr_t)s->private;
894
895         spin_lock(&nvmap_dev->clients_lock);
896         seq_printf(s, "%-18s %18s %8s %11s\n",
897                 "CLIENT", "PROCESS", "PID", "SIZE");
898         seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
899                         "", "", "BASE", "SIZE", "FLAGS", "REFS",
900                         "DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
901         list_for_each_entry(client, &nvmap_dev->clients, list) {
902                 u64 client_total;
903                 client_stringify(client, s);
904                 nvmap_get_client_mss(client, &client_total, heap_type);
905                 seq_printf(s, " %10lluK\n", K(client_total));
906                 allocations_stringify(client, s, heap_type);
907                 seq_printf(s, "\n");
908         }
909         spin_unlock(&nvmap_dev->clients_lock);
910         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
911         seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
912         return 0;
913 }
914
915 DEBUGFS_OPEN_FOPS(allocations);
916
917 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
918 {
919         u64 total;
920         struct nvmap_client *client;
921         ulong heap_type = (ulong)s->private;
922
923         spin_lock(&nvmap_dev->clients_lock);
924         seq_printf(s, "%-18s %18s %8s %11s\n",
925                 "CLIENT", "PROCESS", "PID", "SIZE");
926         list_for_each_entry(client, &nvmap_dev->clients, list) {
927                 u64 client_total;
928                 client_stringify(client, s);
929                 nvmap_get_client_mss(client, &client_total, heap_type);
930                 seq_printf(s, " %10lluK\n", K(client_total));
931         }
932         spin_unlock(&nvmap_dev->clients_lock);
933         nvmap_get_total_mss(NULL, NULL, &total, heap_type);
934         seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
935         return 0;
936 }
937
938 DEBUGFS_OPEN_FOPS(clients);
939
940 #define PRINT_MEM_STATS_NOTE(x) \
941 do { \
942         seq_printf(s, "Note: total memory is a precise account of pages " \
943                 "allocated by NvMap.\nIt doesn't match the sum of all " \
944                 "clients' \"%s\" because shared memory is accounted in " \
945                 "full in each client's \"%s\" that shares it.\n", #x, #x); \
946 } while (0)
947
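/*
 * Compute per-client totals for page-allocated (IOVMM) handles: backing
 * pages with a zero mapcount are counted as non-PSS, everything else as PSS.
 */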
948 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
949                                    u64 *non_pss, u64 *total)
950 {
951         int i;
952         struct rb_node *n;
953
954         *pss = *non_pss = *total = 0;
955         nvmap_ref_lock(client);
956         n = rb_first(&client->handle_refs);
957         for (; n != NULL; n = rb_next(n)) {
958                 struct nvmap_handle_ref *ref =
959                         rb_entry(n, struct nvmap_handle_ref, node);
960                 struct nvmap_handle *h = ref->handle;
961
962                 if (!h || !h->alloc || !h->heap_pgalloc)
963                         continue;
964
965                 for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
966                         struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
967                         int mapcount = page_mapcount(page);
968                         if (!mapcount)
969                                 *non_pss += PAGE_SIZE;
970                         *total += PAGE_SIZE;
971                 }
972                 *pss = *total - *non_pss;
973         }
974         nvmap_ref_unlock(client);
975 }
976
977 static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
978 {
979         u64 pss, non_pss, total;
980         struct nvmap_client *client;
981         struct nvmap_device *dev = s->private;
982         u64 total_memory, total_pss, total_non_pss;
983
984         spin_lock(&dev->clients_lock);
985         seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
986                 "CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
987         list_for_each_entry(client, &dev->clients, list) {
988                 client_stringify(client, s);
989                 nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
990                 seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
991                         K(non_pss), K(total));
992         }
993         spin_unlock(&dev->clients_lock);
994
995         nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory, NVMAP_HEAP_IOVMM);
996         seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
997                 "total", "", "", K(total_pss),
998                 K(total_non_pss), K(total_memory));
999         PRINT_MEM_STATS_NOTE(TOTAL);
1000         return 0;
1001 }
1002
1003 DEBUGFS_OPEN_FOPS(iovmm_procrank);
1004
1005 ulong nvmap_iovmm_get_used_pages(void)
1006 {
1007         u64 total;
1008
1009         nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
1010         return total >> PAGE_SHIFT;
1011 }
1012
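/* Writing a non-zero value to the "reset" debugfs file disables collection
 * and clears every counter except the running total. */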
1013 static int nvmap_stats_reset(void *data, u64 val)
1014 {
1015         int i;
1016
1017         if (val) {
1018                 atomic64_set(&nvmap_stats.collect, 0);
1019                 for (i = 0; i < NS_NUM; i++) {
1020                         if (i == NS_TOTAL)
1021                                 continue;
1022                         atomic64_set(&nvmap_stats.stats[i], 0);
1023                 }
1024         }
1025         return 0;
1026 }
1027
1028 static int nvmap_stats_get(void *data, u64 *val)
1029 {
1030         atomic64_t *ptr = data;
1031
1032         *val = atomic64_read(ptr);
1033         return 0;
1034 }
1035
1036 static int nvmap_stats_set(void *data, u64 val)
1037 {
1038         atomic64_t *ptr = data;
1039
1040         atomic64_set(ptr, val);
1041         return 0;
1042 }
1043
1044 DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
1045 DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");
1046
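/*
 * Create the debugfs "stats" directory with one file per counter, plus the
 * "collect" switch and the "reset" trigger defined above.
 */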
1047 static void nvmap_stats_init(struct dentry *nvmap_debug_root)
1048 {
1049         struct dentry *stats_root;
1050
1051 #define CREATE_DF(x, y) \
1052         debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);
1053
1054         stats_root = debugfs_create_dir("stats", nvmap_debug_root);
1055         if (!IS_ERR_OR_NULL(stats_root)) {
1056                 CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
1057                 CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
1058                 CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
1059                 CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
1060                 CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
1061                 CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
1062                 CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
1063                 CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
1064                 CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
1065                 CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
1066                 CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
1067                 CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
1068                 CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);
1069
1070                 debugfs_create_file("collect", S_IRUGO | S_IWUSR,
1071                         stats_root, &nvmap_stats.collect, &stats_fops);
1072                 debugfs_create_file("reset", S_IWUSR,
1073                         stats_root, NULL, &reset_stats_fops);
1074         }
1075
1076 #undef CREATE_DF
1077 }
1078
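/* Counters other than the running total are only updated while the debugfs
 * "collect" switch is enabled. */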
1079 void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
1080 {
1081         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1082                 atomic64_add(size, &nvmap_stats.stats[stat]);
1083 }
1084
1085 void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
1086 {
1087         if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
1088                 atomic64_sub(size, &nvmap_stats.stats[stat]);
1089 }
1090
1091 u64 nvmap_stats_read(enum nvmap_stats_t stat)
1092 {
1093         return atomic64_read(&nvmap_stats.stats[stat]);
1094 }
1095
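/*
 * Probe the single nvmap platform device: register the /dev/nvmap misc
 * device, create the carveout heaps described by the platform data and set
 * up the debugfs hierarchy.
 */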
1096 static int nvmap_probe(struct platform_device *pdev)
1097 {
1098         struct nvmap_platform_data *plat = pdev->dev.platform_data;
1099         struct nvmap_device *dev;
1100         struct dentry *nvmap_debug_root;
1101         unsigned int i;
1102         int e;
1103
1104         if (!plat) {
1105                 dev_err(&pdev->dev, "no platform data?\n");
1106                 return -ENODEV;
1107         }
1108
1109          /*
1110           * The DMA mapping API uses these parameters to decide how to map the
1111           * passed buffers. If the maximum physical segment size is smaller
1112           * than the size of the buffer, the buffer will be mapped as separate
1113           * IO virtual address ranges.
1114           */
1115         pdev->dev.dma_parms = &nvmap_dma_parameters;
1116
1117         if (WARN_ON(nvmap_dev != NULL)) {
1118                 dev_err(&pdev->dev, "only one nvmap device may be present\n");
1119                 return -ENODEV;
1120         }
1121
1122         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1123         if (!dev) {
1124                 dev_err(&pdev->dev, "out of memory for device\n");
1125                 return -ENOMEM;
1126         }
1127
1128         nvmap_dev = dev;
1129
1130         dev->dev_user.minor = MISC_DYNAMIC_MINOR;
1131         dev->dev_user.name = "nvmap";
1132         dev->dev_user.fops = &nvmap_user_fops;
1133         dev->dev_user.parent = &pdev->dev;
1134
1135         dev->handles = RB_ROOT;
1136
1137 #ifdef CONFIG_NVMAP_PAGE_POOLS
1138         e = nvmap_page_pool_init(dev);
1139         if (e)
1140                 goto fail;
1141 #endif
1142
1143         spin_lock_init(&dev->handle_lock);
1144         INIT_LIST_HEAD(&dev->clients);
1145         spin_lock_init(&dev->clients_lock);
1146
1147         e = misc_register(&dev->dev_user);
1148         if (e) {
1149                 dev_err(&pdev->dev, "unable to register miscdevice %s\n",
1150                         dev->dev_user.name);
1151                 goto fail;
1152         }
1153
1154         dev->nr_carveouts = 0;
1155         dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
1156                              plat->nr_carveouts, GFP_KERNEL);
1157         if (!dev->heaps) {
1158                 e = -ENOMEM;
1159                 dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
1160                 goto fail;
1161         }
1162
1163         nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
1164         if (IS_ERR_OR_NULL(nvmap_debug_root))
1165                 dev_err(&pdev->dev, "couldn't create debug files\n");
1166
1167         debugfs_create_u32("max_handle_count", S_IRUGO,
1168                         nvmap_debug_root, &nvmap_max_handle_count);
1169
1170         for (i = 0; i < plat->nr_carveouts; i++) {
1171                 struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
1172                 const struct nvmap_platform_carveout *co = &plat->carveouts[i];
1173                 node->base = round_up(co->base, PAGE_SIZE);
1174                 node->size = round_down(co->size -
1175                                         (node->base - co->base), PAGE_SIZE);
1176                 if (!co->size)
1177                         continue;
1178
1179                 node->carveout = nvmap_heap_create(
1180                                 dev->dev_user.this_device, co,
1181                                 node->base, node->size, node);
1182
1183                 if (!node->carveout) {
1184                         e = -ENOMEM;
1185                         dev_err(&pdev->dev, "couldn't create %s\n", co->name);
1186                         goto fail_heaps;
1187                 }
1188                 node->index = dev->nr_carveouts;
1189                 dev->nr_carveouts++;
1190                 spin_lock_init(&node->clients_lock);
1191                 INIT_LIST_HEAD(&node->clients);
1192                 node->heap_bit = co->usage_mask;
1193
1194                 if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1195                         struct dentry *heap_root =
1196                                 debugfs_create_dir(co->name, nvmap_debug_root);
1197                         if (!IS_ERR_OR_NULL(heap_root)) {
1198                                 debugfs_create_file("clients", S_IRUGO,
1199                                         heap_root,
1200                                         (void *)(uintptr_t)node->heap_bit,
1201                                         &debug_clients_fops);
1202                                 debugfs_create_file("allocations", S_IRUGO,
1203                                         heap_root,
1204                                         (void *)(uintptr_t)node->heap_bit,
1205                                         &debug_allocations_fops);
1206                                 nvmap_heap_debugfs_init(heap_root,
1207                                                         node->carveout);
1208                         }
1209                 }
1210         }
1211         if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
1212                 struct dentry *iovmm_root =
1213                         debugfs_create_dir("iovmm", nvmap_debug_root);
1214                 if (!IS_ERR_OR_NULL(iovmm_root)) {
1215                         debugfs_create_file("clients", S_IRUGO, iovmm_root,
1216                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1217                                 &debug_clients_fops);
1218                         debugfs_create_file("allocations", S_IRUGO, iovmm_root,
1219                                 (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
1220                                 &debug_allocations_fops);
1221                         debugfs_create_file("procrank", S_IRUGO, iovmm_root,
1222                                 dev, &debug_iovmm_procrank_fops);
1223                 }
1224 #ifdef CONFIG_NVMAP_PAGE_POOLS
1225                 nvmap_page_pool_debugfs_init(nvmap_debug_root);
1226 #endif
1227 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
1228                 debugfs_create_size_t("cache_maint_inner_threshold",
1229                                       S_IRUSR | S_IWUSR,
1230                                       nvmap_debug_root,
1231                                       &cache_maint_inner_threshold);
1232
1233                 /* cortex-a9 */
1234                 if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
1235                         cache_maint_inner_threshold = SZ_32K;
1236                 pr_info("nvmap: inner cache maint threshold=%zu\n",
1237                         cache_maint_inner_threshold);
1238 #endif
1239 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
1240                 debugfs_create_size_t("cache_maint_outer_threshold",
1241                                       S_IRUSR | S_IWUSR,
1242                                       nvmap_debug_root,
1243                                       &cache_maint_outer_threshold);
1244                 pr_info("nvmap: outer cache maint threshold=%zu\n",
1245                         cache_maint_outer_threshold);
1246 #endif
1247         }
1248
1249         nvmap_stats_init(nvmap_debug_root);
1250         platform_set_drvdata(pdev, dev);
1251
1252         nvmap_dmabuf_debugfs_init(nvmap_debug_root);
1253         e = nvmap_dmabuf_stash_init();
1254         if (e)
1255                 goto fail_heaps;
1256
1257         return 0;
1258 fail_heaps:
1259         for (i = 0; i < dev->nr_carveouts; i++) {
1260                 struct nvmap_carveout_node *node = &dev->heaps[i];
1261                 nvmap_heap_destroy(node->carveout);
1262         }
1263 fail:
1264 #ifdef CONFIG_NVMAP_PAGE_POOLS
1265         nvmap_page_pool_fini(nvmap_dev);
1266 #endif
1267         kfree(dev->heaps);
1268         if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
1269                 misc_deregister(&dev->dev_user);
1270         kfree(dev);
1271         nvmap_dev = NULL;
1272         return e;
1273 }
1274
1275 static int nvmap_remove(struct platform_device *pdev)
1276 {
1277         struct nvmap_device *dev = platform_get_drvdata(pdev);
1278         struct rb_node *n;
1279         struct nvmap_handle *h;
1280         int i;
1281
1282         misc_deregister(&dev->dev_user);
1283
1284         while ((n = rb_first(&dev->handles))) {
1285                 h = rb_entry(n, struct nvmap_handle, node);
1286                 rb_erase(&h->node, &dev->handles);
1287                 kfree(h);
1288         }
1289
1290         for (i = 0; i < dev->nr_carveouts; i++) {
1291                 struct nvmap_carveout_node *node = &dev->heaps[i];
1292                 nvmap_heap_destroy(node->carveout);
1293         }
1294         kfree(dev->heaps);
1295
1296         kfree(dev);
1297         nvmap_dev = NULL;
1298         return 0;
1299 }
1300
1301 static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
1302 {
1303         return 0;
1304 }
1305
1306 static int nvmap_resume(struct platform_device *pdev)
1307 {
1308         return 0;
1309 }
1310
1311 static struct platform_driver nvmap_driver = {
1312         .probe          = nvmap_probe,
1313         .remove         = nvmap_remove,
1314         .suspend        = nvmap_suspend,
1315         .resume         = nvmap_resume,
1316
1317         .driver = {
1318                 .name   = "tegra-nvmap",
1319                 .owner  = THIS_MODULE,
1320         },
1321 };
1322
1323 static int __init nvmap_init_driver(void)
1324 {
1325         int e;
1326
1327         nvmap_dev = NULL;
1328
1329         e = nvmap_heap_init();
1330         if (e)
1331                 goto fail;
1332
1333         e = platform_driver_register(&nvmap_driver);
1334         if (e) {
1335                 nvmap_heap_deinit();
1336                 goto fail;
1337         }
1338
1339 fail:
1340         return e;
1341 }
1342 fs_initcall(nvmap_init_driver);
1343
1344 static void __exit nvmap_exit_driver(void)
1345 {
1346         platform_driver_unregister(&nvmap_driver);
1347         nvmap_heap_deinit();
1348         nvmap_dev = NULL;
1349 }
1350 module_exit(nvmap_exit_driver);