video: tegra: nvmap: track handle's mapped memory
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index e463fe758883a1f6fff56523fc20245f420de3aa..1c27e36c190dff8977d2549295fb10d0deddbd0f 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -24,6 +24,7 @@
 #include <linux/bitmap.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/oom.h>
 
 #define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */
 
-#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
+/* roughly the L2 cache size */
+#ifdef CONFIG_DENVER_CPU
+size_t cache_maint_inner_threshold = SZ_2M * 8;
+#else
 size_t cache_maint_inner_threshold = SZ_2M;
 #endif
+
 #ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
 size_t cache_maint_outer_threshold = SZ_1M;
 #endif
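
The inner threshold above drives the set/ways heuristic: a maintenance
request larger than (roughly) the L2 size is cheaper to satisfy by
flushing the whole cache than by walking the range line by line. A
minimal sketch of that decision, using the standard ARM cache helpers;
the function is illustrative only, not part of this patch:

	/* Illustrative only: whole-cache flush by set/ways for large
	 * ranges, clean/invalidate by MVA otherwise. */
	static void inner_cache_maint_sketch(void *vaddr, size_t size)
	{
		if (size >= cache_maint_inner_threshold)
			flush_cache_all();		/* set/ways */
		else
			__cpuc_flush_dcache_area(vaddr, size);	/* by MVA */
	}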
@@ -67,14 +72,8 @@ struct nvmap_carveout_node {
        size_t                  size;
 };
 
-struct platform_device *nvmap_pdev;
-EXPORT_SYMBOL(nvmap_pdev);
 struct nvmap_device *nvmap_dev;
-EXPORT_SYMBOL(nvmap_dev);
-struct nvmap_share *nvmap_share;
-EXPORT_SYMBOL(nvmap_share);
 struct nvmap_stats nvmap_stats;
-EXPORT_SYMBOL(nvmap_stats);
 
 static struct backing_dev_info nvmap_bdi = {
        .ra_pages       = 0,
@@ -90,7 +89,6 @@ static int nvmap_open(struct inode *inode, struct file *filp);
 static int nvmap_release(struct inode *inode, struct file *filp);
 static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
-static void nvmap_vma_open(struct vm_area_struct *vma);
 static void nvmap_vma_close(struct vm_area_struct *vma);
 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 
@@ -105,17 +103,6 @@ static const struct file_operations nvmap_user_fops = {
        .mmap           = nvmap_map,
 };
 
-static const struct file_operations nvmap_super_fops = {
-       .owner          = THIS_MODULE,
-       .open           = nvmap_open,
-       .release        = nvmap_release,
-       .unlocked_ioctl = nvmap_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl = nvmap_ioctl,
-#endif
-       .mmap           = nvmap_map,
-};
-
 static struct vm_operations_struct nvmap_vma_ops = {
        .open           = nvmap_vma_open,
        .close          = nvmap_vma_close,
@@ -127,92 +114,6 @@ int is_nvmap_vma(struct vm_area_struct *vma)
        return vma->vm_ops == &nvmap_vma_ops;
 }
 
-struct device *nvmap_client_to_device(struct nvmap_client *client)
-{
-       if (!client)
-               return 0;
-       if (client->super)
-               return nvmap_dev->dev_super.this_device;
-       else
-               return nvmap_dev->dev_user.this_device;
-}
-
-struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
-{
-       return &dev->iovmm_master;
-}
-
-/* allocates a PTE for the caller's use; returns the PTE pointer or
- * a negative errno. not safe from IRQs */
-pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
-{
-       unsigned long bit;
-
-       spin_lock(&dev->ptelock);
-       bit = find_next_zero_bit(dev->ptebits, NVMAP_NUM_PTES, dev->lastpte);
-       if (bit == NVMAP_NUM_PTES) {
-               bit = find_first_zero_bit(dev->ptebits, dev->lastpte);
-               if (bit == dev->lastpte)
-                       bit = NVMAP_NUM_PTES;
-       }
-
-       if (bit == NVMAP_NUM_PTES) {
-               spin_unlock(&dev->ptelock);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       dev->lastpte = bit;
-       set_bit(bit, dev->ptebits);
-       spin_unlock(&dev->ptelock);
-
-       *vaddr = dev->vm_rgn->addr + bit * PAGE_SIZE;
-       return &(dev->ptes[bit]);
-}
-
-/* allocates a PTE for the caller's use; returns the PTE pointer or
- * a negative errno. must be called from sleepable contexts */
-pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr)
-{
-       int ret;
-       pte_t **pte;
-       ret = wait_event_interruptible(dev->pte_wait,
-                       !IS_ERR(pte = nvmap_alloc_pte_irq(dev, vaddr)));
-
-       if (ret == -ERESTARTSYS)
-               return ERR_PTR(-EINTR);
-
-       return pte;
-}
-
-/* frees a PTE */
-void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte)
-{
-       unsigned long addr;
-       unsigned int bit = pte - dev->ptes;
-
-       if (WARN_ON(bit >= NVMAP_NUM_PTES))
-               return;
-
-       addr = (unsigned long)dev->vm_rgn->addr + bit * PAGE_SIZE;
-       set_pte_at(&init_mm, addr, *pte, 0);
-
-       spin_lock(&dev->ptelock);
-       clear_bit(bit, dev->ptebits);
-       spin_unlock(&dev->ptelock);
-       wake_up(&dev->pte_wait);
-}
-
-/* get pte for the virtual address */
-pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr)
-{
-       unsigned int bit;
-
-       BUG_ON(vaddr < (unsigned long)dev->vm_rgn->addr);
-       bit = (vaddr - (unsigned long)dev->vm_rgn->addr) >> PAGE_SHIFT;
-       BUG_ON(bit >= NVMAP_NUM_PTES);
-       return &(dev->ptes[bit]);
-}
-
 /*
  * Verifies that the passed ID is a valid handle ID. Then the passed client's
  * reference to the handle is returned.
@@ -278,11 +179,10 @@ unsigned long nvmap_carveout_usage(struct nvmap_client *c,
 int nvmap_flush_heap_block(struct nvmap_client *client,
        struct nvmap_heap_block *block, size_t len, unsigned int prot)
 {
-       pte_t **pte;
-       void *addr;
-       uintptr_t kaddr;
+       ulong kaddr;
        phys_addr_t phys = block->base;
        phys_addr_t end = block->base + len;
+       struct vm_struct *area = NULL;
 
        if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
                goto out;
@@ -296,75 +196,33 @@ int nvmap_flush_heap_block(struct nvmap_client *client,
        }
 #endif
 
-       pte = nvmap_alloc_pte(nvmap_dev, &addr);
-       if (IS_ERR(pte))
-               return PTR_ERR(pte);
+       area = alloc_vm_area(PAGE_SIZE, NULL);
+       if (!area)
+               return -ENOMEM;
 
-       kaddr = (uintptr_t)addr;
+       kaddr = (ulong)area->addr;
 
        while (phys < end) {
                phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
-               unsigned long pfn = __phys_to_pfn(phys);
                void *base = (void *)kaddr + (phys & ~PAGE_MASK);
 
                next = min(next, end);
-               set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, PG_PROT_KERNEL));
-               nvmap_flush_tlb_kernel_page(kaddr);
+               ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+                       phys, PG_PROT_KERNEL);
                FLUSH_DCACHE_AREA(base, next - phys);
                phys = next;
+               unmap_kernel_range(kaddr, PAGE_SIZE);
        }
 
        if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
                outer_flush_range(block->base, block->base + len);
 
-       nvmap_free_pte(nvmap_dev, pte);
+       free_vm_area(area);
 out:
        wmb();
        return 0;
 }
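
The rewrite above retires nvmap's private PTE pool: one page of kernel
VA is reserved with alloc_vm_area(), retargeted at each physical page
via ioremap_page_range(), and torn down with unmap_kernel_range(),
which also performs the TLB flush the old code issued by hand. A
condensed sketch of the pattern (illustrative; error handling elided,
and chunk_len stands in for the per-page byte count):

	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, NULL);
	ulong kaddr = (ulong)area->addr;

	ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
			   phys & PAGE_MASK, PG_PROT_KERNEL);
	FLUSH_DCACHE_AREA((void *)kaddr + (phys & ~PAGE_MASK), chunk_len);
	unmap_kernel_range(kaddr, PAGE_SIZE);	/* unmap + TLB flush */
	free_vm_area(area);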
 
-void nvmap_carveout_commit_add(struct nvmap_client *client,
-                              struct nvmap_carveout_node *node,
-                              size_t len)
-{
-       spin_lock(&node->clients_lock);
-       BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
-              client->carveout_commit[node->index].commit != 0);
-
-       client->carveout_commit[node->index].commit += len;
-       /* if this client isn't already on the list of nodes for this heap,
-          add it */
-       if (list_empty(&client->carveout_commit[node->index].list)) {
-               list_add(&client->carveout_commit[node->index].list,
-                        &node->clients);
-       }
-       spin_unlock(&node->clients_lock);
-}
-
-void nvmap_carveout_commit_subtract(struct nvmap_client *client,
-                                   struct nvmap_carveout_node *node,
-                                   size_t len)
-{
-       if (!client)
-               return;
-
-       spin_lock(&node->clients_lock);
-       BUG_ON(client->carveout_commit[node->index].commit < len);
-       client->carveout_commit[node->index].commit -= len;
-       /* if no more allocation in this carveout for this node, delete it */
-       if (!client->carveout_commit[node->index].commit)
-               list_del_init(&client->carveout_commit[node->index].list);
-       spin_unlock(&node->clients_lock);
-}
-
-static struct nvmap_client *get_client_from_carveout_commit(
-       struct nvmap_carveout_node *node, struct nvmap_carveout_commit *commit)
-{
-       struct nvmap_carveout_commit *first_commit = commit - node->index;
-       return (void *)first_commit - offsetof(struct nvmap_client,
-                                              carveout_commit);
-}
-
 static
 struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
                                              struct nvmap_handle *handle,
@@ -440,33 +298,50 @@ void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
        spin_unlock(&dev->handle_lock);
 }
 
+/* Validates that the ID refers to a handle in the device master tree
+ * and, on success, takes and returns a reference to that handle. */
+struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
+{
+       struct nvmap_handle *h = NULL;
+       struct rb_node *n;
+
+       spin_lock(&nvmap_dev->handle_lock);
+
+       n = nvmap_dev->handles.rb_node;
+
+       while (n) {
+               h = rb_entry(n, struct nvmap_handle, node);
+               if (h == id) {
+                       h = nvmap_handle_get(h);
+                       spin_unlock(&nvmap_dev->handle_lock);
+                       return h;
+               }
+               if (id > h)
+                       n = n->rb_right;
+               else
+                       n = n->rb_left;
+       }
+       spin_unlock(&nvmap_dev->handle_lock);
+       return NULL;
+}
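
nvmap_validate_get() treats the user-supplied value purely as a key
into the handle rb-tree, so a stale or forged pointer is never
dereferenced before it has been found in the tree; on a hit the
handle's refcount is raised. A typical (illustrative) call pattern:

	struct nvmap_handle *h = nvmap_validate_get(id);
	if (!h)
		return -EINVAL;		/* not a live handle */
	/* ... operate on h ... */
	nvmap_handle_put(h);		/* drop the reference taken above */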
+
 struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
                                           const char *name)
 {
        struct nvmap_client *client;
        struct task_struct *task;
-       int i;
 
        if (WARN_ON(!dev))
                return NULL;
 
-       client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
-                        * dev->nr_carveouts), GFP_KERNEL);
+       client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return NULL;
 
        client->name = name;
-       client->super = true;
        client->kernel_client = true;
        client->handle_refs = RB_ROOT;
 
-       atomic_set(&client->iovm_commit, 0);
-
-       for (i = 0; i < dev->nr_carveouts; i++) {
-               INIT_LIST_HEAD(&client->carveout_commit[i].list);
-               client->carveout_commit[i].commit = 0;
-       }
-
        get_task_struct(current->group_leader);
        task_lock(current->group_leader);
        /* don't bother to store task struct for kernel threads,
@@ -492,7 +367,6 @@ struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
 static void destroy_client(struct nvmap_client *client)
 {
        struct rb_node *n;
-       int i;
 
        if (!client)
                return;
@@ -513,13 +387,12 @@ static void destroy_client(struct nvmap_client *client)
                while (pins--)
                        __nvmap_unpin(ref);
 
-               if (ref->handle->owner == client) {
+               if (ref->handle->owner == client)
                        ref->handle->owner = NULL;
-                       ref->handle->owner_ref = NULL;
-               }
 
                dma_buf_put(ref->handle->dmabuf);
                rb_erase(&ref->node, &client->handle_refs);
+               atomic_dec(&ref->handle->share_count);
 
                dupes = atomic_read(&ref->dupes);
                while (dupes--)
@@ -528,9 +401,6 @@ static void destroy_client(struct nvmap_client *client)
                kfree(ref);
        }
 
-       for (i = 0; i < nvmap_dev->nr_carveouts; i++)
-               list_del(&client->carveout_commit[i].list);
-
        if (client->task)
                put_task_struct(client->task);
 
@@ -576,7 +446,6 @@ static int nvmap_open(struct inode *inode, struct file *filp)
        trace_nvmap_open(priv, priv->name);
 
        priv->kernel_client = false;
-       priv->super = (filp->f_op == &nvmap_super_fops);
 
        filp->f_mapping->backing_dev_info = &nvmap_bdi;
 
@@ -604,10 +473,7 @@ int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
-
-       priv->offs = 0;
        priv->handle = h;
-       atomic_set(&priv->count, 1);
 
        vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
                          VM_DONTDUMP | VM_DONTCOPY |
@@ -616,31 +482,16 @@ int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
        BUG_ON(vma->vm_private_data != NULL);
        vma->vm_private_data = priv;
        vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
+       nvmap_vma_open(vma);
        return 0;
 }
 
 static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
 {
-       struct nvmap_vma_priv *priv;
-
-       /* after NVMAP_IOC_MMAP, the handle that is mapped by this VMA
-        * will be stored in vm_private_data and faulted in. until the
-        * ioctl is made, the VMA is mapped no-access */
-       vma->vm_private_data = NULL;
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       priv->offs = 0;
-       priv->handle = NULL;
-       atomic_set(&priv->count, 1);
-
+       BUG_ON(vma->vm_private_data != NULL);
        vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
                          VM_DONTDUMP | VM_DONTCOPY);
        vma->vm_ops = &nvmap_vma_ops;
-       vma->vm_private_data = priv;
-
        return 0;
 }
 
@@ -748,6 +599,12 @@ static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                err = nvmap_ioctl_cache_maint(filp, uarg, false);
                break;
 
+       case NVMAP_IOC_CACHE_LIST:
+       case NVMAP_IOC_RESERVE:
+               err = nvmap_ioctl_cache_maint_list(filp, uarg,
+                                                  cmd == NVMAP_IOC_RESERVE);
+               break;
+
        case NVMAP_IOC_SHARE:
                err = nvmap_ioctl_share_dmabuf(filp, uarg);
                break;
@@ -763,28 +620,83 @@ static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  * the handle, and nvmap_vma_close decrements it. alternatively, we could
  * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
 */
-static void nvmap_vma_open(struct vm_area_struct *vma)
+void nvmap_vma_open(struct vm_area_struct *vma)
 {
        struct nvmap_vma_priv *priv;
+       struct nvmap_handle *h;
+       struct nvmap_vma_list *vma_list, *tmp;
+       struct list_head *tmp_head = NULL;
+       pid_t current_pid = current->pid;
+       bool vma_pos_found = false;
 
        priv = vma->vm_private_data;
        BUG_ON(!priv);
+       BUG_ON(!priv->handle);
 
        atomic_inc(&priv->count);
+       h = priv->handle;
+
+       vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
+       if (vma_list) {
+               mutex_lock(&h->lock);
+               tmp_head = &h->vmas;
+
+               /* insert the vma into the handle's vmas list in
+                * increasing order of handle offsets
+                */
+               list_for_each_entry(tmp, &h->vmas, list) {
+                       BUG_ON(tmp->vma == vma);
+
+                       if (!vma_pos_found && (current_pid == tmp->pid)) {
+                               if (vma->vm_pgoff < tmp->vma->vm_pgoff) {
+                                       tmp_head = &tmp->list;
+                                       vma_pos_found = true;
+                               } else {
+                                       tmp_head = tmp->list.next;
+                               }
+                       }
+               }
+
+               vma_list->vma = vma;
+               vma_list->pid = current_pid;
+               list_add_tail(&vma_list->list, tmp_head);
+               mutex_unlock(&h->lock);
+       } else {
+               WARN(1, "vma not tracked");
+       }
 }
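
Since nvmap_vma_open() is now also called from __nvmap_map() (see
above), every VMA that maps a handle lands on the handle's vmas list,
kept sorted per process by vm_pgoff. The invariant, pictured with
illustrative pids and offsets:

	/*
	 * For a handle mapped by pid 100 at pgoff 0 and 4, and by
	 * pid 200 at pgoff 2, opening (pid 100, pgoff 2) inserts it
	 * before the pid-100 entry at pgoff 4:
	 *
	 *   (100,0) -> (100,2) -> (100,4) -> (200,2)
	 *
	 * This per-pid ordering is what lets the mapped-size walk in
	 * nvmap_get_client_handle_mss() detect overlaps in one pass.
	 */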
 
 static void nvmap_vma_close(struct vm_area_struct *vma)
 {
        struct nvmap_vma_priv *priv = vma->vm_private_data;
+       struct nvmap_vma_list *vma_list;
+       struct nvmap_handle *h;
+       bool vma_found = false;
 
-       if (priv) {
-               if (!atomic_dec_return(&priv->count)) {
-                       if (priv->handle)
-                               nvmap_handle_put(priv->handle);
-                       kfree(priv);
-               }
+       if (!priv)
+               return;
+
+       BUG_ON(!priv->handle);
+
+       h = priv->handle;
+       mutex_lock(&h->lock);
+       list_for_each_entry(vma_list, &h->vmas, list) {
+               if (vma_list->vma != vma)
+                       continue;
+               list_del(&vma_list->list);
+               kfree(vma_list);
+               vma_found = true;
+               break;
+       }
+       BUG_ON(!vma_found);
+       mutex_unlock(&h->lock);
+
+       if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
+               if (priv->handle)
+                       nvmap_handle_put(priv->handle);
+               vma->vm_private_data = NULL;
+               kfree(priv);
        }
-       vma->vm_private_data = NULL;
 }
 
 static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -819,7 +731,10 @@ static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                page = pfn_to_page(pfn);
        } else {
                offs >>= PAGE_SHIFT;
-               page = priv->handle->pgalloc.pages[offs];
+               if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
+                       return VM_FAULT_SIGBUS;
+               page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
+               nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
        }
 
        if (page)
@@ -858,7 +773,7 @@ static void client_stringify(struct nvmap_client *client, struct seq_file *s)
 }
 
 static void allocations_stringify(struct nvmap_client *client,
-                                 struct seq_file *s, bool iovmm)
+                                 struct seq_file *s, u32 heap_type)
 {
        struct rb_node *n;
 
@@ -868,74 +783,128 @@ static void allocations_stringify(struct nvmap_client *client,
                struct nvmap_handle_ref *ref =
                        rb_entry(n, struct nvmap_handle_ref, node);
                struct nvmap_handle *handle = ref->handle;
-               if (handle->alloc && handle->heap_pgalloc == iovmm) {
-                       phys_addr_t base = iovmm ? 0 :
+               if (handle->alloc && handle->heap_type == heap_type) {
+                       phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
                                           (handle->carveout->base);
                        seq_printf(s,
-                               "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u\n",
+                               "%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
                                "", "",
                                (unsigned long long)base, K(handle->size),
                                handle->userflags,
                                atomic_read(&handle->ref),
                                atomic_read(&ref->dupes),
-                               atomic_read(&ref->pin));
+                               atomic_read(&ref->pin),
+                               atomic_read(&handle->share_count),
+                               handle);
                }
        }
        nvmap_ref_unlock(client);
 }
 
-static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
+/* Compute the total amount of the handle's physical memory that is
+ * mapped into the client's virtual address space. The vmas list is
+ * kept sorted in ascending order of handle offsets.
+ * NOTE: must be called with the handle's lock mutex held.
+ */
+static void nvmap_get_client_handle_mss(struct nvmap_client *client,
+                               struct nvmap_handle *handle, u64 *total)
 {
-       struct nvmap_carveout_node *node = s->private;
-       struct nvmap_carveout_commit *commit;
-       unsigned int total = 0;
+       struct nvmap_vma_list *vma_list = NULL;
+       struct vm_area_struct *vma = NULL;
+       u64 end_offset = 0, vma_start_offset, vma_size;
+       int64_t overlap_size;
 
-       spin_lock(&node->clients_lock);
-       seq_printf(s, "%-18s %18s %8s %11s\n",
-               "CLIENT", "PROCESS", "PID", "SIZE");
-       seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s\n",
-                       "", "", "BASE", "SIZE", "FLAGS", "REFS",
-                       "DUPES", "PINS");
-       list_for_each_entry(commit, &node->clients, list) {
-               struct nvmap_client *client =
-                       get_client_from_carveout_commit(node, commit);
-               client_stringify(client, s);
-               seq_printf(s, " %10zuK\n", K(commit->commit));
-               allocations_stringify(client, s, false);
-               seq_printf(s, "\n");
-               total += commit->commit;
+       *total = 0;
+       list_for_each_entry(vma_list, &handle->vmas, list) {
+
+               if (client->task->pid == vma_list->pid) {
+                       vma = vma_list->vma;
+                       vma_size = vma->vm_end - vma->vm_start;
+
+                       vma_start_offset = vma->vm_pgoff << PAGE_SHIFT;
+                       if (end_offset < vma_start_offset + vma_size) {
+                               *total += vma_size;
+
+                               overlap_size = end_offset - vma_start_offset;
+                               if (overlap_size > 0)
+                                       *total -= overlap_size;
+                               end_offset = vma_start_offset + vma_size;
+                       }
+               }
        }
-       seq_printf(s, "%-18s %-18s %8s %10uK\n", "total", "", "", K(total));
-       spin_unlock(&node->clients_lock);
-       return 0;
 }
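
Because the list is sorted, one pass with a running end_offset counts
each mapped byte once even when a process maps overlapping ranges of
the same handle. A worked (illustrative) example:

	/*
	 * Two VMAs of one process cover handle offsets [0K,8K) and
	 * [4K,12K):
	 *
	 *   vma1: 0 < 0+8K   -> total  = 8K; overlap = 0-0 (<= 0, skip);
	 *                       end_offset = 8K
	 *   vma2: 8K < 4K+8K -> total += 8K (16K); overlap = 8K-4K = 4K;
	 *                       total -= 4K (12K); end_offset = 12K
	 *
	 * Result: 12K, the size of the union [0K,12K), not the 16K sum.
	 */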
 
-DEBUGFS_OPEN_FOPS(allocations);
-
-static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+static void maps_stringify(struct nvmap_client *client,
+                               struct seq_file *s, u32 heap_type)
 {
-       struct nvmap_carveout_node *node = s->private;
-       struct nvmap_carveout_commit *commit;
-       unsigned int total = 0;
+       struct rb_node *n;
+       struct nvmap_vma_list *vma_list = NULL;
+       struct vm_area_struct *vma = NULL;
+       u64 total_mapped_size, vma_size;
 
-       spin_lock(&node->clients_lock);
-       seq_printf(s, "%-18s %18s %8s %11s\n",
-               "CLIENT", "PROCESS", "PID", "SIZE");
-       list_for_each_entry(commit, &node->clients, list) {
-               struct nvmap_client *client =
-                       get_client_from_carveout_commit(node, commit);
-               client_stringify(client, s);
-               seq_printf(s, " %10zu\n", K(commit->commit));
-               total += commit->commit;
+       nvmap_ref_lock(client);
+       n = rb_first(&client->handle_refs);
+       for (; n != NULL; n = rb_next(n)) {
+               struct nvmap_handle_ref *ref =
+                       rb_entry(n, struct nvmap_handle_ref, node);
+               struct nvmap_handle *handle = ref->handle;
+               if (handle->alloc && handle->heap_type == heap_type) {
+                       phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
+                                          (handle->carveout->base);
+                       seq_printf(s,
+                               "%-18s %-18s %8llx %10zuK %8x %6u %16p "
+                               "%12s %12s ",
+                               "", "",
+                               (unsigned long long)base, K(handle->size),
+                               handle->userflags,
+                               atomic_read(&handle->share_count),
+                               handle, "", "");
+
+                       mutex_lock(&handle->lock);
+                       nvmap_get_client_handle_mss(client, handle,
+                                                       &total_mapped_size);
+                       seq_printf(s, "%6lluK\n", K(total_mapped_size));
+
+                       list_for_each_entry(vma_list, &handle->vmas, list) {
+
+                               if (vma_list->pid == client->task->pid) {
+                                       vma = vma_list->vma;
+                                       vma_size = vma->vm_end - vma->vm_start;
+                                       seq_printf(s,
+                                         "%-18s %-18s %8s %11s %8s %6s %16s "
+                                         "%-12lx-%12lx %6lluK\n",
+                                         "", "", "", "", "", "", "",
+                                         vma->vm_start, vma->vm_end,
+                                         K(vma_size));
+                               }
+                       }
+                       mutex_unlock(&handle->lock);
+               }
        }
-       seq_printf(s, "%-18s %18s %8s %10uK\n", "total", "", "", K(total));
-       spin_unlock(&node->clients_lock);
-       return 0;
+       nvmap_ref_unlock(client);
 }
 
-DEBUGFS_OPEN_FOPS(clients);
+static void nvmap_get_client_mss(struct nvmap_client *client,
+                                u64 *total, u32 heap_type)
+{
+       struct rb_node *n;
 
-static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
+       *total = 0;
+       nvmap_ref_lock(client);
+       n = rb_first(&client->handle_refs);
+       for (; n != NULL; n = rb_next(n)) {
+               struct nvmap_handle_ref *ref =
+                       rb_entry(n, struct nvmap_handle_ref, node);
+               struct nvmap_handle *handle = ref->handle;
+               if (handle->alloc && handle->heap_type == heap_type)
+                       *total += handle->size /
+                                 atomic_read(&handle->share_count);
+       }
+       nvmap_ref_unlock(client);
+}
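
nvmap_get_client_mss() charges each client only a proportional share
of every handle it references, dividing the handle size by its
share_count, so the per-client SIZE columns sum to the real footprint
instead of multiply counting shared buffers. For example
(illustrative): a 12M handle with share_count == 3 adds 4M to each of
the three clients, while the "total" row still reports 12M.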
+
+static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
+                                     u64 *total, u32 heap_type)
 {
        int i;
        struct rb_node *n;
@@ -954,7 +923,7 @@ static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
                struct nvmap_handle *h =
                        rb_entry(n, struct nvmap_handle, node);
 
-               if (!h || !h->alloc || !h->heap_pgalloc)
+               if (!h || !h->alloc || h->heap_type != heap_type)
                        continue;
                if (!non_pss) {
                        *total += h->size;
@@ -962,7 +931,8 @@ static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
                }
 
                for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
-                       int mapcount = page_mapcount(h->pgalloc.pages[i]);
+                       struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+                       int mapcount = page_mapcount(page);
                        if (!mapcount)
                                *non_pss += PAGE_SIZE;
                        *total += PAGE_SIZE;
@@ -973,64 +943,94 @@ static void nvmap_iovmm_get_total_mss(u64 *pss, u64 *non_pss, u64 *total)
        spin_unlock(&dev->handle_lock);
 }
 
-#define PRINT_MEM_STATS_NOTE(x) \
-do { \
-       seq_printf(s, "Note: total memory is precise account of pages " \
-               "allocated by NvMap.\nIt doesn't match with all clients " \
-               "\"%s\" accumulated as shared memory \nis accounted in " \
-               "full in each clients \"%s\" that shared memory.\n", #x, #x); \
-} while (0)
-
-static int nvmap_debug_iovmm_clients_show(struct seq_file *s, void *unused)
+static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
 {
        u64 total;
        struct nvmap_client *client;
-       struct nvmap_device *dev = s->private;
+       u32 heap_type = (u32)(uintptr_t)s->private;
 
-       spin_lock(&dev->clients_lock);
+       spin_lock(&nvmap_dev->clients_lock);
        seq_printf(s, "%-18s %18s %8s %11s\n",
                "CLIENT", "PROCESS", "PID", "SIZE");
-       list_for_each_entry(client, &dev->clients, list) {
-               int iovm_commit = atomic_read(&client->iovm_commit);
+       seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
+                       "", "", "BASE", "SIZE", "FLAGS", "REFS",
+                       "DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
+       list_for_each_entry(client, &nvmap_dev->clients, list) {
+               u64 client_total;
                client_stringify(client, s);
-               seq_printf(s, " %10uK\n", K(iovm_commit));
+               nvmap_get_client_mss(client, &client_total, heap_type);
+               seq_printf(s, " %10lluK\n", K(client_total));
+               allocations_stringify(client, s, heap_type);
+               seq_printf(s, "\n");
        }
-       spin_unlock(&dev->clients_lock);
-       nvmap_iovmm_get_total_mss(NULL, NULL, &total);
-       seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
-       PRINT_MEM_STATS_NOTE(SIZE);
+       spin_unlock(&nvmap_dev->clients_lock);
+       nvmap_get_total_mss(NULL, NULL, &total, heap_type);
+       seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
        return 0;
 }
 
-DEBUGFS_OPEN_FOPS(iovmm_clients);
+DEBUGFS_OPEN_FOPS(allocations);
 
-static int nvmap_debug_iovmm_allocations_show(struct seq_file *s, void *unused)
+static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
 {
        u64 total;
        struct nvmap_client *client;
-       struct nvmap_device *dev = s->private;
+       u32 heap_type = (u32)(uintptr_t)s->private;
 
-       spin_lock(&dev->clients_lock);
+       spin_lock(&nvmap_dev->clients_lock);
        seq_printf(s, "%-18s %18s %8s %11s\n",
                "CLIENT", "PROCESS", "PID", "SIZE");
-       seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s\n",
-                       "", "", "BASE", "SIZE", "FLAGS", "REFS",
-                       "DUPES", "PINS");
-       list_for_each_entry(client, &dev->clients, list) {
-               int iovm_commit = atomic_read(&client->iovm_commit);
+       seq_printf(s, "%-18s %18s %8s %11s %8s %6s %9s %21s %18s\n",
+               "", "", "BASE", "SIZE", "FLAGS", "SHARE", "UID",
+               "MAPS", "MAPSIZE");
+
+       list_for_each_entry(client, &nvmap_dev->clients, list) {
+               u64 client_total;
                client_stringify(client, s);
-               seq_printf(s, " %10uK\n", K(iovm_commit));
-               allocations_stringify(client, s, true);
+               nvmap_get_client_mss(client, &client_total, heap_type);
+               seq_printf(s, " %10lluK\n", K(client_total));
+               maps_stringify(client, s, heap_type);
                seq_printf(s, "\n");
        }
-       spin_unlock(&dev->clients_lock);
-       nvmap_iovmm_get_total_mss(NULL, NULL, &total);
+       spin_unlock(&nvmap_dev->clients_lock);
+
+       nvmap_get_total_mss(NULL, NULL, &total, heap_type);
        seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
-       PRINT_MEM_STATS_NOTE(SIZE);
        return 0;
 }
 
-DEBUGFS_OPEN_FOPS(iovmm_allocations);
+DEBUGFS_OPEN_FOPS(maps);
+
+static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
+{
+       u64 total;
+       struct nvmap_client *client;
+       ulong heap_type = (ulong)s->private;
+
+       spin_lock(&nvmap_dev->clients_lock);
+       seq_printf(s, "%-18s %18s %8s %11s\n",
+               "CLIENT", "PROCESS", "PID", "SIZE");
+       list_for_each_entry(client, &nvmap_dev->clients, list) {
+               u64 client_total;
+               client_stringify(client, s);
+               nvmap_get_client_mss(client, &client_total, heap_type);
+               seq_printf(s, " %10lluK\n", K(client_total));
+       }
+       spin_unlock(&nvmap_dev->clients_lock);
+       nvmap_get_total_mss(NULL, NULL, &total, heap_type);
+       seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
+       return 0;
+}
+
+DEBUGFS_OPEN_FOPS(clients);
+
+#define PRINT_MEM_STATS_NOTE(x) \
+do { \
+       seq_printf(s, "Note: total memory is a precise account of pages " \
+               "allocated by NvMap.\nIt doesn't match the sum of all " \
+               "clients' \"%s\" because shared memory is accounted in " \
+               "full in each client's \"%s\".\n", #x, #x); \
+} while (0)
 
 static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                                   u64 *non_pss, u64 *total)
@@ -1050,7 +1050,8 @@ static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                        continue;
 
                for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
-                       int mapcount = page_mapcount(h->pgalloc.pages[i]);
+                       struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+                       int mapcount = page_mapcount(page);
                        if (!mapcount)
                                *non_pss += PAGE_SIZE;
                        *total += PAGE_SIZE;
@@ -1078,7 +1079,7 @@ static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
        }
        spin_unlock(&dev->clients_lock);
 
-       nvmap_iovmm_get_total_mss(&total_pss, &total_non_pss, &total_memory);
+       nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory, NVMAP_HEAP_IOVMM);
        seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
                "total", "", "", K(total_pss),
                K(total_non_pss), K(total_memory));
@@ -1092,7 +1093,7 @@ ulong nvmap_iovmm_get_used_pages(void)
 {
        u64 total;
 
-       nvmap_iovmm_get_total_mss(NULL, NULL, &total);
+       nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
        return total >> PAGE_SHIFT;
 }
 
@@ -1211,69 +1212,25 @@ static int nvmap_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       nvmap_dev = dev;
+
        dev->dev_user.minor = MISC_DYNAMIC_MINOR;
        dev->dev_user.name = "nvmap";
        dev->dev_user.fops = &nvmap_user_fops;
        dev->dev_user.parent = &pdev->dev;
 
-       dev->dev_super.minor = MISC_DYNAMIC_MINOR;
-       dev->dev_super.name = "knvmap";
-       dev->dev_super.fops = &nvmap_super_fops;
-       dev->dev_super.parent = &pdev->dev;
-
        dev->handles = RB_ROOT;
 
-       init_waitqueue_head(&dev->pte_wait);
-
-       init_waitqueue_head(&dev->iovmm_master.pin_wait);
-
-       mutex_init(&dev->iovmm_master.pin_lock);
 #ifdef CONFIG_NVMAP_PAGE_POOLS
        e = nvmap_page_pool_init(dev);
        if (e)
                goto fail;
 #endif
 
-       dev->vm_rgn = alloc_vm_area(NVMAP_NUM_PTES * PAGE_SIZE, NULL);
-       if (!dev->vm_rgn) {
-               e = -ENOMEM;
-               dev_err(&pdev->dev, "couldn't allocate remapping region\n");
-               goto fail;
-       }
-
-       spin_lock_init(&dev->ptelock);
        spin_lock_init(&dev->handle_lock);
        INIT_LIST_HEAD(&dev->clients);
        spin_lock_init(&dev->clients_lock);
 
-       for (i = 0; i < NVMAP_NUM_PTES; i++) {
-               unsigned long addr;
-               pgd_t *pgd;
-               pud_t *pud;
-               pmd_t *pmd;
-
-               addr = (unsigned long)dev->vm_rgn->addr + (i * PAGE_SIZE);
-               pgd = pgd_offset_k(addr);
-               pud = pud_alloc(&init_mm, pgd, addr);
-               if (!pud) {
-                       e = -ENOMEM;
-                       dev_err(&pdev->dev, "couldn't allocate page tables\n");
-                       goto fail;
-               }
-               pmd = pmd_alloc(&init_mm, pud, addr);
-               if (!pmd) {
-                       e = -ENOMEM;
-                       dev_err(&pdev->dev, "couldn't allocate page tables\n");
-                       goto fail;
-               }
-               dev->ptes[i] = pte_alloc_kernel(pmd, addr);
-               if (!dev->ptes[i]) {
-                       e = -ENOMEM;
-                       dev_err(&pdev->dev, "couldn't allocate page tables\n");
-                       goto fail;
-               }
-       }
-
        e = misc_register(&dev->dev_user);
        if (e) {
                dev_err(&pdev->dev, "unable to register miscdevice %s\n",
@@ -1281,13 +1238,6 @@ static int nvmap_probe(struct platform_device *pdev)
                goto fail;
        }
 
-       e = misc_register(&dev->dev_super);
-       if (e) {
-               dev_err(&pdev->dev, "unable to register miscdevice %s\n",
-                       dev->dev_super.name);
-               goto fail;
-       }
-
        dev->nr_carveouts = 0;
        dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
                             plat->nr_carveouts, GFP_KERNEL);
@@ -1333,12 +1283,19 @@ static int nvmap_probe(struct platform_device *pdev)
                                debugfs_create_dir(co->name, nvmap_debug_root);
                        if (!IS_ERR_OR_NULL(heap_root)) {
                                debugfs_create_file("clients", S_IRUGO,
-                                       heap_root, node, &debug_clients_fops);
+                                       heap_root,
+                                       (void *)(uintptr_t)node->heap_bit,
+                                       &debug_clients_fops);
                                debugfs_create_file("allocations", S_IRUGO,
-                                       heap_root, node,
+                                       heap_root,
+                                       (void *)(uintptr_t)node->heap_bit,
                                        &debug_allocations_fops);
+                               debugfs_create_file("maps", S_IRUGO,
+                                       heap_root,
+                                       (void *)(uintptr_t)node->heap_bit,
+                                       &debug_maps_fops);
                                nvmap_heap_debugfs_init(heap_root,
-                                       node->carveout);
+                                                       node->carveout);
                        }
                }
        }
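
With the per-carveout client lists gone, these debugfs files no longer
take a nvmap_carveout_node: the heap bit itself rides in the debugfs
private pointer and is decoded in the show() handlers. The round trip,
schematically (illustrative):

	/* registration: encode the heap type in the private pointer */
	debugfs_create_file("clients", S_IRUGO, heap_root,
			    (void *)(uintptr_t)node->heap_bit,
			    &debug_clients_fops);

	/* show() side: decode it back */
	u32 heap_type = (u32)(uintptr_t)s->private;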
@@ -1347,37 +1304,20 @@ static int nvmap_probe(struct platform_device *pdev)
                        debugfs_create_dir("iovmm", nvmap_debug_root);
                if (!IS_ERR_OR_NULL(iovmm_root)) {
                        debugfs_create_file("clients", S_IRUGO, iovmm_root,
-                               dev, &debug_iovmm_clients_fops);
+                               (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
+                               &debug_clients_fops);
                        debugfs_create_file("allocations", S_IRUGO, iovmm_root,
-                               dev, &debug_iovmm_allocations_fops);
+                               (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
+                               &debug_allocations_fops);
+                       debugfs_create_file("maps", S_IRUGO, iovmm_root,
+                               (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
+                               &debug_maps_fops);
                        debugfs_create_file("procrank", S_IRUGO, iovmm_root,
                                dev, &debug_iovmm_procrank_fops);
+               }
 #ifdef CONFIG_NVMAP_PAGE_POOLS
-                       debugfs_create_u32("page_pool_available_pages",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.count);
-#ifdef CONFIG_NVMAP_PAGE_POOL_DEBUG
-                       debugfs_create_u32("page_pool_alloc_ind",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.alloc);
-                       debugfs_create_u32("page_pool_fill_ind",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.fill);
-                       debugfs_create_u64("page_pool_allocs",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.allocs);
-                       debugfs_create_u64("page_pool_fills",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.fills);
-                       debugfs_create_u64("page_pool_hits",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.hits);
-                       debugfs_create_u64("page_pool_misses",
-                                          S_IRUGO, iovmm_root,
-                                          &dev->iovmm_master.pool.misses);
+               nvmap_page_pool_debugfs_init(nvmap_debug_root);
 #endif
-#endif
-               }
 #ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
                debugfs_create_size_t("cache_maint_inner_threshold",
                                      S_IRUSR | S_IWUSR,
@@ -1402,9 +1342,6 @@ static int nvmap_probe(struct platform_device *pdev)
 
        nvmap_stats_init(nvmap_debug_root);
        platform_set_drvdata(pdev, dev);
-       nvmap_pdev = pdev;
-       nvmap_dev = dev;
-       nvmap_share = &dev->iovmm_master;
 
        nvmap_dmabuf_debugfs_init(nvmap_debug_root);
        e = nvmap_dmabuf_stash_init();
@@ -1418,13 +1355,12 @@ fail_heaps:
                nvmap_heap_destroy(node->carveout);
        }
 fail:
+#ifdef CONFIG_NVMAP_PAGE_POOLS
+       nvmap_page_pool_fini(nvmap_dev);
+#endif
        kfree(dev->heaps);
-       if (dev->dev_super.minor != MISC_DYNAMIC_MINOR)
-               misc_deregister(&dev->dev_super);
        if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
                misc_deregister(&dev->dev_user);
-       if (dev->vm_rgn)
-               free_vm_area(dev->vm_rgn);
        kfree(dev);
        nvmap_dev = NULL;
        return e;
@@ -1437,7 +1373,6 @@ static int nvmap_remove(struct platform_device *pdev)
        struct nvmap_handle *h;
        int i;
 
-       misc_deregister(&dev->dev_super);
        misc_deregister(&dev->dev_user);
 
        while ((n = rb_first(&dev->handles))) {
@@ -1452,7 +1387,6 @@ static int nvmap_remove(struct platform_device *pdev)
        }
        kfree(dev->heaps);
 
-       free_vm_area(dev->vm_rgn);
        kfree(dev);
        nvmap_dev = NULL;
        return 0;