video: tegra: nvmap: track handle's mapped memory
author:    Maneet Singh <mmaneetsingh@nvidia.com>
           Wed, 23 Jul 2014 00:28:04 +0000 (17:28 -0700)
committer: Winnie Hsu <whsu@nvidia.com>
           Sat, 30 Aug 2014 01:01:03 +0000 (18:01 -0700)
Added a new "maps" debugfs file for nvmap heaps. In addition to the data
given by the existing "allocations" file, it also shows each client's
virtual mappings and the total amount of handle physical memory that is
actually mapped into the client's virtual address space (see the layout
sketch after the change metadata below).

This change helps in tracking the nvmap memory usage of processes.

Bug 1529015

Change-Id: I85b5c221c7a7475cbc3585b130fda6a282756662
Signed-off-by: Maneet Singh <mmaneetsingh@nvidia.com>
Reviewed-on: http://git-master/r/448503
(cherry picked from commit 1f632679174cea962406980c8201d63c163635ee)
Reviewed-on: http://git-master/r/448580
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
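
For illustration, the new file appears next to the existing "allocations"
file in each heap's debugfs directory (for IOVMM, e.g.
/sys/kernel/debug/nvmap/iovmm/maps; the exact path depends on where debugfs
is mounted and is an assumption here, not part of this change). Based on the
seq_printf() headers added below, the output is laid out roughly as:

    CLIENT             PROCESS        PID        SIZE
                                     BASE        SIZE    FLAGS  SHARE       UID    MAPS    MAPSIZE

with one summary row per client, one row per allocated handle, one indented
row per VMA that maps the handle, and a final "total" row.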
drivers/video/tegra/nvmap/nvmap_dev.c

index 5c0685ac877637be2500d6b1542657734efabe4a..1c27e36c190dff8977d2549295fb10d0deddbd0f 100644
@@ -801,6 +801,89 @@ static void allocations_stringify(struct nvmap_client *client,
        nvmap_ref_unlock(client);
 }
 
+/* Compute the total amount of handle physical memory that is mapped
+ * into the client's virtual address space. Note that the vmas list is
+ * sorted in ascending order of handle offsets.
+ * NOTE: This function must be called with the handle's lock mutex held.
+ */
+static void nvmap_get_client_handle_mss(struct nvmap_client *client,
+                               struct nvmap_handle *handle, u64 *total)
+{
+       struct nvmap_vma_list *vma_list = NULL;
+       struct vm_area_struct *vma = NULL;
+       u64 end_offset = 0, vma_start_offset, vma_size;
+       int64_t overlap_size;
+
+       *total = 0;
+       list_for_each_entry(vma_list, &handle->vmas, list) {
+
+               if (client->task->pid == vma_list->pid) {
+                       vma = vma_list->vma;
+                       vma_size = vma->vm_end - vma->vm_start;
+
+                       vma_start_offset = vma->vm_pgoff << PAGE_SHIFT;
+                       if (end_offset < vma_start_offset + vma_size) {
+                               *total += vma_size;
+
+                               overlap_size = end_offset - vma_start_offset;
+                               if (overlap_size > 0)
+                                       *total -= overlap_size;
+                               end_offset = vma_start_offset + vma_size;
+                       }
+               }
+       }
+}
+
+static void maps_stringify(struct nvmap_client *client,
+                               struct seq_file *s, u32 heap_type)
+{
+       struct rb_node *n;
+       struct nvmap_vma_list *vma_list = NULL;
+       struct vm_area_struct *vma = NULL;
+       u64 total_mapped_size, vma_size;
+
+       nvmap_ref_lock(client);
+       n = rb_first(&client->handle_refs);
+       for (; n != NULL; n = rb_next(n)) {
+               struct nvmap_handle_ref *ref =
+                       rb_entry(n, struct nvmap_handle_ref, node);
+               struct nvmap_handle *handle = ref->handle;
+               if (handle->alloc && handle->heap_type == heap_type) {
+                       phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
+                                          (handle->carveout->base);
+                       seq_printf(s,
+                               "%-18s %-18s %8llx %10zuK %8x %6u %16p "
+                               "%12s %12s ",
+                               "", "",
+                               (unsigned long long)base, K(handle->size),
+                               handle->userflags,
+                               atomic_read(&handle->share_count),
+                               handle, "", "");
+
+                       mutex_lock(&handle->lock);
+                       nvmap_get_client_handle_mss(client, handle,
+                                                       &total_mapped_size);
+                       seq_printf(s, "%6lluK\n", K(total_mapped_size));
+
+                       list_for_each_entry(vma_list, &handle->vmas, list) {
+
+                               if (vma_list->pid == client->task->pid) {
+                                       vma = vma_list->vma;
+                                       vma_size = vma->vm_end - vma->vm_start;
+                                       seq_printf(s,
+                                         "%-18s %-18s %8s %11s %8s %6s %16s "
+                                         "%-12lx-%12lx %6lluK\n",
+                                         "", "", "", "", "", "", "",
+                                         vma->vm_start, vma->vm_end,
+                                         K(vma_size));
+                               }
+                       }
+                       mutex_unlock(&handle->lock);
+               }
+       }
+       nvmap_ref_unlock(client);
+}
+
 static void nvmap_get_client_mss(struct nvmap_client *client,
                                 u64 *total, u32 heap_type)
 {
@@ -888,6 +971,36 @@ static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
 
 DEBUGFS_OPEN_FOPS(allocations);
 
+static int nvmap_debug_maps_show(struct seq_file *s, void *unused)
+{
+       u64 total;
+       struct nvmap_client *client;
+       u32 heap_type = (u32)(uintptr_t)s->private;
+
+       spin_lock(&nvmap_dev->clients_lock);
+       seq_printf(s, "%-18s %18s %8s %11s\n",
+               "CLIENT", "PROCESS", "PID", "SIZE");
+       seq_printf(s, "%-18s %18s %8s %11s %8s %6s %9s %21s %18s\n",
+               "", "", "BASE", "SIZE", "FLAGS", "SHARE", "UID",
+               "MAPS", "MAPSIZE");
+
+       list_for_each_entry(client, &nvmap_dev->clients, list) {
+               u64 client_total;
+               client_stringify(client, s);
+               nvmap_get_client_mss(client, &client_total, heap_type);
+               seq_printf(s, " %10lluK\n", K(client_total));
+               maps_stringify(client, s, heap_type);
+               seq_printf(s, "\n");
+       }
+       spin_unlock(&nvmap_dev->clients_lock);
+
+       nvmap_get_total_mss(NULL, NULL, &total, heap_type);
+       seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
+       return 0;
+}
+
+DEBUGFS_OPEN_FOPS(maps);
+
 static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
 {
        u64 total;
@@ -1177,6 +1290,10 @@ static int nvmap_probe(struct platform_device *pdev)
                                        heap_root,
                                        (void *)(uintptr_t)node->heap_bit,
                                        &debug_allocations_fops);
+                               debugfs_create_file("maps", S_IRUGO,
+                                       heap_root,
+                                       (void *)(uintptr_t)node->heap_bit,
+                                       &debug_maps_fops);
                                nvmap_heap_debugfs_init(heap_root,
                                                        node->carveout);
                        }
@@ -1192,6 +1309,9 @@ static int nvmap_probe(struct platform_device *pdev)
                        debugfs_create_file("allocations", S_IRUGO, iovmm_root,
                                (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
                                &debug_allocations_fops);
+                       debugfs_create_file("maps", S_IRUGO, iovmm_root,
+                               (void *)(uintptr_t)NVMAP_HEAP_IOVMM,
+                               &debug_maps_fops);
                        debugfs_create_file("procrank", S_IRUGO, iovmm_root,
                                dev, &debug_iovmm_procrank_fops);
                }
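
The overlap handling in nvmap_get_client_handle_mss() is the subtle part of
this change: because a handle's vmas list is sorted by start offset, a single
running end_offset is enough to avoid double-counting bytes covered by more
than one VMA. The following user-space sketch models just that accounting;
struct mapping, total_mapped() and the sample values are hypothetical
illustrations, not nvmap code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one VMA over a handle:
 * [start, start + size) in bytes of handle offset space. */
struct mapping {
	uint64_t start;
	uint64_t size;
};

/* Same walk as nvmap_get_client_handle_mss(): maps[] must be sorted by
 * ascending start offset; overlapping bytes are counted only once. */
static uint64_t total_mapped(const struct mapping *maps, int n)
{
	uint64_t total = 0, end_offset = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint64_t start = maps[i].start;
		uint64_t size = maps[i].size;

		/* Skip mappings fully contained in already-counted space. */
		if (end_offset >= start + size)
			continue;

		total += size;
		/* Subtract the part overlapping the previous mappings. */
		if (end_offset > start)
			total -= end_offset - start;
		end_offset = start + size;
	}
	return total;
}

int main(void)
{
	/* Two 16 KiB mappings overlapping by 4 KiB: 16K + 16K - 4K = 28K. */
	struct mapping maps[] = {
		{ 0x0000, 0x4000 },
		{ 0x3000, 0x4000 },
	};

	printf("%lluK\n",
	       (unsigned long long)(total_mapped(maps, 2) >> 10));
	return 0;
}

Compiled as ordinary C, the example prints 28K: two 16 KiB mappings whose
last and first 4 KiB coincide, mirroring how the kernel function reports
MAPSIZE per handle.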