/*
 * drivers/video/tegra/nvmap/nvmap_dev.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/backing-dev.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/nvmap.h>
#include <linux/module.h>
#include <linux/resource.h>
#include <linux/security.h>
#include <linux/stat.h>
#include <linux/kthread.h>

#include <asm/cputype.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */

/* this is basically the L2 cache size */
#ifdef CONFIG_DENVER_CPU
size_t cache_maint_inner_threshold = SZ_2M * 8;
#else
size_t cache_maint_inner_threshold = SZ_2M;
#endif

#ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
size_t cache_maint_outer_threshold = SZ_1M;
#endif

struct nvmap_carveout_node {
	unsigned int		heap_bit;
	struct nvmap_heap	*carveout;
	int			index;
	struct list_head	clients;
	spinlock_t		clients_lock;
	phys_addr_t		base;
	size_t			size;
};

struct nvmap_device *nvmap_dev;
struct nvmap_stats nvmap_stats;

static struct backing_dev_info nvmap_bdi = {
	.ra_pages	= 0,
	.capabilities	= (BDI_CAP_NO_ACCT_AND_WRITEBACK |
			   BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
};

static struct device_dma_parameters nvmap_dma_parameters = {
	.max_segment_size = UINT_MAX,
};
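
/*
 * A maximum DMA segment size of UINT_MAX tells the DMA mapping API that a
 * buffer never has to be split into multiple IO virtual address ranges on
 * account of segment size; see the comment above the dma_parms assignment
 * in nvmap_probe() below.
 */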

static int nvmap_open(struct inode *inode, struct file *filp);
static int nvmap_release(struct inode *inode, struct file *filp);
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
static void nvmap_vma_close(struct vm_area_struct *vma);
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct file_operations nvmap_user_fops = {
	.owner		= THIS_MODULE,
	.open		= nvmap_open,
	.release	= nvmap_release,
	.unlocked_ioctl = nvmap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nvmap_ioctl,
#endif
	.mmap		= nvmap_map,
};

static struct vm_operations_struct nvmap_vma_ops = {
	.open	= nvmap_vma_open,
	.close	= nvmap_vma_close,
	.fault	= nvmap_vma_fault,
};

int is_nvmap_vma(struct vm_area_struct *vma)
{
	return vma->vm_ops == &nvmap_vma_ops;
}

/*
 * Looks up the passed client's reference to the given handle and returns it,
 * or NULL if the client holds no reference to that handle.
 *
 * Note: the caller must hold the client's ref lock.
 */
struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
						 struct nvmap_handle *h)
{
	struct rb_node *n = c->handle_refs.rb_node;

	while (n) {
		struct nvmap_handle_ref *ref;
		ref = rb_entry(n, struct nvmap_handle_ref, node);
		if (ref->handle == h)
			return ref;
		else if ((uintptr_t)h > (uintptr_t)ref->handle)
			n = n->rb_right;
		else
			n = n->rb_left;
	}

	return NULL;
}
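
/*
 * Typical call pattern (nvmap_ref_lock()/nvmap_ref_unlock() are the client
 * ref-lock helpers used elsewhere in this file):
 *
 *	nvmap_ref_lock(client);
 *	ref = __nvmap_validate_locked(client, handle);
 *	if (ref)
 *		... use or dup the reference ...
 *	nvmap_ref_unlock(client);
 */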

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b)
{
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct nvmap_carveout_node *n;
	int i;

	for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
		n = &nvmap_dev->heaps[i];
		if (n->carveout == h)
			return n->heap_bit;
	}
	return 0;
}

/*
 * This routine is used to flush carveout memory from the cache.
 * Why is a cache flush needed for carveout memory? Consider the case where a
 * piece of carveout is allocated as cached and then released. If the same
 * memory is later handed out for an uncached request without being flushed
 * from the cache, the client might pass it to a H/W engine, which starts
 * modifying it. Because the memory was previously cached, stale lines may
 * still sit in the cache; when the CPU later evicts them during unrelated
 * reads and writes, they are written back to main memory and corrupt the
 * data the H/W engine has already placed there.
 *
 * Flushing the memory blindly on every carveout allocation, however, is
 * redundant. To keep the number of carveout cache flushes down, the
 * following strategy is used:
 *
 * The whole carveout is flushed from the cache during its initialization.
 * During allocation, carveout buffers are not flushed from the cache.
 * During deallocation, carveout buffers are flushed if they were allocated
 * as cached; if they were allocated as uncached/writecombined, no cache
 * flush is needed and draining the store buffers is enough.
 */
int nvmap_flush_heap_block(struct nvmap_client *client,
	struct nvmap_heap_block *block, size_t len, unsigned int prot)
{
	ulong kaddr;
	phys_addr_t phys = block->base;
	phys_addr_t end = block->base + len;
	struct vm_struct *area = NULL;

	if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
		goto out;

#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
	if (len >= cache_maint_inner_threshold) {
		inner_flush_cache_all();
		if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
			outer_flush_range(block->base, block->base + len);
		goto out;
	}
#endif

	area = alloc_vm_area(PAGE_SIZE, NULL);
	if (!area)
		return -ENOMEM;
	kaddr = (ulong)area->addr;

	while (phys < end) {
		phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (phys & ~PAGE_MASK);

		next = min(next, end);
		ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
			phys, PG_PROT_KERNEL);
		FLUSH_DCACHE_AREA(base, next - phys);
		phys = next;
		unmap_kernel_range(kaddr, PAGE_SIZE);
	}

	if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_flush_range(block->base, block->base + len);

	free_vm_area(area);
out:
	wmb();
	return 0;
}
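
/*
 * For example, with the default 2 MB inner threshold above and
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS enabled, a 16 MB cached carveout
 * buffer takes the single inner_flush_cache_all() path, while a 64 KB buffer
 * is walked page by page through the temporary mapping and flushed by
 * virtual address range.
 */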

void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node,
			       size_t len)
{
	spin_lock(&node->clients_lock);
	BUG_ON(list_empty(&client->carveout_commit[node->index].list) &&
	       client->carveout_commit[node->index].commit != 0);

	client->carveout_commit[node->index].commit += len;
	/* if this client isn't already on the list of nodes for this heap,
	 * add it */
	if (list_empty(&client->carveout_commit[node->index].list)) {
		list_add(&client->carveout_commit[node->index].list,
			 &node->clients);
	}
	spin_unlock(&node->clients_lock);
}

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len)
{
	if (!client)
		return;

	spin_lock(&node->clients_lock);
	BUG_ON(client->carveout_commit[node->index].commit < len);
	client->carveout_commit[node->index].commit -= len;
	/* if this client no longer has allocations in this carveout, remove
	 * it from the heap's client list */
	if (!client->carveout_commit[node->index].commit)
		list_del_init(&client->carveout_commit[node->index].list);
	spin_unlock(&node->clients_lock);
}

struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
						 struct nvmap_handle *handle,
						 unsigned long type)
{
	struct nvmap_carveout_node *co_heap;
	struct nvmap_device *dev = nvmap_dev;
	int i;

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_heap_block *block;
		co_heap = &dev->heaps[i];

		if (!(co_heap->heap_bit & type))
			continue;

		block = nvmap_heap_alloc(co_heap->carveout, handle);
		if (block)
			return block;
	}
	return NULL;
}

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
					      struct nvmap_handle *handle,
					      unsigned long type)
{
	return do_nvmap_carveout_alloc(client, handle, type);
}

/* remove a handle from the device's tree of all handles; called
 * when freeing handles. */
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
{
	spin_lock(&dev->handle_lock);

	/* re-test inside the spinlock if the handle really has no clients;
	 * only remove the handle if it is unreferenced */
	if (atomic_add_return(0, &h->ref) > 0) {
		spin_unlock(&dev->handle_lock);
		return -EBUSY;
	}

	BUG_ON(atomic_read(&h->ref) < 0);
	BUG_ON(atomic_read(&h->pin) != 0);

	rb_erase(&h->node, &dev->handles);

	spin_unlock(&dev->handle_lock);
	return 0;
}

/* adds a newly-created handle to the device master tree */
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;

	spin_lock(&dev->handle_lock);
	p = &dev->handles.rb_node;
	while (*p) {
		struct nvmap_handle *b;

		parent = *p;
		b = rb_entry(parent, struct nvmap_handle, node);
		if (h > b)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&h->node, parent, p);
	rb_insert_color(&h->node, &dev->handles);
	spin_unlock(&dev->handle_lock);
}

/* Validates that a handle is in the device master tree and that the
 * client has permission to access it. */
struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
{
	struct nvmap_handle *h = NULL;
	struct rb_node *n;

	spin_lock(&nvmap_dev->handle_lock);
	n = nvmap_dev->handles.rb_node;
	while (n) {
		h = rb_entry(n, struct nvmap_handle, node);
		if (h == id) {
			h = nvmap_handle_get(h);
			spin_unlock(&nvmap_dev->handle_lock);
			return h;
		}
		if (id > h)
			n = n->rb_right;
		else
			n = n->rb_left;
	}
	spin_unlock(&nvmap_dev->handle_lock);
	return NULL;
}

struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
					   const char *name)
{
	struct nvmap_client *client;
	struct task_struct *task;
	int i;

	if (WARN_ON(!dev))
		return NULL;

	client = kzalloc(sizeof(*client) + (sizeof(struct nvmap_carveout_commit)
			 * dev->nr_carveouts), GFP_KERNEL);
	if (!client)
		return NULL;

	client->name = name;
	client->kernel_client = true;
	client->handle_refs = RB_ROOT;

	for (i = 0; i < dev->nr_carveouts; i++) {
		INIT_LIST_HEAD(&client->carveout_commit[i].list);
		client->carveout_commit[i].commit = 0;
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;

	mutex_init(&client->ref_lock);
	atomic_set(&client->count, 1);

	spin_lock(&dev->clients_lock);
	list_add(&client->list, &dev->clients);
	spin_unlock(&dev->clients_lock);
	return client;
}

static void destroy_client(struct nvmap_client *client)
{
	struct rb_node *n;
	int i;

	if (!client)
		return;

	spin_lock(&nvmap_dev->clients_lock);
	list_del(&client->list);
	spin_unlock(&nvmap_dev->clients_lock);

	while ((n = rb_first(&client->handle_refs))) {
		struct nvmap_handle_ref *ref;
		int pins, dupes;

		ref = rb_entry(n, struct nvmap_handle_ref, node);
		pins = atomic_read(&ref->pin);
		/* drop any pins still held; __nvmap_unpin() assumed from nvmap_priv.h */
		while (pins--)
			__nvmap_unpin(ref);

		if (ref->handle->owner == client)
			ref->handle->owner = NULL;

		dma_buf_put(ref->handle->dmabuf);
		rb_erase(&ref->node, &client->handle_refs);
		atomic_dec(&ref->handle->share_count);

		dupes = atomic_read(&ref->dupes);
		while (dupes--)
			nvmap_handle_put(ref->handle);
		kfree(ref);
	}

	for (i = 0; i < nvmap_dev->nr_carveouts; i++)
		list_del(&client->carveout_commit[i].list);

	if (client->task)
		put_task_struct(client->task);
	kfree(client);
}

struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
	if (!virt_addr_valid(client))
		return NULL;

	if (!atomic_add_unless(&client->count, 1, 0))
		return NULL;

	return client;
}

void nvmap_client_put(struct nvmap_client *client)
{
	if (!client)
		return;

	if (!atomic_dec_return(&client->count))
		destroy_client(client);
}
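
/*
 * Client lifetime follows this get/put pair: every nvmap_client_get() must be
 * balanced by an nvmap_client_put(), and once the count started at 1 by
 * __nvmap_create_client() drops back to zero the client is torn down by
 * destroy_client().
 */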

static int nvmap_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
	struct nvmap_client *priv;
	int ret;
	__attribute__((unused)) struct rlimit old_rlim, new_rlim;

	ret = nonseekable_open(inode, filp);
	if (ret)
		return ret;

	BUG_ON(dev != nvmap_dev);
	priv = __nvmap_create_client(dev, "user");
	if (!priv)
		return -ENOMEM;
	trace_nvmap_open(priv, priv->name);

	priv->kernel_client = false;

	filp->f_mapping->backing_dev_info = &nvmap_bdi;

	filp->private_data = priv;
	return 0;
}

static int nvmap_release(struct inode *inode, struct file *filp)
{
	struct nvmap_client *priv = filp->private_data;

	trace_nvmap_release(priv, priv->name);
	nvmap_client_put(priv);
	return 0;
}

int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	h = nvmap_handle_get(h);
	if (!h)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvmap_handle_put(h);
		return -ENOMEM;
	}
	priv->handle = h;

	vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
			 VM_DONTDUMP | VM_DONTCOPY |
			 (h->heap_pgalloc ? 0 : VM_PFNMAP);
	vma->vm_ops = &nvmap_vma_ops;
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = priv;
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
	nvmap_vma_open(vma);
	return 0;
}

static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
{
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
			  VM_DONTDUMP | VM_DONTCOPY);
	vma->vm_ops = &nvmap_vma_ops;
	return 0;
}

static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *uarg = (void __user *)arg;

	if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
		return -ENOTTY;

	if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
		return -ENOTTY;

	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));

	if (err)
		return -EFAULT;

	switch (cmd) {
	case NVMAP_IOC_CREATE:
	case NVMAP_IOC_FROM_ID:
	case NVMAP_IOC_FROM_FD:
		err = nvmap_ioctl_create(filp, cmd, uarg);
		break;
	case NVMAP_IOC_GET_ID:
		err = nvmap_ioctl_getid(filp, uarg);
		break;
	case NVMAP_IOC_GET_FD:
		err = nvmap_ioctl_getfd(filp, uarg);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_PARAM_32:
		err = nvmap_ioctl_get_param(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_PARAM:
		err = nvmap_ioctl_get_param(filp, uarg, false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_UNPIN_MULT_32:
	case NVMAP_IOC_PIN_MULT_32:
		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
			uarg, true);
		break;
#endif
	case NVMAP_IOC_UNPIN_MULT:
	case NVMAP_IOC_PIN_MULT:
		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
			uarg, false);
		break;
	case NVMAP_IOC_ALLOC:
		err = nvmap_ioctl_alloc(filp, uarg);
		break;
	case NVMAP_IOC_ALLOC_KIND:
		err = nvmap_ioctl_alloc_kind(filp, uarg);
		break;
	case NVMAP_IOC_FREE:
		err = nvmap_ioctl_free(filp, arg);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_MMAP_32:
		err = nvmap_map_into_caller_ptr(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_MMAP:
		err = nvmap_map_into_caller_ptr(filp, uarg, false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_WRITE_32:
	case NVMAP_IOC_READ_32:
		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
			uarg, true);
		break;
#endif
	case NVMAP_IOC_WRITE:
	case NVMAP_IOC_READ:
		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
			false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_CACHE_32:
		err = nvmap_ioctl_cache_maint(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_CACHE:
		err = nvmap_ioctl_cache_maint(filp, uarg, false);
		break;
	case NVMAP_IOC_CACHE_LIST:
	case NVMAP_IOC_RESERVE:
		err = nvmap_ioctl_cache_maint_list(filp, uarg,
						   cmd == NVMAP_IOC_RESERVE);
		break;
	case NVMAP_IOC_SHARE:
		err = nvmap_ioctl_share_dmabuf(filp, uarg);
		break;
	default:
		return -ENOTTY;
	}
	return err;
}
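
/*
 * Rough user-space usage sketch of this interface (the argument structs live
 * in the nvmap UAPI header; the op variable names here are illustrative and
 * may differ by kernel version):
 *
 *	int fd = open("/dev/nvmap", O_RDWR);
 *	ioctl(fd, NVMAP_IOC_CREATE, &create_op);   // obtain a handle
 *	ioctl(fd, NVMAP_IOC_ALLOC, &alloc_op);     // back it with memory
 *	ioctl(fd, NVMAP_IOC_MMAP, &mmap_op);       // map it into the caller
 *	...
 *	ioctl(fd, NVMAP_IOC_FREE, handle);         // FREE takes the handle value directly
 */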

/* to ensure that the backing store for the VMA isn't freed while a fork'd
 * reference still exists, nvmap_vma_open increments the reference count on
 * the handle, and nvmap_vma_close decrements it. alternatively, we could
 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
 */
void nvmap_vma_open(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;
	struct nvmap_handle *h;
	struct nvmap_vma_list *vma_list, *tmp;

	priv = vma->vm_private_data;
	BUG_ON(!priv);
	BUG_ON(!priv->handle);

	atomic_inc(&priv->count);
	h = priv->handle;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (vma_list) {
		mutex_lock(&h->lock);
		list_for_each_entry(tmp, &h->vmas, list)
			BUG_ON(tmp->vma == vma);

		vma_list->vma = vma;
		list_add(&vma_list->list, &h->vmas);
		mutex_unlock(&h->lock);
	} else {
		WARN(1, "vma not tracked");
	}
}

static void nvmap_vma_close(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv = vma->vm_private_data;
	struct nvmap_vma_list *vma_list;
	struct nvmap_handle *h;
	bool vma_found = false;

	if (!priv)
		return;

	BUG_ON(!priv->handle);

	h = priv->handle;
	mutex_lock(&h->lock);
	list_for_each_entry(vma_list, &h->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		vma_found = true;
		break;
	}
	BUG_ON(!vma_found);
	mutex_unlock(&h->lock);

	if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
		if (priv->handle)
			nvmap_handle_put(priv->handle);
		vma->vm_private_data = NULL;
		kfree(priv);
	}
}

static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct nvmap_vma_priv *priv;
	unsigned long offs;

	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
	priv = vma->vm_private_data;
	if (!priv || !priv->handle || !priv->handle->alloc)
		return VM_FAULT_SIGBUS;

	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
	 * offset from the original VMA */
	offs += (vma->vm_pgoff << PAGE_SHIFT);

	if (offs >= priv->handle->size)
		return VM_FAULT_SIGBUS;

	if (!priv->handle->heap_pgalloc) {
		unsigned long pfn;

		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
		if (!pfn_valid(pfn)) {
			/* pr_err() used as a stand-in diagnostic here */
			pr_err("nvmap: fault at invalid pfn, vaddr=%lx pfn=%lx\n",
				(unsigned long)vmf->virtual_address, pfn);
			return VM_FAULT_NOPAGE;
		}
		/* CMA memory would get here */
		page = pfn_to_page(pfn);
	} else {
		offs >>= PAGE_SHIFT;
		if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
			return VM_FAULT_SIGBUS;
		page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
		nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
	}

	if (page)
		get_page(page);
	vmf->page = page;
	return (page) ? 0 : VM_FAULT_SIGBUS;
}

#define DEBUGFS_OPEN_FOPS(name) \
static int nvmap_debug_##name##_open(struct inode *inode, \
				     struct file *file) \
{ \
	return single_open(file, nvmap_debug_##name##_show, \
			   inode->i_private); \
} \
\
static const struct file_operations debug_##name##_fops = { \
	.open = nvmap_debug_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

#define K(x) (x >> 10)
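
/*
 * DEBUGFS_OPEN_FOPS(allocations), for instance, generates
 * nvmap_debug_allocations_open() plus debug_allocations_fops, which
 * nvmap_probe() hands to debugfs_create_file() with the heap bit as the
 * inode private data. K() converts a byte count to KiB for the dumps below.
 */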

static void client_stringify(struct nvmap_client *client, struct seq_file *s)
{
	char task_comm[TASK_COMM_LEN];

	if (!client->task) {
		seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
		return;
	}
	get_task_comm(task_comm, client->task);
	seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
		   client->task->pid);
}

static void allocations_stringify(struct nvmap_client *client,
				  struct seq_file *s, u32 heap_type)
{
	struct rb_node *n;

	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *handle = ref->handle;
		if (handle->alloc && handle->heap_type == heap_type) {
			phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
					   (handle->carveout->base);
			seq_printf(s,
				"%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %8p\n",
				"", "",
				(unsigned long long)base, K(handle->size),
				handle->userflags,
				atomic_read(&handle->ref),
				atomic_read(&ref->dupes),
				atomic_read(&ref->pin),
				atomic_read(&handle->share_count),
				handle);
		}
	}
	nvmap_ref_unlock(client);
}

static void nvmap_get_client_mss(struct nvmap_client *client,
				 u64 *total, u32 heap_type)
{
	struct rb_node *n;

	*total = 0;
	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *handle = ref->handle;
		if (handle->alloc && handle->heap_type == heap_type)
			*total += handle->size /
				  atomic_read(&handle->share_count);
	}
	nvmap_ref_unlock(client);
}

static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
				u64 *total, u32 heap_type)
{
	int i;
	struct rb_node *n;
	struct nvmap_device *dev = nvmap_dev;

	*total = 0;
	if (pss)
		*pss = 0;
	if (non_pss)
		*non_pss = 0;
	if (!dev)
		return;
	spin_lock(&dev->handle_lock);
	n = rb_first(&dev->handles);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle *h =
			rb_entry(n, struct nvmap_handle, node);

		if (!h || !h->alloc || h->heap_type != heap_type)
			continue;
		*total += h->size;
		if (!non_pss || !h->heap_pgalloc)
			continue;
		for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
			struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
			int mapcount = page_mapcount(page);
			if (!mapcount)
				*non_pss += PAGE_SIZE;
		}
	}
	if (pss)
		*pss = *total - *non_pss;
	spin_unlock(&dev->handle_lock);
}
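
/*
 * Note that "PSS" here is simply total minus the pages that currently have a
 * zero mapcount; a page mapped by at least one process is charged in full
 * rather than split proportionally across its users.
 */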

static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
{
	u64 total;
	struct nvmap_client *client;
	u32 heap_type = (u32)(uintptr_t)s->private;

	spin_lock(&nvmap_dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s\n",
		"CLIENT", "PROCESS", "PID", "SIZE");
	seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
			"", "", "BASE", "SIZE", "FLAGS", "REFS",
			"DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
	list_for_each_entry(client, &nvmap_dev->clients, list) {
		u64 client_total;

		client_stringify(client, s);
		nvmap_get_client_mss(client, &client_total, heap_type);
		seq_printf(s, " %10lluK\n", K(client_total));
		allocations_stringify(client, s, heap_type);
		seq_printf(s, "\n");
	}
	spin_unlock(&nvmap_dev->clients_lock);

	nvmap_get_total_mss(NULL, NULL, &total, heap_type);
	seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
	return 0;
}

DEBUGFS_OPEN_FOPS(allocations);

static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
{
	u64 total;
	struct nvmap_client *client;
	ulong heap_type = (ulong)s->private;

	spin_lock(&nvmap_dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s\n",
		"CLIENT", "PROCESS", "PID", "SIZE");
	list_for_each_entry(client, &nvmap_dev->clients, list) {
		u64 client_total;

		client_stringify(client, s);
		nvmap_get_client_mss(client, &client_total, heap_type);
		seq_printf(s, " %10lluK\n", K(client_total));
	}
	spin_unlock(&nvmap_dev->clients_lock);

	nvmap_get_total_mss(NULL, NULL, &total, heap_type);
	seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
	return 0;
}

DEBUGFS_OPEN_FOPS(clients);

#define PRINT_MEM_STATS_NOTE(x) \
do { \
	seq_printf(s, "Note: total memory is a precise account of pages " \
		"allocated by NvMap.\nIt doesn't match the sum of all " \
		"clients' \"%s\" because shared memory is accounted in " \
		"full in every client's \"%s\" that shares it.\n", #x, #x); \
} while (0)

static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
				       u64 *non_pss, u64 *total)
{
	int i;
	struct rb_node *n;

	*pss = *non_pss = *total = 0;
	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *h = ref->handle;

		if (!h || !h->alloc || !h->heap_pgalloc)
			continue;
		*total += h->size;

		for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
			struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
			int mapcount = page_mapcount(page);
			if (!mapcount)
				*non_pss += PAGE_SIZE;
		}
	}
	*pss = *total - *non_pss;
	nvmap_ref_unlock(client);
}

static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
{
	u64 pss, non_pss, total;
	struct nvmap_client *client;
	struct nvmap_device *dev = s->private;
	u64 total_memory, total_pss, total_non_pss;

	spin_lock(&dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
		"CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
	list_for_each_entry(client, &dev->clients, list) {
		client_stringify(client, s);
		nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
		seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
			K(non_pss), K(total));
	}
	spin_unlock(&dev->clients_lock);

	nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory, NVMAP_HEAP_IOVMM);
	seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
		"total", "", "", K(total_pss),
		K(total_non_pss), K(total_memory));
	PRINT_MEM_STATS_NOTE(TOTAL);
	return 0;
}

DEBUGFS_OPEN_FOPS(iovmm_procrank);

ulong nvmap_iovmm_get_used_pages(void)
{
	u64 total;

	nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
	return total >> PAGE_SHIFT;
}

static int nvmap_stats_reset(void *data, u64 val)
{
	int i;

	if (val) {
		atomic64_set(&nvmap_stats.collect, 0);
		for (i = 0; i < NS_NUM; i++) {
			if (i == NS_TOTAL)
				continue;
			atomic64_set(&nvmap_stats.stats[i], 0);
		}
	}
	return 0;
}

static int nvmap_stats_get(void *data, u64 *val)
{
	atomic64_t *ptr = data;

	*val = atomic64_read(ptr);
	return 0;
}

static int nvmap_stats_set(void *data, u64 val)
{
	atomic64_t *ptr = data;

	atomic64_set(ptr, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");

static void nvmap_stats_init(struct dentry *nvmap_debug_root)
{
	struct dentry *stats_root;

#define CREATE_DF(x, y) \
	debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops);

	stats_root = debugfs_create_dir("stats", nvmap_debug_root);
	if (!IS_ERR_OR_NULL(stats_root)) {
		CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
		CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
		CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
		CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
		CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
		CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
		CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
		CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
		CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
		CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
		CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
		CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
		CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);

		debugfs_create_file("collect", S_IRUGO | S_IWUSR,
			stats_root, &nvmap_stats.collect, &stats_fops);
		debugfs_create_file("reset", S_IWUSR,
			stats_root, NULL, &reset_stats_fops);
	}

#undef CREATE_DF
}
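
/*
 * With debugfs mounted at /sys/kernel/debug, the files created above land in
 * /sys/kernel/debug/nvmap/stats/: one counter file per CREATE_DF() entry
 * (alloc, release, ..., total_memory), plus "collect" to turn accounting on
 * or off and a write-only "reset".
 */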

void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
{
	if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
		atomic64_add(size, &nvmap_stats.stats[stat]);
}

void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
{
	if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
		atomic64_sub(size, &nvmap_stats.stats[stat]);
}

u64 nvmap_stats_read(enum nvmap_stats_t stat)
{
	return atomic64_read(&nvmap_stats.stats[stat]);
}

static int nvmap_probe(struct platform_device *pdev)
{
	struct nvmap_platform_data *plat = pdev->dev.platform_data;
	struct nvmap_device *dev;
	struct dentry *nvmap_debug_root;
	int e = 0;
	int i;

	if (!plat) {
		dev_err(&pdev->dev, "no platform data?\n");
		return -ENODEV;
	}

	/*
	 * The DMA mapping API uses these parameters to decide how to map the
	 * passed buffers. If the maximum physical segment size is set to
	 * smaller than the size of the buffer, then the buffer will be mapped
	 * as separate IO virtual address ranges.
	 */
	pdev->dev.dma_parms = &nvmap_dma_parameters;

	if (WARN_ON(nvmap_dev != NULL)) {
		dev_err(&pdev->dev, "only one nvmap device may be present\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "out of memory for device\n");
		return -ENOMEM;
	}

	nvmap_dev = dev;

	dev->dev_user.minor = MISC_DYNAMIC_MINOR;
	dev->dev_user.name = "nvmap";
	dev->dev_user.fops = &nvmap_user_fops;
	dev->dev_user.parent = &pdev->dev;

	dev->handles = RB_ROOT;

#ifdef CONFIG_NVMAP_PAGE_POOLS
	e = nvmap_page_pool_init(dev);
	if (e)
		goto fail;
#endif

	spin_lock_init(&dev->handle_lock);
	INIT_LIST_HEAD(&dev->clients);
	spin_lock_init(&dev->clients_lock);

	e = misc_register(&dev->dev_user);
	if (e) {
		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
			dev->dev_user.name);
		goto fail;
	}

	dev->nr_carveouts = 0;
	dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
			     plat->nr_carveouts, GFP_KERNEL);
	if (!dev->heaps) {
		e = -ENOMEM;
		dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
		goto fail;
	}

	nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
	if (IS_ERR_OR_NULL(nvmap_debug_root))
		dev_err(&pdev->dev, "couldn't create debug files\n");

	debugfs_create_u32("max_handle_count", S_IRUGO,
			   nvmap_debug_root, &nvmap_max_handle_count);

	for (i = 0; i < plat->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
		const struct nvmap_platform_carveout *co = &plat->carveouts[i];
		node->base = round_up(co->base, PAGE_SIZE);
		node->size = round_down(co->size -
					(node->base - co->base), PAGE_SIZE);
		if (!co->size)
			continue;

		node->carveout = nvmap_heap_create(
				dev->dev_user.this_device, co,
				node->base, node->size, node);

		if (!node->carveout) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't create %s\n", co->name);
			goto fail_heaps;
		}
		node->index = dev->nr_carveouts;
		dev->nr_carveouts++;
		spin_lock_init(&node->clients_lock);
		INIT_LIST_HEAD(&node->clients);
		node->heap_bit = co->usage_mask;

		if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
			struct dentry *heap_root =
				debugfs_create_dir(co->name, nvmap_debug_root);
			if (!IS_ERR_OR_NULL(heap_root)) {
				debugfs_create_file("clients", S_IRUGO,
					heap_root,
					(void *)(uintptr_t)node->heap_bit,
					&debug_clients_fops);
				debugfs_create_file("allocations", S_IRUGO,
					heap_root,
					(void *)(uintptr_t)node->heap_bit,
					&debug_allocations_fops);
				nvmap_heap_debugfs_init(heap_root,
							node->carveout);
			}
		}
	}

	if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
		struct dentry *iovmm_root =
			debugfs_create_dir("iovmm", nvmap_debug_root);
		if (!IS_ERR_OR_NULL(iovmm_root)) {
			debugfs_create_file("clients", S_IRUGO, iovmm_root,
				(void *)(uintptr_t)NVMAP_HEAP_IOVMM,
				&debug_clients_fops);
			debugfs_create_file("allocations", S_IRUGO, iovmm_root,
				(void *)(uintptr_t)NVMAP_HEAP_IOVMM,
				&debug_allocations_fops);
			debugfs_create_file("procrank", S_IRUGO, iovmm_root,
				dev, &debug_iovmm_procrank_fops);
		}
#ifdef CONFIG_NVMAP_PAGE_POOLS
		nvmap_page_pool_debugfs_init(nvmap_debug_root);
#endif
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
		debugfs_create_size_t("cache_maint_inner_threshold",
				      S_IRUSR | S_IWUSR,
				      nvmap_debug_root,
				      &cache_maint_inner_threshold);

		if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
			cache_maint_inner_threshold = SZ_32K;
		pr_info("nvmap:inner cache maint threshold=%zd",
			cache_maint_inner_threshold);
#endif
#ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
		debugfs_create_size_t("cache_maint_outer_threshold",
				      S_IRUSR | S_IWUSR,
				      nvmap_debug_root,
				      &cache_maint_outer_threshold);
		pr_info("nvmap:outer cache maint threshold=%zd",
			cache_maint_outer_threshold);
#endif
	}

	nvmap_stats_init(nvmap_debug_root);
	platform_set_drvdata(pdev, dev);

	nvmap_dmabuf_debugfs_init(nvmap_debug_root);
	e = nvmap_dmabuf_stash_init();
	if (e)
		goto fail_heaps;

	return 0;
fail_heaps:
	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_destroy(node->carveout);
	}
fail:
#ifdef CONFIG_NVMAP_PAGE_POOLS
	nvmap_page_pool_fini(nvmap_dev);
#endif
	kfree(dev->heaps);
	if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&dev->dev_user);
	kfree(dev);
	nvmap_dev = NULL;
	return e;
}

static int nvmap_remove(struct platform_device *pdev)
{
	struct nvmap_device *dev = platform_get_drvdata(pdev);
	struct rb_node *n;
	struct nvmap_handle *h;
	int i;

	misc_deregister(&dev->dev_user);

	while ((n = rb_first(&dev->handles))) {
		h = rb_entry(n, struct nvmap_handle, node);
		rb_erase(&h->node, &dev->handles);
		kfree(h);
	}

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_destroy(node->carveout);
	}
	kfree(dev->heaps);

	kfree(dev);
	nvmap_dev = NULL;
	return 0;
}

static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int nvmap_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver nvmap_driver = {
	.probe		= nvmap_probe,
	.remove		= nvmap_remove,
	.suspend	= nvmap_suspend,
	.resume		= nvmap_resume,

	.driver = {
		.name	= "tegra-nvmap",
		.owner	= THIS_MODULE,
	},
};

static int __init nvmap_init_driver(void)
{
	int e;

	nvmap_dev = NULL;

	e = nvmap_heap_init();
	if (e)
		goto fail;

	e = platform_driver_register(&nvmap_driver);
	if (e) {
		nvmap_heap_deinit();
		goto fail;
	}

fail:
	return e;
}
fs_initcall(nvmap_init_driver);

static void __exit nvmap_exit_driver(void)
{
	platform_driver_unregister(&nvmap_driver);
	nvmap_heap_deinit();
	nvmap_dev = NULL;
}
module_exit(nvmap_exit_driver);