/*
 * drivers/video/tegra/nvmap/nvmap_dev.c
 *
 * User-space interface to nvmap
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/backing-dev.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/nvmap.h>
#include <linux/module.h>
#include <linux/resource.h>
#include <linux/security.h>
#include <linux/stat.h>
#include <linux/kthread.h>

#include <asm/cputype.h>

#define CREATE_TRACE_POINTS
#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

#define NVMAP_CARVEOUT_KILLER_RETRY_TIME 100 /* msecs */

/* this is basically the L2 cache size */
#ifdef CONFIG_DENVER_CPU
size_t cache_maint_inner_threshold = SZ_2M * 8;
#else
size_t cache_maint_inner_threshold = SZ_2M;
#endif

#ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
size_t cache_maint_outer_threshold = SZ_1M;
#endif
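
/*
 * Above these thresholds, nvmap_flush_heap_block() below gives up on
 * flushing by virtual-address range and instead flushes the whole cache
 * by set/ways, which is cheaper for large buffers. Both values can be
 * tuned at runtime through debugfs (see nvmap_probe()).
 */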

struct nvmap_carveout_node {
	unsigned int		heap_bit;
	struct nvmap_heap	*carveout;
	int			index;
	struct list_head	clients;
	spinlock_t		clients_lock;
	phys_addr_t		base;
	size_t			size;
};

struct nvmap_device *nvmap_dev;
struct nvmap_stats nvmap_stats;

static struct backing_dev_info nvmap_bdi = {
	.ra_pages	= 0,
	.capabilities	= (BDI_CAP_NO_ACCT_AND_WRITEBACK |
			   BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
};

static struct device_dma_parameters nvmap_dma_parameters = {
	.max_segment_size = UINT_MAX,
};

static int nvmap_open(struct inode *inode, struct file *filp);
static int nvmap_release(struct inode *inode, struct file *filp);
static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static int nvmap_map(struct file *filp, struct vm_area_struct *vma);
static void nvmap_vma_close(struct vm_area_struct *vma);
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct file_operations nvmap_user_fops = {
	.owner		= THIS_MODULE,
	.open		= nvmap_open,
	.release	= nvmap_release,
	.unlocked_ioctl	= nvmap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nvmap_ioctl,
#endif
	.mmap		= nvmap_map,
};

static struct vm_operations_struct nvmap_vma_ops = {
	.open	= nvmap_vma_open,
	.close	= nvmap_vma_close,
	.fault	= nvmap_vma_fault,
};

int is_nvmap_vma(struct vm_area_struct *vma)
{
	return vma->vm_ops == &nvmap_vma_ops;
}

/*
 * Verifies that the passed handle is valid, and returns the calling
 * client's reference to it.
 *
 * Note: the caller must hold the client's ref lock.
 */
struct nvmap_handle_ref *__nvmap_validate_locked(struct nvmap_client *c,
						 struct nvmap_handle *h)
{
	struct rb_node *n = c->handle_refs.rb_node;

	while (n) {
		struct nvmap_handle_ref *ref;
		ref = rb_entry(n, struct nvmap_handle_ref, node);
		if (ref->handle == h)
			return ref;
		else if ((uintptr_t)h > (uintptr_t)ref->handle)
			n = n->rb_right;
		else
			n = n->rb_left;
	}

	return NULL;
}
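
/*
 * Illustrative call pattern (a sketch, not a call site from this file):
 * the lookup takes no locks itself, so callers bracket it with the
 * client's ref lock, e.g.
 *
 *	nvmap_ref_lock(client);
 *	ref = __nvmap_validate_locked(client, h);
 *	if (ref)
 *		atomic_inc(&ref->dupes);
 *	nvmap_ref_unlock(client);
 */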

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b)
{
	struct nvmap_heap *h = nvmap_block_to_heap(b);
	struct nvmap_carveout_node *n;
	int i;

	for (i = 0; i < nvmap_dev->nr_carveouts; i++) {
		n = &nvmap_dev->heaps[i];
		if (n->carveout == h)
			return n->heap_bit;
	}
	return 0;
}

/*
 * This routine flushes carveout memory out of the cache.
 * Why is a cache flush needed for carveout? Consider the case where a piece
 * of carveout is allocated as cached and later released. If the same memory
 * is then allocated to satisfy an uncached request without being flushed out
 * of the cache, the client might hand it to a H/W engine, which could start
 * modifying it. Because the memory was cached earlier, part of it may still
 * sit in the cache. When the CPU later reads or writes other memory, those
 * stale cache lines can be evicted back to main memory, corrupting the
 * buffer if that happens after the H/W engine has written its data.
 *
 * Blindly flushing the memory on every carveout allocation, however, would
 * be redundant.
 *
 * In order to optimize the carveout buffer cache flushes, the following
 * strategy is used:
 *
 * The whole carveout is flushed out of the cache during its initialization.
 * During allocation, carveout buffers are not flushed from the cache.
 * During deallocation, carveout buffers are flushed if they were allocated
 * as cached; if they were allocated as uncached/writecombined, no cache
 * flush is needed. Just draining the store buffers is enough.
 */
int nvmap_flush_heap_block(struct nvmap_client *client,
	struct nvmap_heap_block *block, size_t len, unsigned int prot)
{
	ulong kaddr;
	phys_addr_t phys = block->base;
	phys_addr_t end = block->base + len;
	struct vm_struct *area = NULL;

	if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
		goto out;

#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
	if (len >= cache_maint_inner_threshold) {
		inner_flush_cache_all();
		if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
			outer_flush_range(block->base, block->base + len);
		goto out;
	}
#endif

	area = alloc_vm_area(PAGE_SIZE, NULL);
	if (!area)
		return -ENOMEM;

	kaddr = (ulong)area->addr;

	while (phys < end) {
		phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
		void *base = (void *)kaddr + (phys & ~PAGE_MASK);

		next = min(next, end);
		/* map one page at a time, flush it by virtual address,
		 * then tear the mapping down again */
		ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
			phys, PG_PROT_KERNEL);
		FLUSH_DCACHE_AREA(base, next - phys);
		phys = next;
		unmap_kernel_range(kaddr, PAGE_SIZE);
	}

	if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
		outer_flush_range(block->base, block->base + len);

	free_vm_area(area);
out:
	wmb();
	return 0;
}

struct nvmap_heap_block *do_nvmap_carveout_alloc(struct nvmap_client *client,
					      struct nvmap_handle *handle,
					      unsigned long type)
{
	struct nvmap_carveout_node *co_heap;
	struct nvmap_device *dev = nvmap_dev;
	int i;

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_heap_block *block;
		co_heap = &dev->heaps[i];

		if (!(co_heap->heap_bit & type))
			continue;

		block = nvmap_heap_alloc(co_heap->carveout, handle);
		if (block)
			return block;
	}
	return NULL;
}

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
					      struct nvmap_handle *handle,
					      unsigned long type)
{
	return do_nvmap_carveout_alloc(client, handle, type);
}

/* remove a handle from the device's tree of all handles; called
 * when freeing handles. */
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h)
{
	spin_lock(&dev->handle_lock);

	/* re-test inside the spinlock if the handle really has no clients;
	 * only remove the handle if it is unreferenced */
	if (atomic_add_return(0, &h->ref) > 0) {
		spin_unlock(&dev->handle_lock);
		return -EBUSY;
	}

	BUG_ON(atomic_read(&h->ref) < 0);
	BUG_ON(atomic_read(&h->pin) != 0);

	rb_erase(&h->node, &dev->handles);

	spin_unlock(&dev->handle_lock);
	return 0;
}

/* adds a newly-created handle to the device master tree */
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;

	spin_lock(&dev->handle_lock);
	p = &dev->handles.rb_node;
	while (*p) {
		struct nvmap_handle *b;

		parent = *p;
		b = rb_entry(parent, struct nvmap_handle, node);
		if (h > b)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&h->node, parent, p);
	rb_insert_color(&h->node, &dev->handles);
	spin_unlock(&dev->handle_lock);
}
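
/*
 * Both nvmap_handle_add() above and nvmap_validate_get() below key the
 * master tree on the handle's kernel address (the comparisons are on
 * pointer values), so an ID coming back from user space can only match
 * if it is the address of a handle that is actually in the tree.
 */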

/* Validates that a handle is in the device master tree and that the
 * client has permission to access it. */
struct nvmap_handle *nvmap_validate_get(struct nvmap_handle *id)
{
	struct nvmap_handle *h = NULL;
	struct rb_node *n;

	spin_lock(&nvmap_dev->handle_lock);

	n = nvmap_dev->handles.rb_node;

	while (n) {
		h = rb_entry(n, struct nvmap_handle, node);
		if (h == id) {
			h = nvmap_handle_get(h);
			spin_unlock(&nvmap_dev->handle_lock);
			return h;
		}
		if (id > h)
			n = n->rb_right;
		else
			n = n->rb_left;
	}

	spin_unlock(&nvmap_dev->handle_lock);
	return NULL;
}

struct nvmap_client *__nvmap_create_client(struct nvmap_device *dev,
					   const char *name)
{
	struct nvmap_client *client;
	struct task_struct *task;

	if (WARN_ON(!dev))
		return NULL;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->name = name;
	client->kernel_client = true;
	client->handle_refs = RB_ROOT;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	/* don't bother to store task struct for kernel threads,
	   they can't be killed anyway */
	if (current->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);
	client->task = task;

	mutex_init(&client->ref_lock);
	atomic_set(&client->count, 1);

	spin_lock(&dev->clients_lock);
	list_add(&client->list, &dev->clients);
	spin_unlock(&dev->clients_lock);

	return client;
}

static void destroy_client(struct nvmap_client *client)
{
	struct rb_node *n;

	if (!client)
		return;

	spin_lock(&nvmap_dev->clients_lock);
	list_del(&client->list);
	spin_unlock(&nvmap_dev->clients_lock);

	while ((n = rb_first(&client->handle_refs))) {
		struct nvmap_handle_ref *ref;
		int pins, dupes;

		ref = rb_entry(n, struct nvmap_handle_ref, node);

		smp_rmb();
		pins = atomic_read(&ref->pin);
		while (pins--)
			__nvmap_unpin(ref);

		if (ref->handle->owner == client)
			ref->handle->owner = NULL;

		dma_buf_put(ref->handle->dmabuf);
		rb_erase(&ref->node, &client->handle_refs);
		atomic_dec(&ref->handle->share_count);

		dupes = atomic_read(&ref->dupes);
		while (dupes--)
			nvmap_handle_put(ref->handle);

		nvmap_handle_put(ref->handle);
		kfree(ref);
	}

	if (client->task)
		put_task_struct(client->task);

	kfree(client);
}

struct nvmap_client *nvmap_client_get(struct nvmap_client *client)
{
	if (!virt_addr_valid(client))
		return NULL;

	/* don't resurrect a client whose refcount has already hit zero */
	if (!atomic_add_unless(&client->count, 1, 0))
		return NULL;

	return client;
}

void nvmap_client_put(struct nvmap_client *client)
{
	if (!virt_addr_valid(client))
		return;

	if (!atomic_dec_return(&client->count))
		destroy_client(client);
}

static int nvmap_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct nvmap_device *dev = dev_get_drvdata(miscdev->parent);
	struct nvmap_client *priv;
	int ret;
	__attribute__((unused)) struct rlimit old_rlim, new_rlim;

	ret = nonseekable_open(inode, filp);
	if (unlikely(ret))
		return ret;

	BUG_ON(dev != nvmap_dev);
	priv = __nvmap_create_client(dev, "user");
	if (!priv)
		return -ENOMEM;
	trace_nvmap_open(priv, priv->name);

	priv->kernel_client = false;

	filp->f_mapping->backing_dev_info = &nvmap_bdi;

	filp->private_data = priv;
	return 0;
}

static int nvmap_release(struct inode *inode, struct file *filp)
{
	struct nvmap_client *priv = filp->private_data;

	trace_nvmap_release(priv, priv->name);
	nvmap_client_put(priv);
	return 0;
}

int __nvmap_map(struct nvmap_handle *h, struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;

	h = nvmap_handle_get(h);
	if (!h)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvmap_handle_put(h);
		return -ENOMEM;
	}
	priv->handle = h;

	vma->vm_flags |= VM_SHARED | VM_DONTEXPAND |
			  VM_DONTDUMP | VM_DONTCOPY |
			  (h->heap_pgalloc ? 0 : VM_PFNMAP);
	vma->vm_ops = &nvmap_vma_ops;
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = priv;
	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
	nvmap_vma_open(vma);
	return 0;
}
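
/*
 * Note: __nvmap_map() above is the path that binds a specific handle to a
 * VMA. The file_operations .mmap callback below intentionally installs only
 * vm_ops and flags; the handle is attached afterwards, when user space
 * issues NVMAP_IOC_MMAP (see nvmap_map_into_caller_ptr() dispatched from
 * nvmap_ioctl() below).
 */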

static int nvmap_map(struct file *filp, struct vm_area_struct *vma)
{
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_flags |= (VM_SHARED | VM_DONTEXPAND |
			  VM_DONTDUMP | VM_DONTCOPY);
	vma->vm_ops = &nvmap_vma_ops;
	return 0;
}

static long nvmap_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	void __user *uarg = (void __user *)arg;

	if (_IOC_TYPE(cmd) != NVMAP_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > NVMAP_IOC_MAXNR)
		return -ENOTTY;

	if (_IOC_DIR(cmd) & _IOC_READ)
		err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
	if (_IOC_DIR(cmd) & _IOC_WRITE)
		err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
	if (err)
		return -EFAULT;

	switch (cmd) {
	case NVMAP_IOC_CREATE:
	case NVMAP_IOC_FROM_ID:
	case NVMAP_IOC_FROM_FD:
		err = nvmap_ioctl_create(filp, cmd, uarg);
		break;
	case NVMAP_IOC_GET_ID:
		err = nvmap_ioctl_getid(filp, uarg);
		break;
	case NVMAP_IOC_GET_FD:
		err = nvmap_ioctl_getfd(filp, uarg);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_PARAM_32:
		err = nvmap_ioctl_get_param(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_PARAM:
		err = nvmap_ioctl_get_param(filp, uarg, false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_UNPIN_MULT_32:
	case NVMAP_IOC_PIN_MULT_32:
		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT_32,
					uarg, true);
		break;
#endif
	case NVMAP_IOC_UNPIN_MULT:
	case NVMAP_IOC_PIN_MULT:
		err = nvmap_ioctl_pinop(filp, cmd == NVMAP_IOC_PIN_MULT,
					uarg, false);
		break;
	case NVMAP_IOC_ALLOC:
		err = nvmap_ioctl_alloc(filp, uarg);
		break;
	case NVMAP_IOC_ALLOC_KIND:
		err = nvmap_ioctl_alloc_kind(filp, uarg);
		break;
	case NVMAP_IOC_FREE:
		err = nvmap_ioctl_free(filp, arg);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_MMAP_32:
		err = nvmap_map_into_caller_ptr(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_MMAP:
		err = nvmap_map_into_caller_ptr(filp, uarg, false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_WRITE_32:
	case NVMAP_IOC_READ_32:
		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ_32,
					    uarg, true);
		break;
#endif
	case NVMAP_IOC_WRITE:
	case NVMAP_IOC_READ:
		err = nvmap_ioctl_rw_handle(filp, cmd == NVMAP_IOC_READ, uarg,
					    false);
		break;
#ifdef CONFIG_COMPAT
	case NVMAP_IOC_CACHE_32:
		err = nvmap_ioctl_cache_maint(filp, uarg, true);
		break;
#endif
	case NVMAP_IOC_CACHE:
		err = nvmap_ioctl_cache_maint(filp, uarg, false);
		break;
	case NVMAP_IOC_CACHE_LIST:
	case NVMAP_IOC_RESERVE:
		err = nvmap_ioctl_cache_maint_list(filp, uarg,
						   cmd == NVMAP_IOC_RESERVE);
		break;
	case NVMAP_IOC_SHARE:
		err = nvmap_ioctl_share_dmabuf(filp, uarg);
		break;
	default:
		return -ENOTTY;
	}
	return err;
}
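
/*
 * Illustrative user-space sequence (a sketch only; it assumes the uapi
 * definitions from <linux/nvmap.h> and elides error handling):
 *
 *	int fd = open("/dev/nvmap", O_RDWR);
 *	struct nvmap_create_handle op = { .size = len };
 *
 *	ioctl(fd, NVMAP_IOC_CREATE, &op);      // op.handle is now valid
 *	ioctl(fd, NVMAP_IOC_ALLOC, ...);       // back the handle with memory
 *	ioctl(fd, NVMAP_IOC_FREE, op.handle);  // NVMAP_IOC_FREE takes the
 *					       // handle as the raw argument
 */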

/* to ensure that the backing store for the VMA isn't freed while a fork'd
 * reference still exists, nvmap_vma_open increments the reference count on
 * the handle, and nvmap_vma_close decrements it. alternatively, we could
 * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
 */
void nvmap_vma_open(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv;
	struct nvmap_handle *h;
	struct nvmap_vma_list *vma_list, *tmp;

	priv = vma->vm_private_data;
	BUG_ON(!priv);
	BUG_ON(!priv->handle);

	atomic_inc(&priv->count);
	h = priv->handle;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (vma_list) {
		mutex_lock(&h->lock);
		list_for_each_entry(tmp, &h->vmas, list)
			BUG_ON(tmp->vma == vma);

		vma_list->vma = vma;
		list_add(&vma_list->list, &h->vmas);
		mutex_unlock(&h->lock);
	} else {
		WARN(1, "vma not tracked");
	}
}

static void nvmap_vma_close(struct vm_area_struct *vma)
{
	struct nvmap_vma_priv *priv = vma->vm_private_data;
	struct nvmap_vma_list *vma_list;
	struct nvmap_handle *h;
	bool vma_found = false;

	if (!priv)
		return;

	BUG_ON(!priv->handle);

	h = priv->handle;
	mutex_lock(&h->lock);
	list_for_each_entry(vma_list, &h->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		vma_found = true;
		break;
	}
	BUG_ON(!vma_found);
	mutex_unlock(&h->lock);

	if (__atomic_add_unless(&priv->count, -1, 0) == 1) {
		if (priv->handle)
			nvmap_handle_put(priv->handle);
		vma->vm_private_data = NULL;
		kfree(priv);
	}
}
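
/*
 * Fault handler for nvmap VMAs. Two backing cases are distinguished
 * below: carveout memory (physically contiguous; the faulting offset is
 * translated to a PFN, and PFNs outside the kernel's page map are
 * inserted into the VMA directly) and page-allocated (IOVMM) memory,
 * where the handle's page array is consulted and the page marked dirty.
 */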
static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct nvmap_vma_priv *priv;
	unsigned long offs;

	offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
	priv = vma->vm_private_data;
	if (!priv || !priv->handle || !priv->handle->alloc)
		return VM_FAULT_SIGBUS;

	/* if the VMA was split for some reason, vm_pgoff will be the VMA's
	 * offset from the original VMA */
	offs += (vma->vm_pgoff << PAGE_SHIFT);

	if (offs >= priv->handle->size)
		return VM_FAULT_SIGBUS;

	if (!priv->handle->heap_pgalloc) {
		unsigned long pfn;

		BUG_ON(priv->handle->carveout->base & ~PAGE_MASK);
		pfn = ((priv->handle->carveout->base + offs) >> PAGE_SHIFT);
		if (!pfn_valid(pfn)) {
			vm_insert_pfn(vma,
				(unsigned long)vmf->virtual_address, pfn);
			return VM_FAULT_NOPAGE;
		}
		/* CMA memory would get here */
		page = pfn_to_page(pfn);
	} else {
		offs >>= PAGE_SHIFT;
		if (nvmap_page_reserved(priv->handle->pgalloc.pages[offs]))
			return VM_FAULT_SIGBUS;
		page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
		nvmap_page_mkdirty(&priv->handle->pgalloc.pages[offs]);
	}

	if (page)
		get_page(page);
	vmf->page = page;
	return (page) ? 0 : VM_FAULT_SIGBUS;
}

#define DEBUGFS_OPEN_FOPS(name) \
static int nvmap_debug_##name##_open(struct inode *inode, \
				     struct file *file) \
{ \
	return single_open(file, nvmap_debug_##name##_show, \
			   inode->i_private); \
} \
\
static const struct file_operations debug_##name##_fops = { \
	.open = nvmap_debug_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

/* bytes -> KiB, for the statistics printed below */
#define K(x) (x >> 10)

static void client_stringify(struct nvmap_client *client, struct seq_file *s)
{
	char task_comm[TASK_COMM_LEN];

	if (!client->task) {
		seq_printf(s, "%-18s %18s %8u", client->name, "kernel", 0);
		return;
	}
	get_task_comm(task_comm, client->task);
	seq_printf(s, "%-18s %18s %8u", client->name, task_comm,
		   client->task->pid);
}

static void allocations_stringify(struct nvmap_client *client,
				  struct seq_file *s, u32 heap_type)
{
	struct rb_node *n;

	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *handle = ref->handle;
		if (handle->alloc && handle->heap_type == heap_type) {
			phys_addr_t base = heap_type == NVMAP_HEAP_IOVMM ? 0 :
					   (handle->carveout->base);
			seq_printf(s,
				"%-18s %-18s %8llx %10zuK %8x %6u %6u %6u %6u %6u %6u %8p\n",
				"", "",
				(unsigned long long)base, K(handle->size),
				handle->userflags,
				atomic_read(&handle->ref),
				atomic_read(&ref->dupes),
				atomic_read(&ref->pin),
				atomic_read(&handle->kmap_count),
				atomic_read(&handle->umap_count),
				atomic_read(&handle->share_count),
				handle);
		}
	}
	nvmap_ref_unlock(client);
}

static void nvmap_get_client_mss(struct nvmap_client *client,
				 u64 *total, u32 heap_type)
{
	struct rb_node *n;

	*total = 0;
	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *handle = ref->handle;
		if (handle->alloc && handle->heap_type == heap_type)
			*total += handle->size /
				  atomic_read(&handle->share_count);
	}
	nvmap_ref_unlock(client);
}

static void nvmap_get_total_mss(u64 *pss, u64 *non_pss,
				u64 *total, u32 heap_type)
{
	int i;
	struct rb_node *n;
	struct nvmap_device *dev = nvmap_dev;

	*total = 0;
	if (pss)
		*pss = 0;
	if (non_pss)
		*non_pss = 0;
	if (!dev)
		return;
	spin_lock(&dev->handle_lock);
	n = rb_first(&dev->handles);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle *h =
			rb_entry(n, struct nvmap_handle, node);

		if (!h || !h->alloc || h->heap_type != heap_type)
			continue;
		if (!non_pss) {
			*total += h->size;
			continue;
		}

		for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
			struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
			int mapcount = page_mapcount(page);

			if (!mapcount)
				*non_pss += PAGE_SIZE;
			*total += PAGE_SIZE;
		}
	}
	if (pss)
		*pss = *total - *non_pss;
	spin_unlock(&dev->handle_lock);
}

static int nvmap_debug_allocations_show(struct seq_file *s, void *unused)
{
	u64 total;
	struct nvmap_client *client;
	u32 heap_type = (u32)(uintptr_t)s->private;

	spin_lock(&nvmap_dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s\n",
		"CLIENT", "PROCESS", "PID", "SIZE");
	seq_printf(s, "%-18s %18s %8s %11s %8s %6s %6s %6s %6s %6s %6s %8s\n",
			"", "", "BASE", "SIZE", "FLAGS", "REFS",
			"DUPES", "PINS", "KMAPS", "UMAPS", "SHARE", "UID");
	list_for_each_entry(client, &nvmap_dev->clients, list) {
		u64 client_total;

		client_stringify(client, s);
		nvmap_get_client_mss(client, &client_total, heap_type);
		seq_printf(s, " %10lluK\n", K(client_total));
		allocations_stringify(client, s, heap_type);
		seq_printf(s, "\n");
	}
	spin_unlock(&nvmap_dev->clients_lock);
	nvmap_get_total_mss(NULL, NULL, &total, heap_type);
	seq_printf(s, "%-18s %-18s %8s %10lluK\n", "total", "", "", K(total));
	return 0;
}
DEBUGFS_OPEN_FOPS(allocations);

static int nvmap_debug_clients_show(struct seq_file *s, void *unused)
{
	u64 total;
	struct nvmap_client *client;
	ulong heap_type = (ulong)s->private;

	spin_lock(&nvmap_dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s\n",
		"CLIENT", "PROCESS", "PID", "SIZE");
	list_for_each_entry(client, &nvmap_dev->clients, list) {
		u64 client_total;

		client_stringify(client, s);
		nvmap_get_client_mss(client, &client_total, heap_type);
		seq_printf(s, " %10lluK\n", K(client_total));
	}
	spin_unlock(&nvmap_dev->clients_lock);
	nvmap_get_total_mss(NULL, NULL, &total, heap_type);
	seq_printf(s, "%-18s %18s %8s %10lluK\n", "total", "", "", K(total));
	return 0;
}
DEBUGFS_OPEN_FOPS(clients);

#define PRINT_MEM_STATS_NOTE(x) \
do { \
	seq_printf(s, "Note: total memory is a precise account of pages " \
		"allocated by NvMap.\nIt doesn't match the sum of all " \
		"clients' \"%s\" because shared memory\nis accounted in " \
		"full in every client's \"%s\" that shares it.\n", #x, #x); \
} while (0)

static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
				       u64 *non_pss, u64 *total)
{
	int i;
	struct rb_node *n;

	*pss = *non_pss = *total = 0;
	nvmap_ref_lock(client);
	n = rb_first(&client->handle_refs);
	for (; n != NULL; n = rb_next(n)) {
		struct nvmap_handle_ref *ref =
			rb_entry(n, struct nvmap_handle_ref, node);
		struct nvmap_handle *h = ref->handle;

		if (!h || !h->alloc || !h->heap_pgalloc)
			continue;

		for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
			struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
			int mapcount = page_mapcount(page);

			if (!mapcount)
				*non_pss += PAGE_SIZE;
			*total += PAGE_SIZE;
		}
		*pss = *total - *non_pss;
	}
	nvmap_ref_unlock(client);
}

static int nvmap_debug_iovmm_procrank_show(struct seq_file *s, void *unused)
{
	u64 pss, non_pss, total;
	struct nvmap_client *client;
	struct nvmap_device *dev = s->private;
	u64 total_memory, total_pss, total_non_pss;

	spin_lock(&dev->clients_lock);
	seq_printf(s, "%-18s %18s %8s %11s %11s %11s\n",
		"CLIENT", "PROCESS", "PID", "PSS", "NON-PSS", "TOTAL");
	list_for_each_entry(client, &dev->clients, list) {
		client_stringify(client, s);
		nvmap_iovmm_get_client_mss(client, &pss, &non_pss, &total);
		seq_printf(s, " %10lluK %10lluK %10lluK\n", K(pss),
			K(non_pss), K(total));
	}
	spin_unlock(&dev->clients_lock);

	nvmap_get_total_mss(&total_pss, &total_non_pss, &total_memory,
			    NVMAP_HEAP_IOVMM);
	seq_printf(s, "%-18s %18s %8s %10lluK %10lluK %10lluK\n",
		"total", "", "", K(total_pss),
		K(total_non_pss), K(total_memory));
	PRINT_MEM_STATS_NOTE(TOTAL);
	return 0;
}
DEBUGFS_OPEN_FOPS(iovmm_procrank);

ulong nvmap_iovmm_get_used_pages(void)
{
	u64 total;

	nvmap_get_total_mss(NULL, NULL, &total, NVMAP_HEAP_IOVMM);
	return total >> PAGE_SHIFT;
}

static int nvmap_stats_reset(void *data, u64 val)
{
	int i;

	if (val) {
		atomic64_set(&nvmap_stats.collect, 0);
		for (i = 0; i < NS_NUM; i++) {
			if (i == NS_TOTAL)
				continue;
			atomic64_set(&nvmap_stats.stats[i], 0);
		}
	}
	return 0;
}

static int nvmap_stats_get(void *data, u64 *val)
{
	atomic64_t *ptr = data;

	*val = atomic64_read(ptr);
	return 0;
}

static int nvmap_stats_set(void *data, u64 val)
{
	atomic64_t *ptr = data;

	atomic64_set(ptr, val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(reset_stats_fops, NULL, nvmap_stats_reset, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(stats_fops, nvmap_stats_get, nvmap_stats_set, "%llu\n");

static void nvmap_stats_init(struct dentry *nvmap_debug_root)
{
	struct dentry *stats_root;

#define CREATE_DF(x, y) \
	debugfs_create_file(#x, S_IRUGO, stats_root, &y, &stats_fops)

	stats_root = debugfs_create_dir("stats", nvmap_debug_root);
	if (!IS_ERR_OR_NULL(stats_root)) {
		CREATE_DF(alloc, nvmap_stats.stats[NS_ALLOC]);
		CREATE_DF(release, nvmap_stats.stats[NS_RELEASE]);
		CREATE_DF(ualloc, nvmap_stats.stats[NS_UALLOC]);
		CREATE_DF(urelease, nvmap_stats.stats[NS_URELEASE]);
		CREATE_DF(kalloc, nvmap_stats.stats[NS_KALLOC]);
		CREATE_DF(krelease, nvmap_stats.stats[NS_KRELEASE]);
		CREATE_DF(cflush_rq, nvmap_stats.stats[NS_CFLUSH_RQ]);
		CREATE_DF(cflush_done, nvmap_stats.stats[NS_CFLUSH_DONE]);
		CREATE_DF(ucflush_rq, nvmap_stats.stats[NS_UCFLUSH_RQ]);
		CREATE_DF(ucflush_done, nvmap_stats.stats[NS_UCFLUSH_DONE]);
		CREATE_DF(kcflush_rq, nvmap_stats.stats[NS_KCFLUSH_RQ]);
		CREATE_DF(kcflush_done, nvmap_stats.stats[NS_KCFLUSH_DONE]);
		CREATE_DF(total_memory, nvmap_stats.stats[NS_TOTAL]);

		debugfs_create_file("collect", S_IRUGO | S_IWUSR,
			stats_root, &nvmap_stats.collect, &stats_fops);
		debugfs_create_file("reset", S_IWUSR,
			stats_root, NULL, &reset_stats_fops);
	}
}
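
/*
 * With debugfs mounted at the usual location, the files created above show
 * up under /sys/kernel/debug/nvmap/stats/ (illustrative):
 *
 *	cat /sys/kernel/debug/nvmap/stats/total_memory
 *	echo 1 > /sys/kernel/debug/nvmap/stats/collect   # enable counting
 *	echo 1 > /sys/kernel/debug/nvmap/stats/reset     # zero the counters
 */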

void nvmap_stats_inc(enum nvmap_stats_t stat, size_t size)
{
	if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
		atomic64_add(size, &nvmap_stats.stats[stat]);
}

void nvmap_stats_dec(enum nvmap_stats_t stat, size_t size)
{
	if (atomic64_read(&nvmap_stats.collect) || stat == NS_TOTAL)
		atomic64_sub(size, &nvmap_stats.stats[stat]);
}

u64 nvmap_stats_read(enum nvmap_stats_t stat)
{
	return atomic64_read(&nvmap_stats.stats[stat]);
}
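
/*
 * Typical instrumentation pattern (a sketch; the real call sites live in
 * the allocation paths elsewhere in the driver):
 *
 *	nvmap_stats_inc(NS_TOTAL, size);
 *	nvmap_stats_inc(NS_ALLOC, size);
 *	...
 *	nvmap_stats_dec(NS_TOTAL, size);
 *
 * NS_TOTAL is special-cased in the helpers above so that total_memory
 * stays accurate even while "collect" is disabled.
 */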

static int nvmap_probe(struct platform_device *pdev)
{
	struct nvmap_platform_data *plat = pdev->dev.platform_data;
	struct nvmap_device *dev;
	struct dentry *nvmap_debug_root;
	unsigned int i;
	int e = 0;

	if (!plat) {
		dev_err(&pdev->dev, "no platform data?\n");
		return -ENODEV;
	}

	/*
	 * The DMA mapping API uses these parameters to decide how to map the
	 * passed buffers. If the maximum physical segment size is set to
	 * smaller than the size of the buffer, then the buffer will be mapped
	 * as separate IO virtual address ranges.
	 */
	pdev->dev.dma_parms = &nvmap_dma_parameters;

	if (WARN_ON(nvmap_dev != NULL)) {
		dev_err(&pdev->dev, "only one nvmap device may be present\n");
		return -ENODEV;
	}

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "out of memory for device\n");
		return -ENOMEM;
	}

	nvmap_dev = dev;

	dev->dev_user.minor = MISC_DYNAMIC_MINOR;
	dev->dev_user.name = "nvmap";
	dev->dev_user.fops = &nvmap_user_fops;
	dev->dev_user.parent = &pdev->dev;

	dev->handles = RB_ROOT;

#ifdef CONFIG_NVMAP_PAGE_POOLS
	e = nvmap_page_pool_init(dev);
	if (e)
		goto fail;
#endif

	spin_lock_init(&dev->handle_lock);
	INIT_LIST_HEAD(&dev->clients);
	spin_lock_init(&dev->clients_lock);

	e = misc_register(&dev->dev_user);
	if (e) {
		dev_err(&pdev->dev, "unable to register miscdevice %s\n",
			dev->dev_user.name);
		goto fail;
	}

	dev->nr_carveouts = 0;
	dev->heaps = kzalloc(sizeof(struct nvmap_carveout_node) *
			     plat->nr_carveouts, GFP_KERNEL);
	if (!dev->heaps) {
		e = -ENOMEM;
		dev_err(&pdev->dev, "couldn't allocate carveout memory\n");
		goto fail;
	}

	nvmap_debug_root = debugfs_create_dir("nvmap", NULL);
	if (IS_ERR_OR_NULL(nvmap_debug_root))
		dev_err(&pdev->dev, "couldn't create debug files\n");

	debugfs_create_u32("max_handle_count", S_IRUGO,
			nvmap_debug_root, &nvmap_max_handle_count);

	for (i = 0; i < plat->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
		const struct nvmap_platform_carveout *co = &plat->carveouts[i];
		node->base = round_up(co->base, PAGE_SIZE);
		node->size = round_down(co->size -
					(node->base - co->base), PAGE_SIZE);
		if (!co->size)
			continue;

		node->carveout = nvmap_heap_create(
				dev->dev_user.this_device, co,
				node->base, node->size, node);

		if (!node->carveout) {
			e = -ENOMEM;
			dev_err(&pdev->dev, "couldn't create %s\n", co->name);
			continue;
		}
		node->index = dev->nr_carveouts;
		dev->nr_carveouts++;
		spin_lock_init(&node->clients_lock);
		INIT_LIST_HEAD(&node->clients);
		node->heap_bit = co->usage_mask;

		if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
			struct dentry *heap_root =
				debugfs_create_dir(co->name, nvmap_debug_root);
			if (!IS_ERR_OR_NULL(heap_root)) {
				debugfs_create_file("clients", S_IRUGO,
					heap_root,
					(void *)(uintptr_t)node->heap_bit,
					&debug_clients_fops);
				debugfs_create_file("allocations", S_IRUGO,
					heap_root,
					(void *)(uintptr_t)node->heap_bit,
					&debug_allocations_fops);
				nvmap_heap_debugfs_init(heap_root,
							node->carveout);
			}
		}
	}
	if (!IS_ERR_OR_NULL(nvmap_debug_root)) {
		struct dentry *iovmm_root =
			debugfs_create_dir("iovmm", nvmap_debug_root);
		if (!IS_ERR_OR_NULL(iovmm_root)) {
			debugfs_create_file("clients", S_IRUGO, iovmm_root,
				(void *)(uintptr_t)NVMAP_HEAP_IOVMM,
				&debug_clients_fops);
			debugfs_create_file("allocations", S_IRUGO, iovmm_root,
				(void *)(uintptr_t)NVMAP_HEAP_IOVMM,
				&debug_allocations_fops);
			debugfs_create_file("procrank", S_IRUGO, iovmm_root,
				dev, &debug_iovmm_procrank_fops);
		}
#ifdef CONFIG_NVMAP_PAGE_POOLS
		nvmap_page_pool_debugfs_init(nvmap_debug_root);
#endif
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
		debugfs_create_size_t("cache_maint_inner_threshold",
				      S_IRUSR | S_IWUSR,
				      nvmap_debug_root,
				      &cache_maint_inner_threshold);

		/* cortex-a9 has a smaller L2, so lower the threshold */
		if ((read_cpuid_id() >> 4 & 0xfff) == 0xc09)
			cache_maint_inner_threshold = SZ_32K;
		pr_info("nvmap:inner cache maint threshold=%zd",
			cache_maint_inner_threshold);
#endif
#ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
		debugfs_create_size_t("cache_maint_outer_threshold",
				      S_IRUSR | S_IWUSR,
				      nvmap_debug_root,
				      &cache_maint_outer_threshold);
		pr_info("nvmap:outer cache maint threshold=%zd",
			cache_maint_outer_threshold);
#endif
	}

	nvmap_stats_init(nvmap_debug_root);
	platform_set_drvdata(pdev, dev);

	nvmap_dmabuf_debugfs_init(nvmap_debug_root);
	e = nvmap_dmabuf_stash_init();
	if (e)
		goto fail_heaps;

	return 0;

fail_heaps:
	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_destroy(node->carveout);
	}
fail:
#ifdef CONFIG_NVMAP_PAGE_POOLS
	nvmap_page_pool_fini(nvmap_dev);
#endif
	kfree(dev->heaps);
	if (dev->dev_user.minor != MISC_DYNAMIC_MINOR)
		misc_deregister(&dev->dev_user);
	kfree(dev);
	nvmap_dev = NULL;
	return e;
}

static int nvmap_remove(struct platform_device *pdev)
{
	struct nvmap_device *dev = platform_get_drvdata(pdev);
	struct rb_node *n;
	struct nvmap_handle *h;
	int i;

	misc_deregister(&dev->dev_user);

	while ((n = rb_first(&dev->handles))) {
		h = rb_entry(n, struct nvmap_handle, node);
		rb_erase(&h->node, &dev->handles);
		kfree(h);
	}

	for (i = 0; i < dev->nr_carveouts; i++) {
		struct nvmap_carveout_node *node = &dev->heaps[i];
		nvmap_heap_destroy(node->carveout);
	}
	kfree(dev->heaps);

	kfree(dev);
	nvmap_dev = NULL;
	return 0;
}

static int nvmap_suspend(struct platform_device *pdev, pm_message_t state)
{
	return 0;
}

static int nvmap_resume(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver nvmap_driver = {
	.probe		= nvmap_probe,
	.remove		= nvmap_remove,
	.suspend	= nvmap_suspend,
	.resume		= nvmap_resume,

	.driver = {
		.name	= "tegra-nvmap",
		.owner	= THIS_MODULE,
	},
};

static int __init nvmap_init_driver(void)
{
	int e;

	nvmap_dev = NULL;

	e = nvmap_heap_init();
	if (e)
		goto fail;

	e = platform_driver_register(&nvmap_driver);
	if (e) {
		nvmap_heap_deinit();
		goto fail;
	}

fail:
	return e;
}
fs_initcall(nvmap_init_driver);

static void __exit nvmap_exit_driver(void)
{
	platform_driver_unregister(&nvmap_driver);
	nvmap_heap_deinit();
	nvmap_dev = NULL;
}
module_exit(nvmap_exit_driver);