2 * drivers/video/tegra/nvmap/nvmap_handle.c
4 * Handle allocation and freeing routines for nvmap
6 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
23 #define pr_fmt(fmt) "%s: " fmt, __func__
25 #include <linux/err.h>
27 #include <linux/kernel.h>
28 #include <linux/list.h>
30 #include <linux/rbtree.h>
31 #include <linux/dma-buf.h>
32 #include <linux/moduleparam.h>
33 #include <linux/nvmap.h>
34 #include <linux/tegra-soc.h>
36 #include <asm/pgtable.h>
38 #include <trace/events/nvmap.h>
40 #include "nvmap_priv.h"
41 #include "nvmap_ioctl.h"
/*
 * zero_memory_set - module-parameter "set" hook for the zero_memory knob.
 * Stores the new bool via param_set_bool() and then drains the nvmap page
 * pool, presumably so that previously pooled (non-zeroed) pages cannot be
 * handed out after the policy change — TODO confirm against full source.
 * NOTE(review): this excerpt is missing lines here (no visible return
 * statement or closing brace).
 */
45 static int zero_memory_set(const char *arg, const struct kernel_param *kp)
47 param_set_bool(arg, kp);
48 nvmap_page_pool_clear();
/*
 * Parameter ops for /sys/module/.../parameters/zero_memory (mode 0644):
 * reads use the stock bool getter, writes go through zero_memory_set()
 * above so the page pool is cleared on every update.
 */
52 static struct kernel_param_ops zero_memory_ops = {
53 .get = param_get_bool,
54 .set = zero_memory_set,
57 module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);
59 u32 nvmap_max_handle_count;
61 /* handles may be arbitrarily large (16+MiB), and any handle allocated from
62 * the kernel (i.e., not a carveout handle) includes its array of pages. to
63 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
64 * the array is allocated using vmalloc. */
65 #define PAGELIST_VMALLOC_MIN (PAGE_SIZE)
/*
 * nvmap_altalloc - allocate @len bytes for a page-pointer array.
 * Per the comment above PAGELIST_VMALLOC_MIN: requests larger than one
 * page presumably fall back to vmalloc (that branch's body is not visible
 * in this excerpt); smaller requests use kmalloc(GFP_KERNEL).
 * Pair every allocation with nvmap_altfree(ptr, len).
 */
67 void *nvmap_altalloc(size_t len)
69 if (len > PAGELIST_VMALLOC_MIN)
72 return kmalloc(len, GFP_KERNEL);
/*
 * nvmap_altfree - release memory obtained from nvmap_altalloc().
 * @len must match the length passed at allocation time so the same
 * size threshold selects the matching free routine (vfree vs. kfree,
 * presumably — the branch bodies are not visible in this excerpt).
 */
75 void nvmap_altfree(void *ptr, size_t len)
80 if (len > PAGELIST_VMALLOC_MIN)
/*
 * _nvmap_handle_free - final teardown of a handle once its refcount drops.
 * Removes the handle from the device's handle tree, adjusts allocation
 * statistics, then frees the backing storage: carveout block for
 * non-pgalloc handles, otherwise the page array (returning as many pages
 * as possible to the page pool before freeing the remainder).
 * NOTE(review): the excerpt is missing lines throughout this function
 * (error paths, #else/#endif arms, closing braces not visible).
 */
86 void _nvmap_handle_free(struct nvmap_handle *h)
88 unsigned int i, nr_page, page_index = 0;
89 #if defined(CONFIG_NVMAP_PAGE_POOLS) && \
90 !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
91 struct nvmap_page_pool *pool;
/* host1x private data hook — presumably guarded by a NULL check on a
 * line not shown here; TODO confirm. */
95 h->nvhost_priv_delete(h->nvhost_priv);
97 if (nvmap_handle_remove(nvmap_dev, h) != 0)
103 nvmap_stats_inc(NS_RELEASE, h->size);
104 nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
/* Carveout-backed handle: free the heap block and (presumably) return. */
105 if (!h->heap_pgalloc) {
106 nvmap_heap_free(h->carveout);
110 nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
/* pgalloc handles must be whole pages with a populated page array. */
112 BUG_ON(h->size & ~PAGE_MASK);
113 BUG_ON(!h->pgalloc.pages);
115 #ifdef NVMAP_LAZY_VFREE
/* Lazily created kernel mapping is torn down before freeing pages. */
117 vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
/* Strip nvmap's tagging from each entry so plain struct page pointers
 * are handed to the pool / page allocator. */
120 for (i = 0; i < nr_page; i++)
121 h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
123 #if defined(CONFIG_NVMAP_PAGE_POOLS) && \
124 !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
126 pool = &nvmap_dev->pool;
/* Refill the pool first; page_index is how many pages it absorbed. */
128 nvmap_page_pool_lock(pool);
129 page_index = __nvmap_page_pool_fill_lots_locked(pool,
130 h->pgalloc.pages, nr_page);
131 nvmap_page_pool_unlock(pool);
/* Whatever the pool did not take goes back to the buddy allocator. */
135 for (i = page_index; i < nr_page; i++)
136 __free_page(h->pgalloc.pages[i]);
138 nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
/*
 * nvmap_alloc_pages_exact - allocate exactly size/PAGE_SIZE pages.
 * Rounds @size up to a page, grabs a 2^order block, splits it into
 * individual pages, then walks the tail beyond the requested size —
 * presumably freeing those surplus pages (the loop body and return are
 * not visible in this excerpt; same idea as alloc_pages_exact()).
 */
144 static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
146 struct page *page, *p, *e;
149 size = PAGE_ALIGN(size);
150 order = get_order(size);
151 page = alloc_pages(gfp, order);
/* split_page() makes each constituent page independently freeable. */
156 split_page(page, order);
157 e = page + (1 << order);
158 for (p = page + (size >> PAGE_SHIFT); p < e; p++)
/*
 * handle_page_alloc - back a handle with system pages.
 * Contiguous requests use one nvmap_alloc_pages_exact() of the full size;
 * otherwise pages come from the page pool where possible, topped up
 * page-by-page from the allocator. Caches are cleaned/flushed before the
 * pages can reach userspace. On success the page array and dirty counter
 * are installed in h->pgalloc. Returns int — presumably 0 on success and
 * -ENOMEM on failure (return statements are not visible in this excerpt).
 * NOTE(review): excerpt is missing lines (declarations of pages/page/prot,
 * error-path labels, closing braces).
 */
164 static int handle_page_alloc(struct nvmap_client *client,
165 struct nvmap_handle *h, bool contiguous)
167 size_t size = PAGE_ALIGN(h->size);
168 unsigned int nr_page = size >> PAGE_SHIFT;
170 unsigned int i = 0, page_index = 0;
172 #ifdef CONFIG_NVMAP_PAGE_POOLS
173 struct nvmap_page_pool *pool = NULL;
175 gfp_t gfp = GFP_NVMAP;
/* Page-pointer array; may be kmalloc'd or vmalloc'd depending on size. */
180 pages = nvmap_altalloc(nr_page * sizeof(*pages));
184 prot = nvmap_pgprot(h, PG_PROT_KERNEL);
/* Contiguous path: one physically contiguous run covers the handle. */
188 page = nvmap_alloc_pages_exact(gfp, size);
192 for (i = 0; i < nr_page; i++)
193 pages[i] = nth_page(page, i);
196 #ifdef CONFIG_NVMAP_PAGE_POOLS
197 pool = &nvmap_dev->pool;
200 * Get as many pages from the pools as possible.
202 nvmap_page_pool_lock(pool);
203 page_index = __nvmap_page_pool_alloc_lots_locked(pool, pages,
205 nvmap_page_pool_unlock(pool);
/* Allocate the shortfall one page at a time. */
207 for (i = page_index; i < nr_page; i++) {
208 pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
215 * Make sure any data in the caches is cleaned out before
216 * passing these pages to userspace. otherwise, It can lead to
217 * corruption in pages that get mapped as something other than WB in
218 * userspace and leaked kernel data structures.
220 * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
221 * to use the flush cache version.
224 nvmap_clean_cache(pages, nr_page);
226 nvmap_flush_cache(pages, nr_page);
230 h->pgalloc.pages = pages;
231 h->pgalloc.contig = contiguous;
232 atomic_set(&h->pgalloc.ndirty, 0);
/* Error path (label not visible): free pages allocated so far, then the
 * array itself. */
237 __free_page(pages[i]);
238 nvmap_altfree(pages, nr_page * sizeof(*pages));
/*
 * alloc_handle - try to satisfy a handle allocation from one heap @type.
 * @type must be a single heap bit (enforced by the power-of-two BUG_ON).
 * Carveout types go through nvmap_carveout_alloc(); IOVMM types through
 * handle_page_alloc(). With CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM the
 * generic carveout bit is rerouted to the IOVMM path.
 * NOTE(review): excerpt is missing lines (success assignments such as
 * h->alloc, error handling, closing braces not visible).
 */
243 static void alloc_handle(struct nvmap_client *client,
244 struct nvmap_handle *h, unsigned int type)
246 unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
247 unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
/* exactly one heap bit may be set */
249 BUG_ON(type & (type - 1));
251 #ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
252 /* Convert generic carveout requests to iovmm requests. */
253 carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
254 iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
257 if (type & carveout_mask) {
258 struct nvmap_heap_block *b;
260 b = nvmap_carveout_alloc(client, h, type);
263 h->heap_pgalloc = false;
264 /* barrier to ensure all handle alloc data
265 * is visible before alloc is seen by other
271 } else if (type & iovmm_mask) {
/* Honor userspace's request for physically contiguous backing. */
274 ret = handle_page_alloc(client, h,
275 h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
278 h->heap_type = NVMAP_HEAP_IOVMM;
279 h->heap_pgalloc = true;
285 /* small allocations will try to allocate from generic OS memory before
286 * any of the limited heaps, to increase the effective memory for graphics
287 * allocations, and to reduce fragmentation of the graphics heaps with
288 * sub-page splinters */
/* Ordered heap-probe lists consumed by nvmap_alloc_handle(); each entry
 * is masked against the caller's heap_mask, MSB-first within an entry.
 * NOTE(review): excerpt is missing lines — the IOVMM entries and the
 * zero terminators / closing braces of both arrays are not visible. */
289 static const unsigned int heap_policy_small[] = {
290 NVMAP_HEAP_CARVEOUT_VPR,
291 NVMAP_HEAP_CARVEOUT_IRAM,
292 NVMAP_HEAP_CARVEOUT_MASK,
/* Large allocations prefer the dedicated heaps before generic memory. */
297 static const unsigned int heap_policy_large[] = {
298 NVMAP_HEAP_CARVEOUT_VPR,
299 NVMAP_HEAP_CARVEOUT_IRAM,
301 NVMAP_HEAP_CARVEOUT_MASK,
/*
 * nvmap_alloc_handle - allocate backing storage for an existing handle.
 * Records userflags/cache flags/alignment, picks the small- or large-
 * allocation heap policy by page count, then walks the policy list,
 * masking each entry against @heap_mask and probing the allowed heaps
 * MSB-to-LSB via alloc_handle() until h->alloc is set. Success/failure
 * statistics are updated accordingly. Returns 0 if h->alloc ended up set.
 * NOTE(review): excerpt is missing lines (parameter list tail — align and
 * flags are used but their declarations are not visible — plus the
 * already-allocated check, error label, and closing braces).
 */
305 int nvmap_alloc_handle(struct nvmap_client *client,
306 struct nvmap_handle *h, unsigned int heap_mask,
311 const unsigned int *alloc_policy;
/* Hold a ref across the allocation; released on a line not shown here. */
315 h = nvmap_handle_get(h);
325 nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
326 nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
327 trace_nvmap_alloc_handle(client, h,
328 h->size, heap_mask, align, flags,
329 nvmap_stats_read(NS_TOTAL),
330 nvmap_stats_read(NS_ALLOC));
331 h->userflags = flags;
332 nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
333 h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
334 h->align = max_t(size_t, align, L1_CACHE_BYTES);
337 /* convert iovmm requests to generic carveout. */
338 if (heap_mask & NVMAP_HEAP_IOVMM) {
339 heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
340 NVMAP_HEAP_CARVEOUT_GENERIC;
/* Single-page handles use the small policy to spare the carveouts. */
348 alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
350 while (!h->alloc && *alloc_policy) {
351 unsigned int heap_type;
353 heap_type = *alloc_policy++;
354 heap_type &= heap_mask;
/* Don't retry heaps already attempted by an earlier policy entry. */
359 heap_mask &= ~heap_type;
361 while (heap_type && !h->alloc) {
364 /* iterate possible heaps MSB-to-LSB, since higher-
365 * priority carveouts will have higher usage masks */
366 heap = 1 << __fls(heap_type);
367 alloc_handle(client, h, heap);
/* Account the allocation to kernel or user stats by client type. */
374 if (client->kernel_client)
375 nvmap_stats_inc(NS_KALLOC, h->size);
377 nvmap_stats_inc(NS_UALLOC, h->size);
/* Failure path: back out the optimistic NS_TOTAL/NS_ALLOC bump. */
379 nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
380 nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
383 err = (h->alloc) ? 0 : err;
/*
 * nvmap_free_handle - drop one client reference on @handle.
 * Validates that the client actually holds a ref; if this was a duplicate,
 * only the dupe count drops. On the last client ref the node is removed
 * from the client's rbtree, carveout commit accounting is subtracted,
 * pinned handles are drained (with a warning), and the dma_buf reference
 * owned by the handle_ref is released.
 * NOTE(review): excerpt is missing lines (h = ref->handle assignment,
 * kfree(ref), early-return on validation failure, closing braces).
 */
388 void nvmap_free_handle(struct nvmap_client *client,
389 struct nvmap_handle *handle)
391 struct nvmap_handle_ref *ref;
392 struct nvmap_handle *h;
395 nvmap_ref_lock(client);
397 ref = __nvmap_validate_locked(client, handle);
399 nvmap_ref_unlock(client);
403 trace_nvmap_free_handle(client, handle);
404 BUG_ON(!ref->handle);
/* Still duplicated elsewhere in this client: just drop the dupe. */
407 if (atomic_dec_return(&ref->dupes)) {
408 nvmap_ref_unlock(client);
/* Last reference from this client: unlink the ref node. */
413 pins = atomic_read(&ref->pin);
414 rb_erase(&ref->node, &client->handle_refs);
415 client->handle_count--;
416 atomic_dec(&ref->handle->share_count);
/* Carveout-backed handles release their commit accounting under h->lock. */
418 if (h->alloc && !h->heap_pgalloc) {
419 mutex_lock(&h->lock);
420 nvmap_carveout_commit_subtract(client,
421 nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
423 mutex_unlock(&h->lock);
426 nvmap_ref_unlock(client);
429 pr_debug("%s freeing pinned handle %p\n",
430 current->group_leader->comm, h);
/* Spin until outstanding pins are gone — unpin calls presumably occur
 * on lines not shown here; TODO confirm. */
432 while (atomic_read(&ref->pin))
435 if (h->owner == client)
/* Drops the dma_buf ref taken when this handle_ref was created. */
438 dma_buf_put(ref->handle->dmabuf);
442 BUG_ON(!atomic_read(&h->ref));
445 EXPORT_SYMBOL(nvmap_free_handle);
/*
 * nvmap_free_handle_user_id - free by userspace id.
 * Thin wrapper: translates the opaque user-space id back to a
 * struct nvmap_handle * and delegates to nvmap_free_handle().
 */
447 void nvmap_free_handle_user_id(struct nvmap_client *client,
448 unsigned long user_id)
450 nvmap_free_handle(client, unmarshal_user_id(user_id));
/*
 * add_handle_ref - insert @ref into the client's handle_refs rbtree.
 * The tree is keyed by handle pointer value. Also bumps the client's
 * handle count (tracking the global high-water mark in
 * nvmap_max_handle_count) and the handle's share count.
 * Runs under the client ref lock.
 * NOTE(review): excerpt is missing lines (the while-loop header walking
 * the tree and closing braces are not visible).
 */
453 static void add_handle_ref(struct nvmap_client *client,
454 struct nvmap_handle_ref *ref)
456 struct rb_node **p, *parent = NULL;
458 nvmap_ref_lock(client);
459 p = &client->handle_refs.rb_node;
461 struct nvmap_handle_ref *node;
463 node = rb_entry(parent, struct nvmap_handle_ref, node);
/* Descend by comparing raw handle pointer values. */
464 if (ref->handle > node->handle)
465 p = &parent->rb_right;
467 p = &parent->rb_left;
469 rb_link_node(&ref->node, parent, p);
470 rb_insert_color(&ref->node, &client->handle_refs);
471 client->handle_count++;
/* Track the per-client high-water mark (global, unsynchronized stat). */
472 if (client->handle_count > nvmap_max_handle_count)
473 nvmap_max_handle_count = client->handle_count;
474 atomic_inc(&ref->handle->share_count);
475 nvmap_ref_unlock(client);
/*
 * nvmap_create_handle - create a new unallocated handle of @size bytes.
 * Allocates the handle and its first handle_ref, creates the backing
 * dma_buf (which carries one ref corresponding to this handle_ref),
 * pre-attaches nvmap's device to the dma_buf, registers the handle with
 * the device, and links the ref into the client's tree.
 * Returns the new handle_ref or ERR_PTR on failure.
 * NOTE(review): excerpt is missing lines (the size parameter declaration,
 * validation conditions before the EINVAL returns, ref->handle wiring,
 * error-unwind labels and closing braces are not visible).
 */
478 struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
481 void *err = ERR_PTR(-ENOMEM);
482 struct nvmap_handle *h;
483 struct nvmap_handle_ref *ref = NULL;
486 return ERR_PTR(-EINVAL);
489 return ERR_PTR(-EINVAL);
491 h = kzalloc(sizeof(*h), GFP_KERNEL);
493 return ERR_PTR(-ENOMEM);
495 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
499 atomic_set(&h->ref, 1);
500 atomic_set(&h->pin, 0);
503 h->size = h->orig_size = size;
/* Default mapping attribute until the real allocation sets flags. */
504 h->flags = NVMAP_HANDLE_WRITE_COMBINE;
505 mutex_init(&h->lock);
506 INIT_LIST_HEAD(&h->vmas);
509 * This takes out 1 ref on the dambuf. This corresponds to the
510 * handle_ref that gets automatically made by nvmap_create_handle().
512 h->dmabuf = __nvmap_make_dmabuf(client, h);
513 if (IS_ERR(h->dmabuf)) {
515 goto make_dmabuf_fail;
519 * Pre-attach nvmap to this new dmabuf. This gets unattached during the
520 * dma_buf_release() operation.
522 h->attachment = dma_buf_attach(h->dmabuf, nvmap_dev->dev_user.parent);
523 if (IS_ERR(h->attachment)) {
525 goto dma_buf_attach_fail;
528 nvmap_handle_add(nvmap_dev, h);
531 * Major assumption here: the dma_buf object that the handle contains
532 * is created with a ref count of 1.
534 atomic_set(&ref->dupes, 1);
536 atomic_set(&ref->pin, 0);
537 add_handle_ref(client, ref);
538 trace_nvmap_create_handle(client, client->name, h, size, ref);
/* Error unwind (labels partially not visible): drop the dma_buf ref,
 * then presumably free ref and h. */
542 dma_buf_put(h->dmabuf);
/*
 * nvmap_duplicate_handle - take an additional client reference on @h.
 * Validates the handle (EPERM if invalid; EINVAL if unallocated unless
 * @skip_val — TODO confirm, the condition lines are not visible). If the
 * client already holds a ref, only its dupe count is bumped; otherwise a
 * new handle_ref is created, carveout commit accounting is added for
 * carveout-backed handles, and one dma_buf ref is taken to mirror the
 * new handle_ref. Success paths keep the handle ref taken by
 * nvmap_validate_get().
 * NOTE(review): excerpt is missing lines (ref->handle wiring, returns on
 * success paths, nvmap_handle_put on error paths, closing braces).
 */
550 struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
551 struct nvmap_handle *h, bool skip_val)
553 struct nvmap_handle_ref *ref = NULL;
556 /* on success, the reference count for the handle should be
557 * incremented, so the success paths will not call nvmap_handle_put */
558 h = nvmap_validate_get(h);
561 pr_debug("%s duplicate handle failed\n",
562 current->group_leader->comm);
563 return ERR_PTR(-EPERM);
567 pr_err("%s duplicating unallocated handle\n",
568 current->group_leader->comm);
570 return ERR_PTR(-EINVAL);
573 nvmap_ref_lock(client);
574 ref = __nvmap_validate_locked(client, h);
577 /* handle already duplicated in client; just increment
578 * the reference count rather than re-duplicating it */
579 atomic_inc(&ref->dupes);
580 nvmap_ref_unlock(client);
584 nvmap_ref_unlock(client);
586 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
589 return ERR_PTR(-ENOMEM);
/* Carveout-backed handles charge the client's commit under h->lock. */
592 if (!h->heap_pgalloc) {
593 mutex_lock(&h->lock);
594 nvmap_carveout_commit_add(client,
595 nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
597 mutex_unlock(&h->lock);
600 atomic_set(&ref->dupes, 1);
602 atomic_set(&ref->pin, 0);
603 add_handle_ref(client, ref);
606 * Ref counting on the dma_bufs follows the creation and destruction of
607 * nvmap_handle_refs. That is every time a handle_ref is made the
608 * dma_buf ref count goes up and everytime a handle_ref is destroyed
609 * the dma_buf ref count goes down.
611 get_dma_buf(h->dmabuf);
613 trace_nvmap_duplicate_handle(client, h, ref);
/*
 * nvmap_create_handle_from_fd - duplicate the handle behind a dma_buf fd.
 * Resolves @fd to its nvmap handle and duplicates it into @client with
 * validation skipped (skip_val = 1). Returns the new handle_ref, or an
 * error pointer if the fd does not map to an nvmap dma_buf.
 * NOTE(review): excerpt is missing lines (the IS_ERR(handle) check before
 * ERR_CAST, the final return, closing brace).
 */
617 struct nvmap_handle_ref *nvmap_create_handle_from_fd(
618 struct nvmap_client *client, int fd)
620 struct nvmap_handle *handle;
621 struct nvmap_handle_ref *ref;
625 handle = nvmap_get_id_from_dmabuf_fd(client, fd);
627 return ERR_CAST(handle);
628 ref = nvmap_duplicate_handle(client, handle, 1);
/*
 * nvmap_duplicate_handle_id_ex - duplicate @h and return the handle id.
 * Wraps nvmap_duplicate_handle() (with validation, skip_val = 0) and
 * converts the resulting ref to the handle pointer/id form via
 * __nvmap_ref_to_id(). Error handling between the call and the return is
 * not visible in this excerpt — presumably returns NULL/ERR on failure.
 */
632 struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
633 struct nvmap_handle *h)
635 struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);
640 return __nvmap_ref_to_id(ref);
642 EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);
/*
 * nvmap_get_page_list_info - report size/flags/page-count/contiguity of a
 * page-allocated handle. All out-pointers are mandatory (BUG_ON). Takes a
 * temporary handle ref for the query; rejects invalid handles and handles
 * that are unallocated or carveout-backed (only heap_pgalloc handles have
 * a page list). *flags is presumably set on a line not shown here.
 * NOTE(review): excerpt is missing lines (out-param zeroing, returns,
 * nvmap_handle_put, closing braces).
 */
644 int nvmap_get_page_list_info(struct nvmap_client *client,
645 struct nvmap_handle *handle, u32 *size,
646 u32 *flags, u32 *nr_page, bool *contig)
648 struct nvmap_handle *h;
650 BUG_ON(!size || !flags || !nr_page || !contig);
657 h = nvmap_handle_get(handle);
660 pr_err("%s query invalid handle %p\n",
661 current->group_leader->comm, handle);
665 if (!h->alloc || !h->heap_pgalloc) {
666 pr_err("%s query unallocated handle %p\n",
667 current->group_leader->comm, handle);
673 *size = h->orig_size;
674 *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
675 *contig = h->pgalloc.contig;
680 EXPORT_SYMBOL(nvmap_get_page_list_info);
/*
 * nvmap_acquire_page_list - copy a handle's page pointers into @pages and
 * pin the handle. @nr_page must equal the handle's page count (BUG_ON).
 * Only valid for allocated heap_pgalloc handles. The pin is taken through
 * the client's own ref under the ref lock; the matching release is
 * nvmap_release_page_list().
 * NOTE(review): excerpt is missing lines (the nr_page parameter
 * declaration, dummy/idx declarations, error returns, nvmap_handle_put,
 * closing braces).
 */
682 int nvmap_acquire_page_list(struct nvmap_client *client,
683 struct nvmap_handle *handle, struct page **pages,
686 struct nvmap_handle *h;
687 struct nvmap_handle_ref *ref;
693 h = nvmap_handle_get(handle);
696 pr_err("%s query invalid handle %p\n",
697 current->group_leader->comm, handle);
701 if (!h->alloc || !h->heap_pgalloc) {
702 pr_err("%s query unallocated handle %p\n",
703 current->group_leader->comm, handle);
/* caller's count must match the handle exactly */
708 BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);
710 for (idx = 0; idx < nr_page; idx++)
711 pages[idx] = h->pgalloc.pages[idx];
/* Pin via the client's ref so the pages stay resident. */
713 nvmap_ref_lock(client);
714 ref = __nvmap_validate_locked(client, h);
716 __nvmap_pin(ref, &dummy);
717 nvmap_ref_unlock(client);
721 EXPORT_SYMBOL(nvmap_acquire_page_list);
/*
 * nvmap_release_page_list - undo nvmap_acquire_page_list().
 * Looks up the client's ref for @handle under the ref lock; the unpin and
 * handle put presumably happen on lines not shown in this excerpt — TODO
 * confirm against full source.
 */
723 int nvmap_release_page_list(struct nvmap_client *client,
724 struct nvmap_handle *handle)
726 struct nvmap_handle_ref *ref;
727 struct nvmap_handle *h = NULL;
731 nvmap_ref_lock(client);
733 ref = __nvmap_validate_locked(client, handle);
737 nvmap_ref_unlock(client);
746 EXPORT_SYMBOL(nvmap_release_page_list);
/*
 * __nvmap_get_handle_param - query one attribute of a handle into *result.
 * Supported params (from the visible cases): SIZE (original requested
 * size), ALIGNMENT, BASE (carveout base or the sg_dma_address of the
 * pinned attachment), HEAP (carveout usage bits or NVMAP_HEAP_IOVMM),
 * KIND, and COMPR (ignored). Returns int — presumably 0 on success /
 * -EINVAL for unknown params (default case not visible).
 * NOTE(review): excerpt is missing lines (switch header, break statements,
 * several case bodies, closing braces).
 */
748 int __nvmap_get_handle_param(struct nvmap_client *client,
749 struct nvmap_handle *h, u32 param, u64 *result)
753 if (WARN_ON(!virt_addr_valid(h)))
757 case NVMAP_HANDLE_PARAM_SIZE:
758 *result = h->orig_size;
760 case NVMAP_HANDLE_PARAM_ALIGNMENT:
763 case NVMAP_HANDLE_PARAM_BASE:
/* BASE is only meaningful while allocated and pinned. */
764 if (!h->alloc || !atomic_read(&h->pin))
766 else if (!h->heap_pgalloc) {
767 mutex_lock(&h->lock);
768 *result = h->carveout->base;
769 mutex_unlock(&h->lock);
/* pgalloc path: DMA address comes from the mapped sg_table stashed in
 * the dma_buf attachment's priv. */
770 } else if (h->attachment->priv)
771 *result = sg_dma_address(
772 ((struct sg_table *)h->attachment->priv)->sgl);
776 case NVMAP_HANDLE_PARAM_HEAP:
779 else if (!h->heap_pgalloc) {
780 mutex_lock(&h->lock);
781 *result = nvmap_carveout_usage(client, h->carveout);
782 mutex_unlock(&h->lock);
784 *result = NVMAP_HEAP_IOVMM;
786 case NVMAP_HANDLE_PARAM_KIND:
789 case NVMAP_HANDLE_PARAM_COMPR:
790 /* ignored, to be removed */
/*
 * nvmap_get_handle_param - public wrapper around __nvmap_get_handle_param.
 * Sanity-checks that @ref and @client are valid kernel pointers (a third
 * WARN_ON condition is on a line not visible here), then forwards
 * ref->handle to the worker. Closing brace is past the end of this view.
 */
799 int nvmap_get_handle_param(struct nvmap_client *client,
800 struct nvmap_handle_ref *ref, u32 param, u64 *result)
802 if (WARN_ON(!virt_addr_valid(ref)) ||
803 WARN_ON(!virt_addr_valid(client)) ||
807 return __nvmap_get_handle_param(client, ref->handle, param, result);