/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rbtree.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
#include <linux/tegra-soc.h>

#include <asm/pgtable.h>

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
bool zero_memory = 1;
#else
bool zero_memory;
#endif

u32 nvmap_max_handle_count;

static int zero_memory_set(const char *arg, const struct kernel_param *kp)
{
#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
	/* zeroing of user pages cannot be disabled in this configuration */
	return -EPERM;
#else
	param_set_bool(arg, kp);
	nvmap_page_pool_clear();
	return 0;
#endif
}

static struct kernel_param_ops zero_memory_ops = {
	.get = param_get_bool,
	.set = zero_memory_set,
};

module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);

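/*
 * Note (added for clarity; the sysfs path is an assumption, not from the
 * original source): registering the parameter with mode 0644 exposes it at
 * /sys/module/<module name>/parameters/zero_memory, so zeroing of freshly
 * allocated user pages can be toggled at runtime, e.g.:
 *
 *	echo 1 > /sys/module/nvmap/parameters/zero_memory
 *
 * When CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES is enabled, zero_memory_set()
 * rejects writes so zeroing stays on; otherwise a change also clears the
 * page pools via nvmap_page_pool_clear().
 */
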
#define NVMAP_SECURE_HEAPS	(NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
				 NVMAP_HEAP_CARVEOUT_VPR)

/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN	(PAGE_SIZE)

void *nvmap_altalloc(size_t len)
{
	if (len > PAGELIST_VMALLOC_MIN)
		return vmalloc(len);
	else
		return kmalloc(len, GFP_KERNEL);
}

void nvmap_altfree(void *ptr, size_t len)
{
	if (len > PAGELIST_VMALLOC_MIN)
		vfree(ptr);
	else
		kfree(ptr);
}

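/*
 * Usage sketch (illustrative, not part of the original driver): callers must
 * pass the same length to nvmap_altfree() that they passed to
 * nvmap_altalloc(), since the length alone decides whether the buffer came
 * from kmalloc or vmalloc:
 *
 *	size_t bytes = nr_page * sizeof(struct page *);
 *	struct page **pages = nvmap_altalloc(bytes);
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	nvmap_altfree(pages, bytes);
 */
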
void _nvmap_handle_free(struct nvmap_handle *h)
{
	unsigned int i, nr_page, page_index = 0;
#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
    !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
	struct nvmap_page_pool *pool = NULL;
#endif

	if (h->nvhost_priv)
		h->nvhost_priv_delete(h->nvhost_priv);

	if (nvmap_handle_remove(h->dev, h) != 0)
		return;

	if (!h->alloc)
		goto out;

	nvmap_stats_inc(NS_RELEASE, h->size);
	nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
	if (!h->heap_pgalloc) {
		nvmap_heap_free(h->carveout);
		goto out;
	}

	nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
	BUG_ON(h->size & ~PAGE_MASK);
	BUG_ON(!h->pgalloc.pages);

#ifdef NVMAP_LAZY_VFREE
	if (h->vaddr)
		vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
#endif

	for (i = 0; i < nr_page; i++)
		h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
    !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
	/* return as many pages as possible to the page pool */
	pool = &nvmap_dev->pool;
	while (page_index < nr_page) {
		if (!nvmap_page_pool_fill(pool,
		    h->pgalloc.pages[page_index]))
			break;
		page_index++;
	}
#endif

	/* free whatever the pool did not take */
	for (i = page_index; i < nr_page; i++)
		__free_page(h->pgalloc.pages[i]);

	nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
	kfree(h);
}

static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
	struct page *page, *p, *e;
	unsigned int order;

	size = PAGE_ALIGN(size);
	order = get_order(size);
	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);
	e = page + (1 << order);
	for (p = page + (size >> PAGE_SHIFT); p < e; p++)
		__free_page(p);
	return page;
}

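/*
 * Worked example (for illustration only, assuming 4 KiB pages): a 12 KiB
 * request stays 12 KiB after PAGE_ALIGN(), but get_order() rounds it up to an
 * order-2 (16 KiB) allocation. split_page() turns that into four independent
 * order-0 pages, and the loop above frees the one page beyond the requested
 * size, leaving exactly the three pages the caller asked for.
 */
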
static int handle_page_alloc(struct nvmap_client *client,
			     struct nvmap_handle *h, bool contiguous)
{
	size_t size = PAGE_ALIGN(h->size);
	unsigned int nr_page = size >> PAGE_SHIFT;
	pgprot_t prot;
	unsigned int i = 0, page_index = 0;
	struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
	struct nvmap_page_pool *pool = NULL;
#endif
	gfp_t gfp = GFP_NVMAP;

	pages = nvmap_altalloc(nr_page * sizeof(*pages));
	if (!pages)
		return -ENOMEM;

	prot = nvmap_pgprot(h, PG_PROT_KERNEL);

	if (contiguous) {
		struct page *page;

		page = nvmap_alloc_pages_exact(gfp, size);
		if (!page)
			goto fail;
		for (i = 0; i < nr_page; i++)
			pages[i] = nth_page(page, i);
	} else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
		pool = &nvmap_dev->pool;
		for (i = 0; i < nr_page; i++) {
			/* Get pages from pool, if available. */
			pages[i] = nvmap_page_pool_alloc(pool);
			if (!pages[i])
				break;
			page_index++;
		}
#endif
		for (i = page_index; i < nr_page; i++) {
			pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
			if (!pages[i])
				goto fail;
		}
	}

	/*
	 * Make sure any data in the caches is flushed out before
	 * passing these pages to userspace. Otherwise it can lead to
	 * corruption in pages that get mapped as something other than WB in
	 * userspace, and to leaked kernel data structures.
	 */
	if (page_index < nr_page)
		nvmap_flush_cache(&pages[page_index], nr_page - page_index);

	h->size = size;
	h->pgalloc.pages = pages;
	h->pgalloc.contig = contiguous;
	atomic_set(&h->pgalloc.ndirty, 0);
	return 0;

fail:
	while (i--)
		__free_page(pages[i]);
	nvmap_altfree(pages, nr_page * sizeof(*pages));
	return -ENOMEM;
}

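/*
 * Note (added for clarity, not in the original source): a contiguous request
 * is served by a single nvmap_alloc_pages_exact() call, while a discontiguous
 * one is filled from the page pool first (when CONFIG_NVMAP_PAGE_POOLS is
 * enabled) and topped up with single-page allocations. Only the pages that
 * did not come from the pool are cache-flushed here; pool pages are assumed
 * to have been flushed when they entered the pool.
 */
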
static void alloc_handle(struct nvmap_client *client,
			 struct nvmap_handle *h, unsigned int type)
{
	unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
	unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

	BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
	/* Convert generic carveout requests to iovmm requests. */
	carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
	iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

	if (type & carveout_mask) {
		struct nvmap_heap_block *b;

		b = nvmap_carveout_alloc(client, h, type);
		if (b) {
			h->heap_type = type;
			h->heap_pgalloc = false;
			/* barrier to ensure all handle alloc data
			 * is visible before alloc is seen by other
			 * processors. */
			mb();
			h->alloc = true;
		}
	} else if (type & iovmm_mask) {
		int ret;

		ret = handle_page_alloc(client, h,
				h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
		if (ret)
			return;
		h->heap_type = NVMAP_HEAP_IOVMM;
		h->heap_pgalloc = true;
		mb();
		h->alloc = true;
	}
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
	NVMAP_HEAP_CARVEOUT_MASK,
	NVMAP_HEAP_IOVMM,
	0,
};

static const unsigned int heap_policy_large[] = {
	NVMAP_HEAP_CARVEOUT_VPR,
	NVMAP_HEAP_CARVEOUT_IRAM,
	NVMAP_HEAP_IOVMM,
	NVMAP_HEAP_CARVEOUT_MASK,
	0,
};

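/*
 * Illustration (assumption, not from the original source): for a one-page
 * allocation restricted to NVMAP_HEAP_CARVEOUT_IRAM, nvmap_alloc_handle()
 * below walks heap_policy_small. The VPR entry is masked off by the request's
 * heap_mask, the IRAM entry is attempted via alloc_handle(), and because IRAM
 * is then cleared from heap_mask no later policy entry matches, so the
 * allocation fails if IRAM is exhausted.
 */
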
int nvmap_alloc_handle(struct nvmap_client *client,
		       struct nvmap_handle *h, unsigned int heap_mask,
		       size_t align,
		       u8 kind,
		       unsigned int flags)
{
	const unsigned int *alloc_policy;
	int nr_page;
	int err = -ENOMEM;

	h = nvmap_handle_get(h);
	if (!h)
		return -EINVAL;

	if (h->alloc) {
		nvmap_handle_put(h);
		return -EEXIST;
	}

	nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
	nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
	trace_nvmap_alloc_handle(client, h,
		h->size, heap_mask, align, flags,
		nvmap_stats_read(NS_TOTAL),
		nvmap_stats_read(NS_ALLOC));
	h->userflags = flags;
	nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	h->secure = !!(flags & NVMAP_HANDLE_SECURE);
	h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
	h->align = max_t(size_t, align, L1_CACHE_BYTES);
	h->kind = kind;
	h->map_resources = 0;

#ifndef CONFIG_TEGRA_IOVMM
	/* convert iovmm requests to generic carveout. */
	if (heap_mask & NVMAP_HEAP_IOVMM) {
		heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
			    NVMAP_HEAP_CARVEOUT_GENERIC;
	}
#endif

	/* secure allocations can only be served from secure heaps */
	if (h->secure) {
		heap_mask &= NVMAP_SECURE_HEAPS;
		if (!heap_mask) {
			err = -EINVAL;
			goto out;
		}
	}

	alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

	while (!h->alloc && *alloc_policy) {
		unsigned int heap_type;

		heap_type = *alloc_policy++;
		heap_type &= heap_mask;
		if (!heap_type)
			continue;

		heap_mask &= ~heap_type;

		while (heap_type && !h->alloc) {
			unsigned int heap;

			/* iterate possible heaps MSB-to-LSB, since higher-
			 * priority carveouts will have higher usage masks */
			heap = 1 << __fls(heap_type);
			alloc_handle(client, h, heap);
			heap_type &= ~heap;
		}
	}

out:
	if (h->alloc) {
		if (client->kernel_client)
			nvmap_stats_inc(NS_KALLOC, h->size);
		else
			nvmap_stats_inc(NS_UALLOC, h->size);
	} else {
		nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
		nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
	}

	err = (h->alloc) ? 0 : err;
	nvmap_handle_put(h);
	return err;
}

void nvmap_free_handle(struct nvmap_client *client,
		       struct nvmap_handle *handle)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h;
	int pins;

	nvmap_ref_lock(client);

	ref = __nvmap_validate_locked(client, handle);
	if (!ref) {
		nvmap_ref_unlock(client);
		return;
	}

	trace_nvmap_free_handle(client, handle);
	BUG_ON(!ref->handle);
	h = ref->handle;

	if (atomic_dec_return(&ref->dupes)) {
		nvmap_ref_unlock(client);
		goto out;
	}

	smp_rmb();
	pins = atomic_read(&ref->pin);
	rb_erase(&ref->node, &client->handle_refs);
	client->handle_count--;
	atomic_dec(&ref->handle->share_count);

	nvmap_ref_unlock(client);

	if (pins)
		nvmap_debug(client, "%s freeing pinned handle %p\n",
			    current->group_leader->comm, h);

	while (atomic_read(&ref->pin))
		__nvmap_unpin(ref);

	if (h->owner == client) {
		h->owner = NULL;
	}

	dma_buf_put(ref->handle->dmabuf);
	kfree(ref);

out:
	BUG_ON(!atomic_read(&h->ref));
	nvmap_handle_put(h);
}
EXPORT_SYMBOL(nvmap_free_handle);

void nvmap_free_handle_user_id(struct nvmap_client *client,
			       unsigned long user_id)
{
	nvmap_free_handle(client, unmarshal_user_id(user_id));
}

static void add_handle_ref(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref)
{
	struct rb_node **p, *parent = NULL;

	nvmap_ref_lock(client);
	p = &client->handle_refs.rb_node;
	while (*p) {
		struct nvmap_handle_ref *node;

		parent = *p;
		node = rb_entry(parent, struct nvmap_handle_ref, node);
		if (ref->handle > node->handle)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&ref->node, parent, p);
	rb_insert_color(&ref->node, &client->handle_refs);
	client->handle_count++;
	if (client->handle_count > nvmap_max_handle_count)
		nvmap_max_handle_count = client->handle_count;
	atomic_inc(&ref->handle->share_count);
	nvmap_ref_unlock(client);
}

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size)
{
	void *err = ERR_PTR(-ENOMEM);
	struct nvmap_handle *h;
	struct nvmap_handle_ref *ref = NULL;

	if (!client)
		return ERR_PTR(-EINVAL);

	if (!size)
		return ERR_PTR(-EINVAL);

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		goto ref_alloc_fail;

	atomic_set(&h->ref, 1);
	atomic_set(&h->pin, 0);
	h->owner = client;
	h->dev = nvmap_dev;
	h->size = h->orig_size = size;
	h->flags = NVMAP_HANDLE_WRITE_COMBINE;
	mutex_init(&h->lock);
	INIT_LIST_HEAD(&h->vmas);
	INIT_LIST_HEAD(&h->lru);

	/*
	 * This takes out 1 ref on the dmabuf. This corresponds to the
	 * handle_ref that gets automatically made by nvmap_create_handle().
	 */
	h->dmabuf = __nvmap_make_dmabuf(client, h);
	if (IS_ERR(h->dmabuf)) {
		err = h->dmabuf;
		goto make_dmabuf_fail;
	}

	/*
	 * Pre-attach nvmap to this new dmabuf. This gets unattached during the
	 * dma_buf_release() operation.
	 */
	h->attachment = dma_buf_attach(h->dmabuf, &nvmap_pdev->dev);
	if (IS_ERR(h->attachment)) {
		err = h->attachment;
		goto dma_buf_attach_fail;
	}

	nvmap_handle_add(nvmap_dev, h);

	/*
	 * Major assumption here: the dma_buf object that the handle contains
	 * is created with a ref count of 1.
	 */
	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);
	trace_nvmap_create_handle(client, client->name, h, size, ref);
	return ref;

dma_buf_attach_fail:
	dma_buf_put(h->dmabuf);
make_dmabuf_fail:
	kfree(ref);
ref_alloc_fail:
	kfree(h);
	return err;
}

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
					struct nvmap_handle *h, bool skip_val)
{
	struct nvmap_handle_ref *ref = NULL;

	/* on success, the reference count for the handle should be
	 * incremented, so the success paths will not call nvmap_handle_put */
	h = nvmap_handle_get(h);

	if (!h) {
		nvmap_debug(client, "%s duplicate handle failed\n",
			    current->group_leader->comm);
		return ERR_PTR(-EPERM);
	}

	if (!h->alloc && !skip_val) {
		nvmap_err(client, "%s duplicating unallocated handle\n",
			  current->group_leader->comm);
		nvmap_handle_put(h);
		return ERR_PTR(-EINVAL);
	}

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);

	if (ref) {
		/* handle already duplicated in client; just increment
		 * the reference count rather than re-duplicating it */
		atomic_inc(&ref->dupes);
		nvmap_ref_unlock(client);
		return ref;
	}

	nvmap_ref_unlock(client);

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref) {
		nvmap_handle_put(h);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&ref->dupes, 1);
	ref->handle = h;
	atomic_set(&ref->pin, 0);
	add_handle_ref(client, ref);

	/*
	 * Ref counting on the dma_bufs follows the creation and destruction of
	 * nvmap_handle_refs. That is, every time a handle_ref is made the
	 * dma_buf ref count goes up, and every time a handle_ref is destroyed
	 * the dma_buf ref count goes down.
	 */
	get_dma_buf(h->dmabuf);

	trace_nvmap_duplicate_handle(client, h, ref);
	return ref;
}

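/*
 * Hypothetical lifecycle sketch (illustrative only; the nvmap_* calls are the
 * ones above, the surrounding flow is an assumption):
 *
 *	ref = nvmap_create_handle(client, size);             dma_buf ref = 1
 *	dup = nvmap_duplicate_handle(client2, ref->handle,
 *				     false);                  dma_buf ref = 2
 *	nvmap_free_handle(client2, dup->handle);              dma_buf ref = 1
 *	nvmap_free_handle(client, ref->handle);               last ref dropped,
 *							      buffer released
 */
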
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
			struct nvmap_client *client, int fd)
{
	struct nvmap_handle *handle;
	struct nvmap_handle_ref *ref;

	handle = nvmap_get_id_from_dmabuf_fd(client, fd);
	if (IS_ERR(handle))
		return ERR_CAST(handle);
	ref = nvmap_duplicate_handle(client, handle, 1);
	return ref;
}

struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
						  struct nvmap_handle *h)
{
	struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);

	if (IS_ERR(ref))
		return NULL;

	return __nvmap_ref_to_id(ref);
}
EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);

int nvmap_get_page_list_info(struct nvmap_client *client,
			     struct nvmap_handle *handle, u32 *size,
			     u32 *flags, u32 *nr_page, bool *contig)
{
	struct nvmap_handle *h;

	BUG_ON(!size || !flags || !nr_page || !contig);

	h = nvmap_handle_get(handle);
	if (!h) {
		nvmap_err(client, "%s query invalid handle %p\n",
			  current->group_leader->comm, handle);
		return -EINVAL;
	}

	if (!h->alloc || !h->heap_pgalloc) {
		nvmap_err(client, "%s query unallocated handle %p\n",
			  current->group_leader->comm, handle);
		nvmap_handle_put(h);
		return -EINVAL;
	}

	*flags = h->flags;
	*size = h->orig_size;
	*nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
	*contig = h->pgalloc.contig;

	nvmap_handle_put(h);
	return 0;
}
EXPORT_SYMBOL(nvmap_get_page_list_info);

int nvmap_acquire_page_list(struct nvmap_client *client,
			    struct nvmap_handle *handle, struct page **pages,
			    u32 nr_page)
{
	struct nvmap_handle *h;
	struct nvmap_handle_ref *ref;
	u32 idx;
	phys_addr_t dummy;

	h = nvmap_handle_get(handle);
	if (!h) {
		nvmap_err(client, "%s query invalid handle %p\n",
			  current->group_leader->comm, handle);
		return -EINVAL;
	}

	if (!h->alloc || !h->heap_pgalloc) {
		nvmap_err(client, "%s query unallocated handle %p\n",
			  current->group_leader->comm, handle);
		nvmap_handle_put(h);
		return -EINVAL;
	}

	BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);

	for (idx = 0; idx < nr_page; idx++)
		pages[idx] = h->pgalloc.pages[idx];

	nvmap_ref_lock(client);
	ref = __nvmap_validate_locked(client, h);
	if (ref)
		__nvmap_pin(ref, &dummy);
	nvmap_ref_unlock(client);

	return 0;
}
EXPORT_SYMBOL(nvmap_acquire_page_list);

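/*
 * Hypothetical usage sketch for the page-list API above (illustrative only;
 * the error handling and variable names are assumptions):
 *
 *	u32 size, flags, nr_page;
 *	bool contig;
 *	struct page **pages;
 *	int err;
 *
 *	err = nvmap_get_page_list_info(client, h, &size, &flags,
 *				       &nr_page, &contig);
 *	if (err)
 *		return err;
 *	pages = nvmap_altalloc(nr_page * sizeof(*pages));
 *	if (!pages)
 *		return -ENOMEM;
 *	err = nvmap_acquire_page_list(client, h, pages, nr_page);
 *	if (!err) {
 *		... use pages; the handle stays pinned until released ...
 *		nvmap_release_page_list(client, h);
 *	}
 *	nvmap_altfree(pages, nr_page * sizeof(*pages));
 */
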
int nvmap_release_page_list(struct nvmap_client *client,
			    struct nvmap_handle *handle)
{
	struct nvmap_handle_ref *ref;
	struct nvmap_handle *h = NULL;

	nvmap_ref_lock(client);

	ref = __nvmap_validate_locked(client, handle);
	if (ref)
		h = ref->handle;

	if (h)
		__nvmap_unpin(ref);

	nvmap_ref_unlock(client);

	if (h)
		nvmap_handle_put(h);
	return 0;
}
EXPORT_SYMBOL(nvmap_release_page_list);

int __nvmap_get_handle_param(struct nvmap_client *client,
			     struct nvmap_handle *h, u32 param, u64 *result)
{
	int err = 0;

	if (WARN_ON(!virt_addr_valid(h)))
		return -EINVAL;

	switch (param) {
	case NVMAP_HANDLE_PARAM_SIZE:
		*result = h->orig_size;
		break;
	case NVMAP_HANDLE_PARAM_ALIGNMENT:
		*result = h->align;
		break;
	case NVMAP_HANDLE_PARAM_BASE:
		if (!h->alloc || !atomic_read(&h->pin))
			*result = -EINVAL;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			*result = h->carveout->base;
			mutex_unlock(&h->lock);
		} else if (h->pgalloc.contig)
			*result = page_to_phys(h->pgalloc.pages[0]);
		else if (h->attachment->priv)
			*result = sg_dma_address(
				((struct sg_table *)h->attachment->priv)->sgl);
		else
			*result = -EINVAL;
		break;
	case NVMAP_HANDLE_PARAM_HEAP:
		if (!h->alloc)
			*result = 0;
		else if (!h->heap_pgalloc) {
			mutex_lock(&h->lock);
			*result = nvmap_carveout_usage(client, h->carveout);
			mutex_unlock(&h->lock);
		} else
			*result = NVMAP_HEAP_IOVMM;
		break;
	case NVMAP_HANDLE_PARAM_KIND:
		*result = h->kind;
		break;
	case NVMAP_HANDLE_PARAM_COMPR:
		/* ignored, to be removed */
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

int nvmap_get_handle_param(struct nvmap_client *client,
			   struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
	if (WARN_ON(!virt_addr_valid(ref)) ||
	    WARN_ON(!virt_addr_valid(client)) ||
	    WARN_ON(!result))
		return -EINVAL;

	return __nvmap_get_handle_param(client, ref->handle, param, result);
}