/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
#include <linux/tegra-soc.h>

#include <asm/pgtable.h>

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
bool zero_memory = 1;
#else
bool zero_memory;
#endif
u32 nvmap_max_handle_count;

static int zero_memory_set(const char *arg, const struct kernel_param *kp)
{
#ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
        return -EPERM;
#else
        param_set_bool(arg, kp);
        nvmap_page_pool_clear();
        return 0;
#endif
}

static struct kernel_param_ops zero_memory_ops = {
        .get = param_get_bool,
        .set = zero_memory_set,
};

module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);
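
/*
 * Usage note (illustrative, not from the original source): unless
 * CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES is set, zero_memory can be toggled
 * at run time; the setter above also drops the page pool, since pages
 * already cached there were not zeroed.  Assuming the usual sysfs layout
 * and a module name of nvmap, something like:
 *
 *      echo 1 > /sys/module/nvmap/parameters/zero_memory
 */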


#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                                 NVMAP_HEAP_CARVEOUT_VPR)

/* Handles may be arbitrarily large (16+ MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. To
 * preserve kmalloc space, the array is allocated with vmalloc whenever it
 * exceeds PAGELIST_VMALLOC_MIN. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)

void *nvmap_altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

void nvmap_altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}
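
/*
 * Sizing sketch (illustrative, not from the original source): on a 64-bit
 * kernel with 4 KiB pages, a 16 MiB handle needs 4096 struct page pointers,
 * i.e. 4096 * 8 = 32 KiB of metadata.  That exceeds PAGELIST_VMALLOC_MIN
 * (one page), so nvmap_altalloc() falls back to vmalloc() for the array,
 * while a handle of only a few pages stays on kmalloc():
 *
 *      size_t nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
 *      struct page **pages = nvmap_altalloc(nr_page * sizeof(*pages));
 *      ...
 *      nvmap_altfree(pages, nr_page * sizeof(*pages));
 */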

void _nvmap_handle_free(struct nvmap_handle *h)
{
        unsigned int i, nr_page, page_index = 0;
#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
        !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
        struct nvmap_page_pool *pool = NULL;
#endif

        if (h->nvhost_priv)
                h->nvhost_priv_delete(h->nvhost_priv);

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        nvmap_stats_inc(NS_RELEASE, h->size);
        nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        if (!h->heap_pgalloc) {
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

#ifdef NVMAP_LAZY_VFREE
        if (h->vaddr) {
                nvmap_kmaps_dec(h);
                vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
        }
#endif

        for (i = 0; i < nr_page; i++)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

#if defined(CONFIG_NVMAP_PAGE_POOLS) && \
        !defined(CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES)
        if (!zero_memory) {
                pool = &nvmap_dev->pool;

                while (page_index < nr_page) {
                        if (!nvmap_page_pool_fill(pool,
                                h->pgalloc.pages[page_index]))
                                break;

                        page_index++;
                }
        }
#endif

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

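/*
 * Allocate a physically contiguous region of exactly PAGE_ALIGN(size) bytes.
 * alloc_pages() only hands out power-of-two blocks, so the block is
 * over-allocated at the next order, split into individual pages with
 * split_page(), and the unused tail pages are freed back immediately.
 */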
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        int err = 0;
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
        phys_addr_t paddr;
#endif
        gfp_t gfp = GFP_NVMAP;
        unsigned long kaddr;
        pte_t **pte = NULL;

        if (zero_memory)
                gfp |= __GFP_ZERO;

        pages = nvmap_altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, PG_PROT_KERNEL);

        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                pool = &nvmap_dev->pool;

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        page_index++;
                }
#endif
                for (i = page_index; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }
        }

        /*
         * Make sure any data in the caches is flushed out before
         * passing these pages to userspace. Otherwise it can lead to
         * corruption in pages that get mapped as something other than WB
         * in userspace, and to leaked kernel data structures.
         */
        if (page_index < nr_page)
                nvmap_flush_cache(&pages[page_index], nr_page - page_index);

        if (err)
                goto fail;

        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        atomic_set(&h->pgalloc.ndirty, 0);
        return 0;

fail:
        while (i--)
                __free_page(pages[i]);
        nvmap_altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_type = type;
                        h->heap_pgalloc = false;
                        /* barrier to ensure all handle alloc data
                         * is visible before alloc is seen by other
                         * processors.
                         */
                        mb();
                        h->alloc = true;
                }
        } else if (type & iovmm_mask) {
                int ret;

                ret = handle_page_alloc(client, h,
                        h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
                if (ret)
                        return;
                h->heap_type = NVMAP_HEAP_IOVMM;
                h->heap_pgalloc = true;
                mb();
                h->alloc = true;
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
        0,
};
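
/*
 * Illustrative walk-through (not part of the original source): for a
 * one-page handle, nvmap_alloc_handle() below walks heap_policy_small,
 * otherwise heap_policy_large.  Each policy entry is intersected with the
 * caller's heap_mask, and every heap bit that survives is offered to
 * alloc_handle() from the most-significant bit down, e.g. for one entry:
 *
 *      unsigned int heap_type = NVMAP_HEAP_CARVEOUT_MASK & heap_mask;
 *
 *      while (heap_type && !h->alloc) {
 *              unsigned int heap = 1 << __fls(heap_type);
 *
 *              alloc_handle(client, h, heap);
 *              heap_type &= ~heap;
 *      }
 */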

int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align,
                       u8 kind,
                       unsigned int flags)
{
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_handle_get(h);

        if (!h)
                return -EINVAL;

        if (h->alloc) {
                nvmap_handle_put(h);
                return -EEXIST;
        }

        nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
        trace_nvmap_alloc_handle(client, h,
                h->size, heap_mask, align, flags,
                nvmap_stats_read(NS_TOTAL),
                nvmap_stats_read(NS_ALLOC));
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);
        h->kind = kind;
        h->map_resources = 0;

#ifndef CONFIG_TEGRA_IOVMM
        /* convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        if (h->alloc) {
                if (client->kernel_client)
                        nvmap_stats_inc(NS_KALLOC, h->size);
                else
                        nvmap_stats_inc(NS_UALLOC, h->size);
        } else {
                nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
                nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
        }

        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

void nvmap_free_handle(struct nvmap_client *client,
                       struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle(client, handle);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);
        client->handle_count--;
        atomic_dec(&ref->handle->share_count);

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_debug(client, "%s freeing pinned handle %p\n",
                            current->group_leader->comm, h);

        while (atomic_read(&ref->pin))
                __nvmap_unpin(ref);

        if (h->owner == client) {
                h->owner = NULL;
                h->owner_ref = NULL;
        }

        dma_buf_put(ref->handle->dmabuf);
        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}
EXPORT_SYMBOL(nvmap_free_handle);

void nvmap_free_handle_user_id(struct nvmap_client *client,
                               unsigned long user_id)
{
        nvmap_free_handle(client, unmarshal_user_id(user_id));
}

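/*
 * Insert a reference into the client's rbtree of handle references, keyed
 * by the handle pointer, and update the per-client handle count (tracking
 * the global maximum) and the handle's share count.
 */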
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        client->handle_count++;
        if (client->handle_count > nvmap_max_handle_count)
                nvmap_max_handle_count = client->handle_count;
        atomic_inc(&ref->handle->share_count);
        nvmap_ref_unlock(client);
}

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        void *err = ERR_PTR(-ENOMEM);
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                goto ref_alloc_fail;

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->owner_ref = ref;
        h->dev = nvmap_dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);
        INIT_LIST_HEAD(&h->vmas);
        INIT_LIST_HEAD(&h->lru);

        /*
         * This takes out 1 ref on the dmabuf. This corresponds to the
         * handle_ref that gets automatically made by nvmap_create_handle().
         */
        h->dmabuf = __nvmap_make_dmabuf(client, h);
        if (IS_ERR(h->dmabuf)) {
                err = h->dmabuf;
                goto make_dmabuf_fail;
        }

        /*
         * Pre-attach nvmap to this new dmabuf. This gets unattached during the
         * dma_buf_release() operation.
         */
        h->attachment = dma_buf_attach(h->dmabuf, &nvmap_pdev->dev);
        if (IS_ERR(h->attachment)) {
                err = h->attachment;
                goto dma_buf_attach_fail;
        }

        nvmap_handle_add(nvmap_dev, h);

        /*
         * Major assumption here: the dma_buf object that the handle contains
         * is created with a ref count of 1.
         */
        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, client->name, h, size, ref);
        return ref;

dma_buf_attach_fail:
        dma_buf_put(h->dmabuf);
make_dmabuf_fail:
        kfree(ref);
ref_alloc_fail:
        kfree(h);
        return err;
}
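
/*
 * Typical in-kernel lifecycle, sketched from the functions in this file
 * (illustrative only; error handling abbreviated):
 *
 *      struct nvmap_handle_ref *ref;
 *      int err;
 *
 *      ref = nvmap_create_handle(client, SZ_64K);
 *      if (IS_ERR(ref))
 *              return PTR_ERR(ref);
 *
 *      err = nvmap_alloc_handle(client, ref->handle, NVMAP_HEAP_IOVMM,
 *                               PAGE_SIZE, 0, NVMAP_HANDLE_WRITE_COMBINE);
 *      if (err)
 *              nvmap_free_handle(client, ref->handle);
 */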

struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val)
{
        struct nvmap_handle_ref *ref = NULL;

        BUG_ON(!client);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_handle_get(h);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);

        /*
         * Ref counting on the dma_bufs follows the creation and destruction
         * of nvmap_handle_refs: every time a handle_ref is made, the dma_buf
         * ref count goes up, and every time a handle_ref is destroyed, the
         * dma_buf ref count goes down.
         */
        get_dma_buf(h->dmabuf);

        trace_nvmap_duplicate_handle(client, h, ref);
        return ref;
}

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd)
{
        struct nvmap_handle *handle;
        struct nvmap_handle_ref *ref;

        BUG_ON(!client);

        handle = nvmap_get_id_from_dmabuf_fd(client, fd);
        if (IS_ERR(handle))
                return ERR_CAST(handle);
        ref = nvmap_duplicate_handle(client, handle, 1);
        return ref;
}

struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
                                                        struct nvmap_handle *h)
{
        struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);

        if (IS_ERR(ref))
                return 0;

        return __nvmap_ref_to_id(ref);
}
EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);

int nvmap_get_page_list_info(struct nvmap_client *client,
                                struct nvmap_handle *handle, u32 *size,
                                u32 *flags, u32 *nr_page, bool *contig)
{
        struct nvmap_handle *h;

        BUG_ON(!size || !flags || !nr_page || !contig);
        BUG_ON(!client);

        *size = 0;
        *flags = 0;
        *nr_page = 0;

        h = nvmap_handle_get(handle);

        if (!h) {
                nvmap_err(client, "%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                nvmap_err(client, "%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        *flags = h->flags;
        *size = h->orig_size;
        *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
        *contig = h->pgalloc.contig;

        nvmap_handle_put(h);
        return 0;
}
EXPORT_SYMBOL(nvmap_get_page_list_info);

int nvmap_acquire_page_list(struct nvmap_client *client,
                        struct nvmap_handle *handle, struct page **pages,
                        u32 nr_page)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref;
        int idx;
        phys_addr_t dummy;

        BUG_ON(!client);

        h = nvmap_handle_get(handle);

        if (!h) {
                nvmap_err(client, "%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                nvmap_err(client, "%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);

        for (idx = 0; idx < nr_page; idx++)
                pages[idx] = h->pgalloc.pages[idx];

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);
        if (ref)
                __nvmap_pin(ref, &dummy);
        nvmap_ref_unlock(client);

        return 0;
}
EXPORT_SYMBOL(nvmap_acquire_page_list);

int nvmap_release_page_list(struct nvmap_client *client,
                                struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client);

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (ref)
                __nvmap_unpin(ref);

        nvmap_ref_unlock(client);

        if (ref)
                h = ref->handle;
        if (h)
                nvmap_handle_put(h);

        return 0;
}
EXPORT_SYMBOL(nvmap_release_page_list);
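
/*
 * Acquire/release usage sketch (illustrative only, error handling
 * abbreviated; assumes the caller already holds a duplicated handle in
 * this client):
 *
 *      u32 size, flags, nr_page;
 *      bool contig;
 *      struct page **pages;
 *
 *      if (nvmap_get_page_list_info(client, handle, &size, &flags,
 *                                   &nr_page, &contig))
 *              return -EINVAL;
 *
 *      pages = nvmap_altalloc(nr_page * sizeof(*pages));
 *      if (!pages)
 *              return -ENOMEM;
 *
 *      nvmap_acquire_page_list(client, handle, pages, nr_page);
 *      ...use the pages...
 *      nvmap_release_page_list(client, handle);
 *      nvmap_altfree(pages, nr_page * sizeof(*pages));
 */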

int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result)
{
        int err = 0;

        if (WARN_ON(!virt_addr_valid(h)))
                return -EINVAL;

        switch (param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                *result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                *result = h->align;
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (!h->alloc || !atomic_read(&h->pin))
                        *result = -EINVAL;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        *result = page_to_phys(h->pgalloc.pages[0]);
                else if (h->attachment->priv)
                        *result = sg_dma_address(
                                ((struct sg_table *)h->attachment->priv)->sgl);
                else
                        *result = -EINVAL;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        *result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else
                        *result = NVMAP_HEAP_IOVMM;
                break;
        case NVMAP_HANDLE_PARAM_KIND:
                *result = h->kind;
                break;
        case NVMAP_HANDLE_PARAM_COMPR:
                /* ignored, to be removed */
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

int nvmap_get_handle_param(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
        if (WARN_ON(!virt_addr_valid(ref)) ||
            WARN_ON(!virt_addr_valid(client)) ||
            WARN_ON(!result))
                return -EINVAL;

        return __nvmap_get_handle_param(client, ref->handle, param, result);