/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
#include <linux/tegra-soc.h>

#include <asm/pgtable.h>

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

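/*
 * "zero_memory" module parameter: when enabled, newly allocated handle pages
 * are zero-filled and freed pages bypass the page pool; setting the parameter
 * also clears any pages currently held in the pool.
 */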
bool zero_memory;

static int zero_memory_set(const char *arg, const struct kernel_param *kp)
{
        int err;

        err = param_set_bool(arg, kp);
        if (err)
                return err;

        nvmap_page_pool_clear();
        return 0;
}

static struct kernel_param_ops zero_memory_ops = {
        .get = param_get_bool,
        .set = zero_memory_set,
};

module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);

u32 nvmap_max_handle_count;

/* handles may be arbitrarily large (16+MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. to
 * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
 * the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)

void *nvmap_altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

void nvmap_altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

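/*
 * Release everything owned by a handle: remove it from the device rb-tree,
 * return any carveout block or heap pages (via the page pool when
 * zero_memory is off), and free the handle structure itself.
 */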
void _nvmap_handle_free(struct nvmap_handle *h)
{
        unsigned int i, nr_page, page_index = 0;

        if (h->nvhost_priv)
                h->nvhost_priv_delete(h->nvhost_priv);

        if (nvmap_handle_remove(nvmap_dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        nvmap_stats_inc(NS_RELEASE, h->size);
        nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        if (!h->heap_pgalloc) {
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

#ifdef NVMAP_LAZY_VFREE
        if (h->vaddr)
                vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
#endif

        for (i = 0; i < nr_page; i++)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

        if (!zero_memory)
                page_index = nvmap_page_pool_fill_lots(&nvmap_dev->pool,
                                h->pgalloc.pages, nr_page);

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

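/*
 * Allocate exactly size bytes worth of pages: grab a higher-order block,
 * split it into order-0 pages, and give back the tail pages beyond the
 * requested size. Returns the first page, or NULL on failure.
 */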
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

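/*
 * Back a handle with system memory pages. A contiguous request is satisfied
 * with a single exact allocation; otherwise pages are taken from the page
 * pool first (when CONFIG_NVMAP_PAGE_POOLS is enabled) and any shortfall is
 * filled with single-page allocations. Caches are cleaned/flushed before the
 * pages can be handed out to userspace.
 */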
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
        gfp_t gfp = GFP_NVMAP;

        if (zero_memory)
                gfp |= __GFP_ZERO;

        pages = nvmap_altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, PG_PROT_KERNEL);

        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                /*
                 * Get as many pages from the pools as possible.
                 */
                page_index = nvmap_page_pool_alloc_lots(&nvmap_dev->pool, pages,
                                                                 nr_page);
#endif
                for (i = page_index; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }
        }

        /*
         * Make sure any data in the caches is cleaned out before
         * passing these pages to userspace. Otherwise it can lead to
         * corruption in pages that get mapped as something other than WB
         * in userspace, and to leaked kernel data structures.
         *
         * FIXME: For ARMv7 we don't have __clean_dcache_page(), so we
         * continue to use the flush cache version.
         */
#ifdef ARM64
        nvmap_clean_cache(pages, nr_page);
#else
        nvmap_flush_cache(pages, nr_page);
#endif

        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        atomic_set(&h->pgalloc.ndirty, 0);
        return 0;

fail:
        while (i--)
                __free_page(pages[i]);
        nvmap_altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

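/*
 * Try to satisfy an allocation from a single heap type (exactly one bit of
 * the heap mask). Carveout heaps go through nvmap_carveout_alloc(); IOVMM
 * requests are backed by system pages. On success h->alloc is set last,
 * after a memory barrier, so other CPUs never see a half-initialised handle.
 */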
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_type = type;
                        h->heap_pgalloc = false;
                        /* barrier to ensure all handle alloc data
                         * is visible before alloc is seen by other
                         * processors.
                         */
                        mb();
                        h->alloc = true;
                }
        } else if (type & iovmm_mask) {
                int ret;

                ret = handle_page_alloc(client, h,
                        h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
                if (ret)
                        return;
                h->heap_type = NVMAP_HEAP_IOVMM;
                h->heap_pgalloc = true;
                mb();
                h->alloc = true;
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
        0,
};

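/*
 * Allocate backing storage for an already created handle. The requested heap
 * mask is walked according to the small/large policy above; the first heap
 * that succeeds wins. Returns 0 on success, -EEXIST if the handle is already
 * allocated, and -EINVAL or -ENOMEM otherwise.
 */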
int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align,
                       u8 kind,
                       unsigned int flags)
{
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_handle_get(h);

        if (!h)
                return -EINVAL;

        if (h->alloc) {
                nvmap_handle_put(h);
                return -EEXIST;
        }

        nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
        trace_nvmap_alloc_handle(client, h,
                h->size, heap_mask, align, flags,
                nvmap_stats_read(NS_TOTAL),
                nvmap_stats_read(NS_ALLOC));
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);
        h->kind = kind;

        /* convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        if (h->alloc) {
                if (client->kernel_client)
                        nvmap_stats_inc(NS_KALLOC, h->size);
                else
                        nvmap_stats_inc(NS_UALLOC, h->size);
        } else {
                nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
                nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
        }

        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

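/*
 * Drop one client-local reference (dupe) on a handle. When the last dupe for
 * this client goes away, the ref is unlinked from the client's rb-tree, any
 * remaining pins are released, and the dma_buf reference taken at creation
 * or duplication time is dropped. In all cases one handle reference is put.
 */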
void nvmap_free_handle(struct nvmap_client *client,
                       struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle(client, handle);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);
        client->handle_count--;
        atomic_dec(&ref->handle->share_count);

        nvmap_ref_unlock(client);

        if (pins)
                pr_debug("%s freeing pinned handle %p\n",
                            current->group_leader->comm, h);

        while (atomic_read(&ref->pin))
                __nvmap_unpin(ref);

        if (h->owner == client)
                h->owner = NULL;

        dma_buf_put(ref->handle->dmabuf);
        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}
EXPORT_SYMBOL(nvmap_free_handle);

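/*
 * Free a handle identified by a user-space id from the ioctl interface; the
 * extra reference obtained while looking up the id is dropped afterwards.
 */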
void nvmap_free_handle_user_id(struct nvmap_client *client,
                               unsigned long user_id)
{
        struct nvmap_handle *handle = unmarshal_user_handle(user_id);
        if (handle) {
                nvmap_free_handle(client, handle);
                nvmap_handle_put(handle);
        }
}

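/*
 * Insert a new handle_ref into the client's rb-tree of references, keyed by
 * handle pointer, and bump the per-client handle count and the handle's
 * share count.
 */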
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        client->handle_count++;
        if (client->handle_count > nvmap_max_handle_count)
                nvmap_max_handle_count = client->handle_count;
        atomic_inc(&ref->handle->share_count);
        nvmap_ref_unlock(client);
}

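/*
 * Create a new, unallocated handle of the given size for a client: the
 * handle starts with one reference, gets a backing dma_buf and a pre-made
 * device attachment, is added to the device handle tree, and is returned
 * wrapped in a fresh handle_ref on the client.
 */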
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        void *err = ERR_PTR(-ENOMEM);
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                goto ref_alloc_fail;

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);
        INIT_LIST_HEAD(&h->vmas);

        /*
         * This takes out 1 ref on the dmabuf. This corresponds to the
         * handle_ref that gets automatically made by nvmap_create_handle().
         */
        h->dmabuf = __nvmap_make_dmabuf(client, h);
        if (IS_ERR(h->dmabuf)) {
                err = h->dmabuf;
                goto make_dmabuf_fail;
        }

        /*
         * Pre-attach nvmap to this new dmabuf. The attachment is released
         * during the dma_buf_release() operation.
         */
        h->attachment = dma_buf_attach(h->dmabuf, nvmap_dev->dev_user.parent);
        if (IS_ERR(h->attachment)) {
                err = h->attachment;
                goto dma_buf_attach_fail;
        }

        nvmap_handle_add(nvmap_dev, h);

        /*
         * Major assumption here: the dma_buf object that the handle contains
         * is created with a ref count of 1.
         */
        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, client->name, h, size, ref);
        return ref;

dma_buf_attach_fail:
        dma_buf_put(h->dmabuf);
make_dmabuf_fail:
        kfree(ref);
ref_alloc_fail:
        kfree(h);
        return err;
}

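/*
 * Take an additional client-local reference on an existing, allocated
 * handle. If the client already holds a ref, only its dupe count is bumped;
 * otherwise a new handle_ref is created and a matching dma_buf reference is
 * taken.
 */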
struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val)
{
        struct nvmap_handle_ref *ref = NULL;

        BUG_ON(!client);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_validate_get(h);

        if (!h) {
                pr_debug("%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                pr_err("%s duplicating unallocated handle\n",
                        current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);

        /*
         * Ref counting on the dma_bufs follows the creation and destruction
         * of nvmap_handle_refs: every time a handle_ref is made the dma_buf
         * ref count goes up, and every time a handle_ref is destroyed the
         * dma_buf ref count goes down.
         */
        get_dma_buf(h->dmabuf);

        trace_nvmap_duplicate_handle(client, h, ref);
        return ref;
}

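/*
 * Create a client reference from a dma_buf file descriptor: look up the
 * nvmap handle behind the fd, duplicate it into this client, and drop the
 * lookup reference.
 */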
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd)
{
        struct nvmap_handle *handle;
        struct nvmap_handle_ref *ref;

        BUG_ON(!client);

        handle = nvmap_get_id_from_dmabuf_fd(client, fd);
        if (IS_ERR(handle))
                return ERR_CAST(handle);
        ref = nvmap_duplicate_handle(client, handle, 1);
        nvmap_handle_put(handle);
        return ref;
}

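/*
 * Duplicate a handle into a client and return the handle id
 * (__nvmap_ref_to_id() of the new ref), or NULL if the duplication fails.
 */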
struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
                                                        struct nvmap_handle *h)
{
        struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);

        if (IS_ERR(ref))
                return NULL;

        return __nvmap_ref_to_id(ref);
}
EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);

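/*
 * Report size, cache flags, page count and contiguity for a page-allocated
 * handle. Returns -EINVAL for handles that are invalid, unallocated, or not
 * backed by system pages.
 */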
int nvmap_get_page_list_info(struct nvmap_client *client,
                                struct nvmap_handle *handle, u32 *size,
                                u32 *flags, u32 *nr_page, bool *contig)
{
        struct nvmap_handle *h;

        BUG_ON(!size || !flags || !nr_page || !contig);
        BUG_ON(!client);

        *size = 0;
        *flags = 0;
        *nr_page = 0;

        h = nvmap_handle_get(handle);

        if (!h) {
                pr_err("%s query invalid handle %p\n",
                        current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                pr_err("%s query unallocated handle %p\n",
                        current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        *flags = h->flags;
        *size = h->orig_size;
        *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
        *contig = h->pgalloc.contig;

        nvmap_handle_put(h);
        return 0;
}
EXPORT_SYMBOL(nvmap_get_page_list_info);

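/*
 * Copy the handle's page pointers into a caller-supplied array and pin the
 * handle through the client's reference; the pin is dropped again by
 * nvmap_release_page_list().
 */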
int nvmap_acquire_page_list(struct nvmap_client *client,
                        struct nvmap_handle *handle, struct page **pages,
                        u32 nr_page)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref;
        int idx;
        phys_addr_t dummy;

        BUG_ON(!client);

        h = nvmap_handle_get(handle);

        if (!h) {
                pr_err("%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                pr_err("%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);

        for (idx = 0; idx < nr_page; idx++)
                pages[idx] = h->pgalloc.pages[idx];

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);
        if (ref)
                __nvmap_pin(ref, &dummy);
        nvmap_ref_unlock(client);

        return 0;
}
EXPORT_SYMBOL(nvmap_acquire_page_list);

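/*
 * Counterpart of nvmap_acquire_page_list(): drop the pin taken there and
 * release the handle reference obtained by the acquire path.
 */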
int nvmap_release_page_list(struct nvmap_client *client,
                                struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client);

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (ref)
                __nvmap_unpin(ref);

        nvmap_ref_unlock(client);

        if (ref)
                h = ref->handle;
        if (h)
                nvmap_handle_put(h);

        return 0;
}
EXPORT_SYMBOL(nvmap_release_page_list);

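/*
 * Look up a single NVMAP_HANDLE_PARAM_* value for a handle: size, alignment,
 * physical base (only meaningful while the handle is pinned), heap, or kind.
 */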
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result)
{
        int err = 0;

        if (WARN_ON(!virt_addr_valid(h)))
                return -EINVAL;

        switch (param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                *result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                *result = h->align;
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (!h->alloc || !atomic_read(&h->pin))
                        *result = -EINVAL;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->attachment->priv)
                        *result = sg_dma_address(
                                ((struct sg_table *)h->attachment->priv)->sgl);
                else
                        *result = -EINVAL;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        *result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else
                        *result = NVMAP_HEAP_IOVMM;
                break;
        case NVMAP_HANDLE_PARAM_KIND:
                *result = h->kind;
                break;
        case NVMAP_HANDLE_PARAM_COMPR:
                /* ignored, to be removed */
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

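/*
 * Validated wrapper around __nvmap_get_handle_param() that checks the ref,
 * client and result pointers before dereferencing them.
 */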
int nvmap_get_handle_param(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
        if (WARN_ON(!virt_addr_valid(ref)) ||
            WARN_ON(!virt_addr_valid(client)) ||
            WARN_ON(!result))
                return -EINVAL;

        return __nvmap_get_handle_param(client, ref->handle, param, result);
}