/*
 * drivers/video/tegra/nvmap/nvmap_handle.c
 *
 * Handle allocation and freeing routines for nvmap
 *
 * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#define pr_fmt(fmt)     "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
#include <linux/tegra-soc.h>

#include <asm/pgtable.h>

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
#include "nvmap_ioctl.h"

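/*
 * zero_memory: module parameter; when set, newly allocated pages are
 * zero-filled (__GFP_ZERO in handle_page_alloc()) and freed pages bypass the
 * page pool. Toggling it also clears the pool via zero_memory_set() below.
 * nvmap_max_handle_count: high-water mark of per-client handle counts,
 * updated in add_handle_ref().
 */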
bool zero_memory;
u32 nvmap_max_handle_count;

static int zero_memory_set(const char *arg, const struct kernel_param *kp)
{
        param_set_bool(arg, kp);
        nvmap_page_pool_clear();
        return 0;
}

static struct kernel_param_ops zero_memory_ops = {
        .get = param_get_bool,
        .set = zero_memory_set,
};

module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);


#define NVMAP_SECURE_HEAPS      (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM | \
                                 NVMAP_HEAP_CARVEOUT_VPR)

/* Handles may be arbitrarily large (16+ MiB), and any handle allocated from
 * the kernel (i.e., not a carveout handle) includes its array of pages. To
 * preserve kmalloc space, if the size of the page array exceeds
 * PAGELIST_VMALLOC_MIN, the array is allocated using vmalloc. */
#define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)

void *nvmap_altalloc(size_t len)
{
        if (len > PAGELIST_VMALLOC_MIN)
                return vmalloc(len);
        else
                return kmalloc(len, GFP_KERNEL);
}

void nvmap_altfree(void *ptr, size_t len)
{
        if (!ptr)
                return;

        if (len > PAGELIST_VMALLOC_MIN)
                vfree(ptr);
        else
                kfree(ptr);
}

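/*
 * Tear down a handle: remove it from the device's handle tree, drop any lazy
 * kernel vmapping, release the backing memory (carveout block or individual
 * pages, with pages returned to the page pool when zero_memory is not set),
 * and free the handle structure itself.
 */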
void _nvmap_handle_free(struct nvmap_handle *h)
{
        unsigned int i, nr_page, page_index = 0;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
#endif

        if (h->nvhost_priv)
                h->nvhost_priv_delete(h->nvhost_priv);

        if (nvmap_handle_remove(h->dev, h) != 0)
                return;

        if (!h->alloc)
                goto out;

        nvmap_stats_inc(NS_RELEASE, h->size);
        nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        if (!h->heap_pgalloc) {
                nvmap_heap_free(h->carveout);
                goto out;
        }

        nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);

        BUG_ON(h->size & ~PAGE_MASK);
        BUG_ON(!h->pgalloc.pages);

#ifdef NVMAP_LAZY_VFREE
        if (h->vaddr) {
                nvmap_kmaps_dec(h);
                vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
        }
#endif

        for (i = 0; i < nr_page; i++)
                h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);

#ifdef CONFIG_NVMAP_PAGE_POOLS
        if (!zero_memory) {
                pool = &nvmap_dev->pool;

                while (page_index < nr_page) {
                        if (!nvmap_page_pool_fill(pool,
                                h->pgalloc.pages[page_index]))
                                break;

                        page_index++;
                }
        }
#endif

        for (i = page_index; i < nr_page; i++)
                __free_page(h->pgalloc.pages[i]);

        nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));

out:
        kfree(h);
}

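/*
 * Allocate exactly size bytes worth of order-0 pages: a single higher-order
 * allocation is split into individual pages and the unused tail pages are
 * returned to the page allocator. The returned pages are physically
 * contiguous.
 */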
static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
{
        struct page *page, *p, *e;
        unsigned int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);
        page = alloc_pages(gfp, order);

        if (!page)
                return NULL;

        split_page(page, order);
        e = page + (1 << order);
        for (p = page + (size >> PAGE_SHIFT); p < e; p++)
                __free_page(p);

        return page;
}

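/*
 * Populate h->pgalloc.pages for an IOVMM (system memory) allocation. For a
 * contiguous request the whole buffer comes from one exact allocation;
 * otherwise pages are taken from the page pool first and the remainder from
 * the page allocator. Caches are flushed before the pages can be handed out.
 */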
static int handle_page_alloc(struct nvmap_client *client,
                             struct nvmap_handle *h, bool contiguous)
{
        int err = 0;
        size_t size = PAGE_ALIGN(h->size);
        unsigned int nr_page = size >> PAGE_SHIFT;
        pgprot_t prot;
        unsigned int i = 0, page_index = 0;
        struct page **pages;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        struct nvmap_page_pool *pool = NULL;
        phys_addr_t paddr;
#endif
        gfp_t gfp = GFP_NVMAP;
        unsigned long kaddr;
        pte_t **pte = NULL;

        if (zero_memory)
                gfp |= __GFP_ZERO;

        pages = nvmap_altalloc(nr_page * sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        prot = nvmap_pgprot(h, PG_PROT_KERNEL);

        if (contiguous) {
                struct page *page;
                page = nvmap_alloc_pages_exact(gfp, size);
                if (!page)
                        goto fail;

                for (i = 0; i < nr_page; i++)
                        pages[i] = nth_page(page, i);

        } else {
#ifdef CONFIG_NVMAP_PAGE_POOLS
                pool = &nvmap_dev->pool;

                for (i = 0; i < nr_page; i++) {
                        /* Get pages from pool, if available. */
                        pages[i] = nvmap_page_pool_alloc(pool);
                        if (!pages[i])
                                break;
                        page_index++;
                }
#endif
                for (i = page_index; i < nr_page; i++) {
                        pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
                        if (!pages[i])
                                goto fail;
                }
        }

        /*
         * Make sure any data in the caches is flushed out before
         * passing these pages to userspace. Otherwise, it can lead to
         * corruption in pages that get mapped as something other than WB in
         * userspace, and to leaked kernel data structures.
         */
        nvmap_flush_cache(pages, nr_page);

        if (err)
                goto fail;

        h->size = size;
        h->pgalloc.pages = pages;
        h->pgalloc.contig = contiguous;
        atomic_set(&h->pgalloc.ndirty, 0);
        return 0;

fail:
        while (i--)
                __free_page(pages[i]);
        nvmap_altfree(pages, nr_page * sizeof(*pages));
        wmb();
        return -ENOMEM;
}

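/*
 * Try to satisfy an allocation from a single heap type (exactly one bit of
 * the heap mask): carveout heaps go through nvmap_carveout_alloc(), IOVMM
 * through handle_page_alloc(). On success h->alloc is set last, behind a
 * memory barrier, so other CPUs never observe a half-initialized handle.
 */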
static void alloc_handle(struct nvmap_client *client,
                         struct nvmap_handle *h, unsigned int type)
{
        unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
        unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;

        BUG_ON(type & (type - 1));

#ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
        /* Convert generic carveout requests to iovmm requests. */
        carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
        iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
#endif

        if (type & carveout_mask) {
                struct nvmap_heap_block *b;

                b = nvmap_carveout_alloc(client, h, type);
                if (b) {
                        h->heap_type = type;
                        h->heap_pgalloc = false;
                        /* barrier to ensure all handle alloc data
                         * is visible before alloc is seen by other
                         * processors.
                         */
                        mb();
                        h->alloc = true;
                }
        } else if (type & iovmm_mask) {
                int ret;

                ret = handle_page_alloc(client, h,
                        h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
                if (ret)
                        return;
                h->heap_type = NVMAP_HEAP_IOVMM;
                h->heap_pgalloc = true;
                mb();
                h->alloc = true;
        }
}

/* small allocations will try to allocate from generic OS memory before
 * any of the limited heaps, to increase the effective memory for graphics
 * allocations, and to reduce fragmentation of the graphics heaps with
 * sub-page splinters */
static const unsigned int heap_policy_small[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_CARVEOUT_MASK,
        NVMAP_HEAP_IOVMM,
        0,
};

static const unsigned int heap_policy_large[] = {
        NVMAP_HEAP_CARVEOUT_VPR,
        NVMAP_HEAP_CARVEOUT_IRAM,
        NVMAP_HEAP_IOVMM,
        NVMAP_HEAP_CARVEOUT_MASK,
        0,
};

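/*
 * Back an existing (unallocated) handle with memory. The requested heap mask
 * is intersected with each entry of the size-dependent policy table above,
 * and alloc_handle() is attempted heap by heap (MSB to LSB within an entry)
 * until one succeeds. Returns 0 on success, -EEXIST if the handle is already
 * allocated, and -EINVAL or -ENOMEM on failure.
 */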
int nvmap_alloc_handle(struct nvmap_client *client,
                       struct nvmap_handle *h, unsigned int heap_mask,
                       size_t align,
                       u8 kind,
                       unsigned int flags)
{
        const unsigned int *alloc_policy;
        int nr_page;
        int err = -ENOMEM;

        h = nvmap_handle_get(h);

        if (!h)
                return -EINVAL;

        if (h->alloc) {
                nvmap_handle_put(h);
                return -EEXIST;
        }

        nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
        nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
        trace_nvmap_alloc_handle(client, h,
                h->size, heap_mask, align, flags,
                nvmap_stats_read(NS_TOTAL),
                nvmap_stats_read(NS_ALLOC));
        h->userflags = flags;
        nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);
        h->kind = kind;
        h->map_resources = 0;

#ifndef CONFIG_TEGRA_IOVMM
        /* convert iovmm requests to generic carveout. */
        if (heap_mask & NVMAP_HEAP_IOVMM) {
                heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
                            NVMAP_HEAP_CARVEOUT_GENERIC;
        }
#endif
        /* secure allocations can only be served from secure heaps */
        if (h->secure)
                heap_mask &= NVMAP_SECURE_HEAPS;

        if (!heap_mask) {
                err = -EINVAL;
                goto out;
        }

        alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;

        while (!h->alloc && *alloc_policy) {
                unsigned int heap_type;

                heap_type = *alloc_policy++;
                heap_type &= heap_mask;

                if (!heap_type)
                        continue;

                heap_mask &= ~heap_type;

                while (heap_type && !h->alloc) {
                        unsigned int heap;

                        /* iterate possible heaps MSB-to-LSB, since higher-
                         * priority carveouts will have higher usage masks */
                        heap = 1 << __fls(heap_type);
                        alloc_handle(client, h, heap);
                        heap_type &= ~heap;
                }
        }

out:
        if (h->alloc) {
                if (client->kernel_client)
                        nvmap_stats_inc(NS_KALLOC, h->size);
                else
                        nvmap_stats_inc(NS_UALLOC, h->size);
        } else {
                nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
                nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
        }

        err = (h->alloc) ? 0 : err;
        nvmap_handle_put(h);
        return err;
}

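/*
 * Drop one client reference (dupe) on a handle. When the last dupe for this
 * client goes away, the ref is unlinked from the client's rbtree, any
 * remaining pins are released, ownership is cleared if this client owns the
 * handle, and the dmabuf reference taken at create/duplicate time is dropped.
 */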
void nvmap_free_handle(struct nvmap_client *client,
                       struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h;
        int pins;

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (!ref) {
                nvmap_ref_unlock(client);
                return;
        }

        trace_nvmap_free_handle(client, handle);
        BUG_ON(!ref->handle);
        h = ref->handle;

        if (atomic_dec_return(&ref->dupes)) {
                nvmap_ref_unlock(client);
                goto out;
        }

        smp_rmb();
        pins = atomic_read(&ref->pin);
        rb_erase(&ref->node, &client->handle_refs);
        client->handle_count--;
        atomic_dec(&ref->handle->share_count);

        nvmap_ref_unlock(client);

        if (pins)
                nvmap_debug(client, "%s freeing pinned handle %p\n",
                            current->group_leader->comm, h);

        while (atomic_read(&ref->pin))
                __nvmap_unpin(ref);

        if (h->owner == client) {
                h->owner = NULL;
                h->owner_ref = NULL;
        }

        dma_buf_put(ref->handle->dmabuf);
        kfree(ref);

out:
        BUG_ON(!atomic_read(&h->ref));
        nvmap_handle_put(h);
}
EXPORT_SYMBOL(nvmap_free_handle);

void nvmap_free_handle_user_id(struct nvmap_client *client,
                               unsigned long user_id)
{
        nvmap_free_handle(client, unmarshal_user_id(user_id));
}

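/*
 * Insert a new handle_ref into the client's rbtree (keyed by handle pointer),
 * bump the client's handle count and the handle's share count, and update the
 * global nvmap_max_handle_count high-water mark.
 */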
static void add_handle_ref(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref)
{
        struct rb_node **p, *parent = NULL;

        nvmap_ref_lock(client);
        p = &client->handle_refs.rb_node;
        while (*p) {
                struct nvmap_handle_ref *node;
                parent = *p;
                node = rb_entry(parent, struct nvmap_handle_ref, node);
                if (ref->handle > node->handle)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&ref->node, parent, p);
        rb_insert_color(&ref->node, &client->handle_refs);
        client->handle_count++;
        if (client->handle_count > nvmap_max_handle_count)
                nvmap_max_handle_count = client->handle_count;
        atomic_inc(&ref->handle->share_count);
        nvmap_ref_unlock(client);
}

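/*
 * Create a new, unallocated handle of the given size for a client: allocate
 * the handle and its first handle_ref, create the backing dmabuf, pre-attach
 * nvmap's device to it, add the handle to the device tree and the ref to the
 * client. Returns the handle_ref, or an ERR_PTR() on failure.
 */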
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size)
{
        void *err = ERR_PTR(-ENOMEM);
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref = NULL;

        if (!client)
                return ERR_PTR(-EINVAL);

        if (!size)
                return ERR_PTR(-EINVAL);

        h = kzalloc(sizeof(*h), GFP_KERNEL);
        if (!h)
                return ERR_PTR(-ENOMEM);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                goto ref_alloc_fail;

        atomic_set(&h->ref, 1);
        atomic_set(&h->pin, 0);
        h->owner = client;
        h->owner_ref = ref;
        h->dev = nvmap_dev;
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
        mutex_init(&h->lock);
        INIT_LIST_HEAD(&h->vmas);
        INIT_LIST_HEAD(&h->lru);

        /*
         * This takes out 1 ref on the dmabuf. This corresponds to the
         * handle_ref that gets automatically made by nvmap_create_handle().
         */
        h->dmabuf = __nvmap_make_dmabuf(client, h);
        if (IS_ERR(h->dmabuf)) {
                err = h->dmabuf;
                goto make_dmabuf_fail;
        }

        /*
         * Pre-attach nvmap to this new dmabuf. This gets detached during the
         * dma_buf_release() operation.
         */
        h->attachment = dma_buf_attach(h->dmabuf, &nvmap_pdev->dev);
        if (IS_ERR(h->attachment)) {
                err = h->attachment;
                goto dma_buf_attach_fail;
        }

        nvmap_handle_add(nvmap_dev, h);

        /*
         * Major assumption here: the dma_buf object that the handle contains
         * is created with a ref count of 1.
         */
        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);
        trace_nvmap_create_handle(client, client->name, h, size, ref);
        return ref;

dma_buf_attach_fail:
        dma_buf_put(h->dmabuf);
make_dmabuf_fail:
        kfree(ref);
ref_alloc_fail:
        kfree(h);
        return err;
}

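/*
 * Take an additional client-side reference on an already allocated handle.
 * If the client already holds a ref for this handle, only its dupe count is
 * incremented; otherwise a new handle_ref is created and an extra dmabuf
 * reference is taken to match it. Returns the handle_ref or an ERR_PTR().
 */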
struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
                                        struct nvmap_handle *h, bool skip_val)
{
        struct nvmap_handle_ref *ref = NULL;

        BUG_ON(!client);
        /* on success, the reference count for the handle should be
         * incremented, so the success paths will not call nvmap_handle_put */
        h = nvmap_handle_get(h);

        if (!h) {
                nvmap_debug(client, "%s duplicate handle failed\n",
                            current->group_leader->comm);
                return ERR_PTR(-EPERM);
        }

        if (!h->alloc) {
                nvmap_err(client, "%s duplicating unallocated handle\n",
                          current->group_leader->comm);
                nvmap_handle_put(h);
                return ERR_PTR(-EINVAL);
        }

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);

        if (ref) {
                /* handle already duplicated in client; just increment
                 * the reference count rather than re-duplicating it */
                atomic_inc(&ref->dupes);
                nvmap_ref_unlock(client);
                return ref;
        }

        nvmap_ref_unlock(client);

        ref = kzalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref) {
                nvmap_handle_put(h);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&ref->dupes, 1);
        ref->handle = h;
        atomic_set(&ref->pin, 0);
        add_handle_ref(client, ref);

        /*
         * Ref counting on the dma_bufs follows the creation and destruction
         * of nvmap_handle_refs. That is, every time a handle_ref is made the
         * dma_buf ref count goes up, and every time a handle_ref is destroyed
         * the dma_buf ref count goes down.
         */
        get_dma_buf(h->dmabuf);

        trace_nvmap_duplicate_handle(client, h, ref);
        return ref;
}

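/*
 * Look up the nvmap handle associated with a dmabuf fd and duplicate it into
 * this client, returning the resulting handle_ref (or an ERR_PTR() from the
 * lookup).
 */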
struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd)
{
        struct nvmap_handle *handle;
        struct nvmap_handle_ref *ref;

        BUG_ON(!client);

        handle = nvmap_get_id_from_dmabuf_fd(client, fd);
        if (IS_ERR(handle))
                return ERR_CAST(handle);
        ref = nvmap_duplicate_handle(client, handle, 1);
        return ref;
}

struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
                                                        struct nvmap_handle *h)
{
        struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);

        if (IS_ERR(ref))
                return 0;

        return __nvmap_ref_to_id(ref);
}
EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);

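/*
 * Report the original size, cache flags, page count and contiguity of a
 * page-backed (IOVMM) handle. Fails with -EINVAL for invalid, unallocated or
 * carveout-backed handles.
 */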
int nvmap_get_page_list_info(struct nvmap_client *client,
                                struct nvmap_handle *handle, u32 *size,
                                u32 *flags, u32 *nr_page, bool *contig)
{
        struct nvmap_handle *h;

        BUG_ON(!size || !flags || !nr_page || !contig);
        BUG_ON(!client);

        *size = 0;
        *flags = 0;
        *nr_page = 0;

        h = nvmap_handle_get(handle);

        if (!h) {
                nvmap_err(client, "%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                nvmap_err(client, "%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        *flags = h->flags;
        *size = h->orig_size;
        *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
        *contig = h->pgalloc.contig;

        nvmap_handle_put(h);
        return 0;
}
EXPORT_SYMBOL(nvmap_get_page_list_info);

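/*
 * Copy a page-backed handle's page pointers into the caller's array and pin
 * the handle on the client's behalf; the pin and the handle reference taken
 * here are expected to be dropped via nvmap_release_page_list() below.
 * nr_page must equal the handle's page count.
 */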
int nvmap_acquire_page_list(struct nvmap_client *client,
                        struct nvmap_handle *handle, struct page **pages,
                        u32 nr_page)
{
        struct nvmap_handle *h;
        struct nvmap_handle_ref *ref;
        int idx;
        phys_addr_t dummy;

        BUG_ON(!client);

        h = nvmap_handle_get(handle);

        if (!h) {
                nvmap_err(client, "%s query invalid handle %p\n",
                          current->group_leader->comm, handle);
                return -EINVAL;
        }

        if (!h->alloc || !h->heap_pgalloc) {
                nvmap_err(client, "%s query unallocated handle %p\n",
                          current->group_leader->comm, handle);
                nvmap_handle_put(h);
                return -EINVAL;
        }

        BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);

        for (idx = 0; idx < nr_page; idx++)
                pages[idx] = h->pgalloc.pages[idx];

        nvmap_ref_lock(client);
        ref = __nvmap_validate_locked(client, h);
        if (ref)
                __nvmap_pin(ref, &dummy);
        nvmap_ref_unlock(client);

        return 0;
}
EXPORT_SYMBOL(nvmap_acquire_page_list);

int nvmap_release_page_list(struct nvmap_client *client,
                                struct nvmap_handle *handle)
{
        struct nvmap_handle_ref *ref;
        struct nvmap_handle *h = NULL;

        BUG_ON(!client);

        nvmap_ref_lock(client);

        ref = __nvmap_validate_locked(client, handle);
        if (ref)
                __nvmap_unpin(ref);

        nvmap_ref_unlock(client);

        if (ref)
                h = ref->handle;
        if (h)
                nvmap_handle_put(h);

        return 0;
}
EXPORT_SYMBOL(nvmap_release_page_list);

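/*
 * Read a single attribute of a handle (size, alignment, physical base, heap,
 * kind, ...). The BASE query is only meaningful while the handle is allocated
 * and pinned; otherwise -EINVAL is stored in *result.
 */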
int __nvmap_get_handle_param(struct nvmap_client *client,
                             struct nvmap_handle *h, u32 param, u64 *result)
{
        int err = 0;

        if (WARN_ON(!virt_addr_valid(h)))
                return -EINVAL;

        switch (param) {
        case NVMAP_HANDLE_PARAM_SIZE:
                *result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
                *result = h->align;
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (!h->alloc || !atomic_read(&h->pin))
                        *result = -EINVAL;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = h->carveout->base;
                        mutex_unlock(&h->lock);
                } else if (h->pgalloc.contig)
                        *result = page_to_phys(h->pgalloc.pages[0]);
                else if (h->attachment->priv)
                        *result = sg_dma_address(
                                ((struct sg_table *)h->attachment->priv)->sgl);
                else
                        *result = -EINVAL;
                break;
        case NVMAP_HANDLE_PARAM_HEAP:
                if (!h->alloc)
                        *result = 0;
                else if (!h->heap_pgalloc) {
                        mutex_lock(&h->lock);
                        *result = nvmap_carveout_usage(client, h->carveout);
                        mutex_unlock(&h->lock);
                } else
                        *result = NVMAP_HEAP_IOVMM;
                break;
        case NVMAP_HANDLE_PARAM_KIND:
                *result = h->kind;
                break;
        case NVMAP_HANDLE_PARAM_COMPR:
                /* ignored, to be removed */
                break;
        default:
                err = -EINVAL;
                break;
        }
        return err;
}

int nvmap_get_handle_param(struct nvmap_client *client,
                           struct nvmap_handle_ref *ref, u32 param, u64 *result)
{
        if (WARN_ON(!virt_addr_valid(ref)) ||
            WARN_ON(!virt_addr_valid(client)) ||
            WARN_ON(!result))
                return -EINVAL;

        return __nvmap_get_handle_param(client, ref->handle, param, result);
}