1 /*
2  * drivers/video/tegra/nvmap/nvmap_handle.c
3  *
4  * Handle allocation and freeing routines for nvmap
5  *
6  * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #define pr_fmt(fmt)     "%s: " fmt, __func__
24
25 #include <linux/err.h>
26 #include <linux/io.h>
27 #include <linux/kernel.h>
28 #include <linux/list.h>
29 #include <linux/mm.h>
30 #include <linux/rbtree.h>
31 #include <linux/dma-buf.h>
32 #include <linux/moduleparam.h>
33 #include <linux/nvmap.h>
34 #include <linux/tegra-soc.h>
35
36 #include <asm/pgtable.h>
37
38 #include <trace/events/nvmap.h>
39
40 #include "nvmap_priv.h"
41 #include "nvmap_ioctl.h"
42
43 #ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
44 bool zero_memory = true;
45 #else
46 bool zero_memory;
47 #endif
48
49 static int zero_memory_set(const char *arg, const struct kernel_param *kp)
50 {
51 #ifdef CONFIG_NVMAP_FORCE_ZEROED_USER_PAGES
52         return -EPERM;
53 #else
54         param_set_bool(arg, kp);
55 #ifdef CONFIG_NVMAP_PAGE_POOLS
56         nvmap_page_pool_clear();
57 #endif
58         return 0;
59 #endif
60 }
61
62 static struct kernel_param_ops zero_memory_ops = {
63         .get = param_get_bool,
64         .set = zero_memory_set,
65 };
66
67 module_param_cb(zero_memory, &zero_memory_ops, &zero_memory, 0644);
68
69 u32 nvmap_max_handle_count;
70
71 /* Handles may be arbitrarily large (16+ MiB), and any handle allocated from
72  * the kernel (i.e., not a carveout handle) includes its array of pages. To
73  * preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
74  * the array is allocated using vmalloc. */
75 #define PAGELIST_VMALLOC_MIN    (PAGE_SIZE)
76
77 void *nvmap_altalloc(size_t len)
78 {
79         if (len > PAGELIST_VMALLOC_MIN)
80                 return vmalloc(len);
81         else
82                 return kmalloc(len, GFP_KERNEL);
83 }
84
85 void nvmap_altfree(void *ptr, size_t len)
86 {
87         if (!ptr)
88                 return;
89
90         if (len > PAGELIST_VMALLOC_MIN)
91                 vfree(ptr);
92         else
93                 kfree(ptr);
94 }
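
/*
 * Illustrative sketch (not part of the driver): callers are expected to pair
 * nvmap_altalloc() with nvmap_altfree() and to pass the same length to both,
 * since the length decides whether kfree() or vfree() is the correct release
 * path. A hypothetical caller building a page array might do:
 *
 *	struct page **pages;
 *	size_t len = nr_page * sizeof(*pages);
 *
 *	pages = nvmap_altalloc(len);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	nvmap_altfree(pages, len);
 */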
95
96 void _nvmap_handle_free(struct nvmap_handle *h)
97 {
98         unsigned int i, nr_page, page_index = 0;
99         struct nvmap_page_pool *pool;
100
101         if (h->nvhost_priv)
102                 h->nvhost_priv_delete(h->nvhost_priv);
103
104         if (nvmap_handle_remove(nvmap_dev, h) != 0)
105                 return;
106
107         if (!h->alloc)
108                 goto out;
109
110         nvmap_stats_inc(NS_RELEASE, h->size);
111         nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
112         if (!h->heap_pgalloc) {
113                 nvmap_heap_free(h->carveout);
114                 goto out;
115         }
116
117         nr_page = DIV_ROUND_UP(h->size, PAGE_SIZE);
118
119         BUG_ON(h->size & ~PAGE_MASK);
120         BUG_ON(!h->pgalloc.pages);
121
122 #ifdef NVMAP_LAZY_VFREE
123         if (h->vaddr) {
124                 nvmap_kmaps_dec(h);
125                 vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
126         }
127 #endif
128
129         for (i = 0; i < nr_page; i++)
130                 h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
131
132         if (!zero_memory) {
133                 pool = &nvmap_dev->pool;
134
135                 nvmap_page_pool_lock(pool);
136                 page_index = __nvmap_page_pool_fill_lots_locked(pool,
137                                                 h->pgalloc.pages, nr_page);
138                 nvmap_page_pool_unlock(pool);
139         }
140
141         for (i = page_index; i < nr_page; i++)
142                 __free_page(h->pgalloc.pages[i]);
143
144         nvmap_altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
145
146 out:
147         kfree(h);
148 }
149
150 static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
151 {
152         struct page *page, *p, *e;
153         unsigned int order;
154
155         size = PAGE_ALIGN(size);
156         order = get_order(size);
157         page = alloc_pages(gfp, order);
158
159         if (!page)
160                 return NULL;
161
162         split_page(page, order);
163         e = page + (1 << order);
164         for (p = page + (size >> PAGE_SHIFT); p < e; p++)
165                 __free_page(p);
166
167         return page;
168 }
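
/*
 * Worked example (illustrative only, assuming 4 KiB pages): for a 12 KiB
 * request, PAGE_ALIGN() leaves the size unchanged and get_order() returns 2,
 * so alloc_pages() hands back a 16 KiB (four-page) block. split_page() turns
 * it into four independent order-0 pages, and the loop above frees the one
 * page beyond the requested size, leaving three physically contiguous pages
 * for the caller.
 */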
169
170 static int handle_page_alloc(struct nvmap_client *client,
171                              struct nvmap_handle *h, bool contiguous)
172 {
173         size_t size = PAGE_ALIGN(h->size);
174         unsigned int nr_page = size >> PAGE_SHIFT;
175         pgprot_t prot;
176         unsigned int i = 0, page_index = 0;
177         struct page **pages;
178 #ifdef CONFIG_NVMAP_PAGE_POOLS
179         struct nvmap_page_pool *pool = NULL;
180 #endif
181         gfp_t gfp = GFP_NVMAP;
182
183         if (zero_memory)
184                 gfp |= __GFP_ZERO;
185
186         pages = nvmap_altalloc(nr_page * sizeof(*pages));
187         if (!pages)
188                 return -ENOMEM;
189
190         prot = nvmap_pgprot(h, PG_PROT_KERNEL);
191
192         if (contiguous) {
193                 struct page *page;
194                 page = nvmap_alloc_pages_exact(gfp, size);
195                 if (!page)
196                         goto fail;
197
198                 for (i = 0; i < nr_page; i++)
199                         pages[i] = nth_page(page, i);
200
201         } else {
202 #ifdef CONFIG_NVMAP_PAGE_POOLS
203                 pool = &nvmap_dev->pool;
204
205                 /*
206                  * Get as many pages from the pools as possible.
207                  */
208                 nvmap_page_pool_lock(pool);
209                 page_index = __nvmap_page_pool_alloc_lots_locked(pool, pages,
210                                                                  nr_page);
211                 nvmap_page_pool_unlock(pool);
212 #endif
213                 for (i = page_index; i < nr_page; i++) {
214                         pages[i] = nvmap_alloc_pages_exact(gfp, PAGE_SIZE);
215                         if (!pages[i])
216                                 goto fail;
217                 }
218         }
219
220         /*
221          * Make sure any data in the caches is cleaned out before
222          * passing these pages to userspace. Otherwise it can lead to
223          * corruption in pages that get mapped as something other than WB in
224          * userspace, and to leaked kernel data structures.
225          *
226          * FIXME: For ARMv7 we don't have __clean_dcache_page() so we continue
227          * to use the flush cache version.
228          */
229 #ifdef ARM64
230         nvmap_clean_cache(pages, nr_page);
231 #else
232         nvmap_flush_cache(pages, nr_page);
233 #endif
234
235         h->size = size;
236         h->pgalloc.pages = pages;
237         h->pgalloc.contig = contiguous;
238         atomic_set(&h->pgalloc.ndirty, 0);
239         return 0;
240
241 fail:
242         while (i--)
243                 __free_page(pages[i]);
244         nvmap_altfree(pages, nr_page * sizeof(*pages));
245         wmb();
246         return -ENOMEM;
247 }
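
/*
 * Worked example (illustrative only): for a non-contiguous 64 KiB handle
 * (16 pages, assuming 4 KiB pages), handle_page_alloc() first asks the shared
 * page pool (when CONFIG_NVMAP_PAGE_POOLS is enabled) for as many pages as it
 * can get. If the pool supplies 10, page_index is 10 and only the remaining
 * 6 pages fall back to nvmap_alloc_pages_exact(). On failure, the error path
 * frees every page gathered so far, whether it came from the pool or from the
 * page allocator.
 */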
248
249 static void alloc_handle(struct nvmap_client *client,
250                          struct nvmap_handle *h, unsigned int type)
251 {
252         unsigned int carveout_mask = NVMAP_HEAP_CARVEOUT_MASK;
253         unsigned int iovmm_mask = NVMAP_HEAP_IOVMM;
254
255         BUG_ON(type & (type - 1));
256
257 #ifdef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
258         /* Convert generic carveout requests to iovmm requests. */
259         carveout_mask &= ~NVMAP_HEAP_CARVEOUT_GENERIC;
260         iovmm_mask |= NVMAP_HEAP_CARVEOUT_GENERIC;
261 #endif
262
263         if (type & carveout_mask) {
264                 struct nvmap_heap_block *b;
265
266                 b = nvmap_carveout_alloc(client, h, type);
267                 if (b) {
268                         h->heap_pgalloc = false;
269                         /* Barrier to ensure all handle allocation data
270                          * is visible before h->alloc is seen by other
271                          * processors.
272                          */
273                         mb();
274                         h->alloc = true;
275                         nvmap_carveout_commit_add(client,
276                                 nvmap_heap_to_arg(nvmap_block_to_heap(b)),
277                                 h->size);
278                 }
279         } else if (type & iovmm_mask) {
280                 int ret;
281
282                 ret = handle_page_alloc(client, h,
283                         h->userflags & NVMAP_HANDLE_PHYS_CONTIG);
284                 if (ret)
285                         return;
286                 h->heap_pgalloc = true;
287                 mb();
288                 h->alloc = true;
289         }
290 }
291
292 /* Small allocations will try to allocate from generic OS memory before
293  * any of the limited heaps, to increase the effective memory for graphics
294  * allocations, and to reduce fragmentation of the graphics heaps with
295  * sub-page splinters; see the worked example after the policy tables below. */
296 static const unsigned int heap_policy_small[] = {
297         NVMAP_HEAP_CARVEOUT_VPR,
298         NVMAP_HEAP_CARVEOUT_IRAM,
299         NVMAP_HEAP_CARVEOUT_MASK,
300         NVMAP_HEAP_IOVMM,
301         0,
302 };
303
304 static const unsigned int heap_policy_large[] = {
305         NVMAP_HEAP_CARVEOUT_VPR,
306         NVMAP_HEAP_CARVEOUT_IRAM,
307         NVMAP_HEAP_IOVMM,
308         NVMAP_HEAP_CARVEOUT_MASK,
309         0,
310 };
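
/*
 * Worked example (illustrative only): a one-page request whose caller passed
 * NVMAP_HEAP_IOVMM is first rewritten by nvmap_alloc_handle() below to
 * NVMAP_HEAP_CARVEOUT_GENERIC. heap_policy_small is then walked in order:
 * VPR and IRAM do not intersect the mask and are skipped, and the
 * NVMAP_HEAP_CARVEOUT_MASK entry narrows to the generic carveout, which
 * alloc_handle() serves from the carveout heap (or from IOVMM pages when
 * CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM converts it back). Requests larger
 * than one page walk heap_policy_large instead, which places NVMAP_HEAP_IOVMM
 * ahead of the full carveout mask.
 */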
311
312 int nvmap_alloc_handle(struct nvmap_client *client,
313                        struct nvmap_handle *h, unsigned int heap_mask,
314                        size_t align,
315                        u8 kind,
316                        unsigned int flags)
317 {
318         const unsigned int *alloc_policy;
319         int nr_page;
320         int err = -ENOMEM;
321
322         h = nvmap_handle_get(h);
323
324         if (!h)
325                 return -EINVAL;
326
327         if (h->alloc) {
328                 nvmap_handle_put(h);
329                 return -EEXIST;
330         }
331
332         nvmap_stats_inc(NS_TOTAL, PAGE_ALIGN(h->orig_size));
333         nvmap_stats_inc(NS_ALLOC, PAGE_ALIGN(h->size));
334         trace_nvmap_alloc_handle(client, h,
335                 h->size, heap_mask, align, flags,
336                 nvmap_stats_read(NS_TOTAL),
337                 nvmap_stats_read(NS_ALLOC));
338         h->userflags = flags;
339         nr_page = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
340         h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
341         h->align = max_t(size_t, align, L1_CACHE_BYTES);
342         h->kind = kind;
343
344         /* convert iovmm requests to generic carveout. */
345         if (heap_mask & NVMAP_HEAP_IOVMM) {
346                 heap_mask = (heap_mask & ~NVMAP_HEAP_IOVMM) |
347                             NVMAP_HEAP_CARVEOUT_GENERIC;
348         }
349
350         if (!heap_mask) {
351                 err = -EINVAL;
352                 goto out;
353         }
354
355         alloc_policy = (nr_page == 1) ? heap_policy_small : heap_policy_large;
356
357         while (!h->alloc && *alloc_policy) {
358                 unsigned int heap_type;
359
360                 heap_type = *alloc_policy++;
361                 heap_type &= heap_mask;
362
363                 if (!heap_type)
364                         continue;
365
366                 heap_mask &= ~heap_type;
367
368                 while (heap_type && !h->alloc) {
369                         unsigned int heap;
370
371                         /* iterate possible heaps MSB-to-LSB, since higher-
372                          * priority carveouts will have higher usage masks */
373                         heap = 1 << __fls(heap_type);
374                         alloc_handle(client, h, heap);
375                         heap_type &= ~heap;
376                 }
377         }
378
379 out:
380         if (h->alloc) {
381                 if (client->kernel_client)
382                         nvmap_stats_inc(NS_KALLOC, h->size);
383                 else
384                         nvmap_stats_inc(NS_UALLOC, h->size);
385         } else {
386                 nvmap_stats_dec(NS_TOTAL, PAGE_ALIGN(h->orig_size));
387                 nvmap_stats_dec(NS_ALLOC, PAGE_ALIGN(h->orig_size));
388         }
389
390         err = (h->alloc) ? 0 : err;
391         nvmap_handle_put(h);
392         return err;
393 }
394
395 void nvmap_free_handle(struct nvmap_client *client,
396                        struct nvmap_handle *handle)
397 {
398         struct nvmap_handle_ref *ref;
399         struct nvmap_handle *h;
400         int pins;
401
402         nvmap_ref_lock(client);
403
404         ref = __nvmap_validate_locked(client, handle);
405         if (!ref) {
406                 nvmap_ref_unlock(client);
407                 return;
408         }
409
410         trace_nvmap_free_handle(client, handle);
411         BUG_ON(!ref->handle);
412         h = ref->handle;
413
414         if (atomic_dec_return(&ref->dupes)) {
415                 nvmap_ref_unlock(client);
416                 goto out;
417         }
418
419         smp_rmb();
420         pins = atomic_read(&ref->pin);
421         rb_erase(&ref->node, &client->handle_refs);
422         client->handle_count--;
423         atomic_dec(&ref->handle->share_count);
424
425         if (h->alloc && !h->heap_pgalloc) {
426                 mutex_lock(&h->lock);
427                 nvmap_carveout_commit_subtract(client,
428                         nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
429                         h->size);
430                 mutex_unlock(&h->lock);
431         }
432
433         nvmap_ref_unlock(client);
434
435         if (pins)
436                 pr_debug("%s freeing pinned handle %p\n",
437                             current->group_leader->comm, h);
438
439         while (atomic_read(&ref->pin))
440                 __nvmap_unpin(ref);
441
442         if (h->owner == client)
443                 h->owner = NULL;
444
445         dma_buf_put(ref->handle->dmabuf);
446         kfree(ref);
447
448 out:
449         BUG_ON(!atomic_read(&h->ref));
450         nvmap_handle_put(h);
451 }
452 EXPORT_SYMBOL(nvmap_free_handle);
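
/*
 * Worked example (illustrative only): a handle duplicated twice within the
 * same client has ref->dupes == 2. The first nvmap_free_handle() call only
 * drops dupes to 1 and releases one reference on the underlying nvmap_handle.
 * The second call removes the ref from the client's rbtree, unpins any
 * remaining pins, drops one dma_buf reference via dma_buf_put(), and releases
 * this client's last nvmap_handle reference through nvmap_handle_put().
 */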
453
454 void nvmap_free_handle_user_id(struct nvmap_client *client,
455                                unsigned long user_id)
456 {
457         nvmap_free_handle(client, nvmap_fd_to_handle(user_id));
458 }
459
460 static void add_handle_ref(struct nvmap_client *client,
461                            struct nvmap_handle_ref *ref)
462 {
463         struct rb_node **p, *parent = NULL;
464
465         nvmap_ref_lock(client);
466         p = &client->handle_refs.rb_node;
467         while (*p) {
468                 struct nvmap_handle_ref *node;
469                 parent = *p;
470                 node = rb_entry(parent, struct nvmap_handle_ref, node);
471                 if (ref->handle > node->handle)
472                         p = &parent->rb_right;
473                 else
474                         p = &parent->rb_left;
475         }
476         rb_link_node(&ref->node, parent, p);
477         rb_insert_color(&ref->node, &client->handle_refs);
478         client->handle_count++;
479         if (client->handle_count > nvmap_max_handle_count)
480                 nvmap_max_handle_count = client->handle_count;
481         atomic_inc(&ref->handle->share_count);
482         nvmap_ref_unlock(client);
483 }
484
485 struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
486                                              size_t size)
487 {
488         void *err = ERR_PTR(-ENOMEM);
489         struct nvmap_handle *h;
490         struct nvmap_handle_ref *ref = NULL;
491
492         if (!client)
493                 return ERR_PTR(-EINVAL);
494
495         if (!size)
496                 return ERR_PTR(-EINVAL);
497
498         h = kzalloc(sizeof(*h), GFP_KERNEL);
499         if (!h)
500                 return ERR_PTR(-ENOMEM);
501
502         ref = kzalloc(sizeof(*ref), GFP_KERNEL);
503         if (!ref)
504                 goto ref_alloc_fail;
505
506         atomic_set(&h->ref, 1);
507         atomic_set(&h->pin, 0);
508         h->owner = client;
509         BUG_ON(!h->owner);
510         h->size = h->orig_size = size;
511         h->flags = NVMAP_HANDLE_WRITE_COMBINE;
512         mutex_init(&h->lock);
513         INIT_LIST_HEAD(&h->vmas);
514         INIT_LIST_HEAD(&h->lru);
515
516         /*
517          * This takes out 1 ref on the dmabuf. This corresponds to the
518          * handle_ref that gets automatically made by nvmap_create_handle().
519          */
520         h->dmabuf = __nvmap_make_dmabuf(client, h);
521         if (IS_ERR(h->dmabuf)) {
522                 err = h->dmabuf;
523                 goto make_dmabuf_fail;
524         }
525
526         /*
527          * Pre-attach nvmap to this new dmabuf. The attachment is detached during
528          * the dma_buf_release() operation.
529          */
530         h->attachment = dma_buf_attach(h->dmabuf, nvmap_dev->dev_user.parent);
531         if (IS_ERR(h->attachment)) {
532                 err = h->attachment;
533                 goto dma_buf_attach_fail;
534         }
535
536         nvmap_handle_add(nvmap_dev, h);
537
538         /*
539          * Major assumption here: the dma_buf object that the handle contains
540          * is created with a ref count of 1.
541          */
542         atomic_set(&ref->dupes, 1);
543         ref->handle = h;
544         atomic_set(&ref->pin, 0);
545         add_handle_ref(client, ref);
546         trace_nvmap_create_handle(client, client->name, h, size, ref);
547         return ref;
548
549 dma_buf_attach_fail:
550         dma_buf_put(h->dmabuf);
551 make_dmabuf_fail:
552         kfree(ref);
553 ref_alloc_fail:
554         kfree(h);
555         return err;
556 }
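
/*
 * Typical kernel-side lifecycle (illustrative sketch; `client' is assumed to
 * have been obtained elsewhere and error handling is abbreviated):
 *
 *	struct nvmap_handle_ref *ref;
 *	int err;
 *
 *	ref = nvmap_create_handle(client, SZ_64K);
 *	if (IS_ERR(ref))
 *		return PTR_ERR(ref);
 *
 *	err = nvmap_alloc_handle(client, ref->handle, NVMAP_HEAP_IOVMM,
 *				 PAGE_SIZE, 0, NVMAP_HANDLE_WRITE_COMBINE);
 *	if (err) {
 *		nvmap_free_handle(client, ref->handle);
 *		return err;
 *	}
 *
 *	... use the buffer ...
 *
 *	nvmap_free_handle(client, ref->handle);
 */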
557
558 struct nvmap_handle_ref *nvmap_duplicate_handle(struct nvmap_client *client,
559                                         struct nvmap_handle *h, bool skip_val)
560 {
561         struct nvmap_handle_ref *ref = NULL;
562
563         BUG_ON(!client);
564         /* on success, the reference count for the handle should be
565          * incremented, so the success paths will not call nvmap_handle_put */
566         h = nvmap_validate_get(h);
567
568         if (!h) {
569                 pr_debug("%s duplicate handle failed\n",
570                             current->group_leader->comm);
571                 return ERR_PTR(-EPERM);
572         }
573
574         if (!h->alloc) {
575                 pr_err("%s duplicating unallocated handle\n",
576                         current->group_leader->comm);
577                 nvmap_handle_put(h);
578                 return ERR_PTR(-EINVAL);
579         }
580
581         nvmap_ref_lock(client);
582         ref = __nvmap_validate_locked(client, h);
583
584         if (ref) {
585                 /* handle already duplicated in client; just increment
586                  * the reference count rather than re-duplicating it */
587                 atomic_inc(&ref->dupes);
588                 nvmap_ref_unlock(client);
589                 return ref;
590         }
591
592         nvmap_ref_unlock(client);
593
594         ref = kzalloc(sizeof(*ref), GFP_KERNEL);
595         if (!ref) {
596                 nvmap_handle_put(h);
597                 return ERR_PTR(-ENOMEM);
598         }
599
600         if (!h->heap_pgalloc) {
601                 mutex_lock(&h->lock);
602                 nvmap_carveout_commit_add(client,
603                         nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
604                         h->size);
605                 mutex_unlock(&h->lock);
606         }
607
608         atomic_set(&ref->dupes, 1);
609         ref->handle = h;
610         atomic_set(&ref->pin, 0);
611         add_handle_ref(client, ref);
612
613         /*
614          * Ref counting on the dma_bufs follows the creation and destruction of
615          * nvmap_handle_refs. That is, every time a handle_ref is made the
616          * dma_buf ref count goes up, and every time a handle_ref is destroyed
617          * the dma_buf ref count goes down.
618          */
619         get_dma_buf(h->dmabuf);
620
621         trace_nvmap_duplicate_handle(client, h, ref);
622         return ref;
623 }
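
/*
 * Worked example (illustrative only): right after nvmap_create_handle() the
 * handle's dma_buf holds one reference, owned by the implicit handle_ref made
 * for the creating client. Duplicating the handle into two other clients adds
 * one handle_ref and one get_dma_buf() each, for three dma_buf references in
 * total; duplicating it again within a client that already holds it only
 * bumps ref->dupes. Each time the last duplicate in a client is freed,
 * nvmap_free_handle() drops exactly one dma_buf reference.
 */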
624
625 struct nvmap_handle_ref *nvmap_create_handle_from_fd(
626                         struct nvmap_client *client, int fd)
627 {
628         struct nvmap_handle *handle;
629         struct nvmap_handle_ref *ref;
630
631         BUG_ON(!client);
632
633         handle = nvmap_get_id_from_dmabuf_fd(client, fd);
634         if (IS_ERR(handle))
635                 return ERR_CAST(handle);
636         ref = nvmap_duplicate_handle(client, handle, 1);
637         return ref;
638 }
639
640 struct nvmap_handle *nvmap_duplicate_handle_id_ex(struct nvmap_client *client,
641                                                         struct nvmap_handle *h)
642 {
643         struct nvmap_handle_ref *ref = nvmap_duplicate_handle(client, h, 0);
644
645         if (IS_ERR(ref))
646                 return NULL;
647
648         return __nvmap_ref_to_id(ref);
649 }
650 EXPORT_SYMBOL(nvmap_duplicate_handle_id_ex);
651
652 int nvmap_get_page_list_info(struct nvmap_client *client,
653                                 struct nvmap_handle *handle, u32 *size,
654                                 u32 *flags, u32 *nr_page, bool *contig)
655 {
656         struct nvmap_handle *h;
657
658         BUG_ON(!size || !flags || !nr_page || !contig);
659         BUG_ON(!client);
660
661         *size = 0;
662         *flags = 0;
663         *nr_page = 0;
664
665         h = nvmap_handle_get(handle);
666
667         if (!h) {
668                 pr_err("%s query invalid handle %p\n",
669                         current->group_leader->comm, handle);
670                 return -EINVAL;
671         }
672
673         if (!h->alloc || !h->heap_pgalloc) {
674                 pr_err("%s query unallocated handle %p\n",
675                         current->group_leader->comm, handle);
676                 nvmap_handle_put(h);
677                 return -EINVAL;
678         }
679
680         *flags = h->flags;
681         *size = h->orig_size;
682         *nr_page = PAGE_ALIGN(h->size) >> PAGE_SHIFT;
683         *contig = h->pgalloc.contig;
684
685         nvmap_handle_put(h);
686         return 0;
687 }
688 EXPORT_SYMBOL(nvmap_get_page_list_info);
689
690 int nvmap_acquire_page_list(struct nvmap_client *client,
691                         struct nvmap_handle *handle, struct page **pages,
692                         u32 nr_page)
693 {
694         struct nvmap_handle *h;
695         struct nvmap_handle_ref *ref;
696         int idx;
697         phys_addr_t dummy;
698
699         BUG_ON(!client);
700
701         h = nvmap_handle_get(handle);
702
703         if (!h) {
704                 pr_err("%s query invalid handle %p\n",
705                           current->group_leader->comm, handle);
706                 return -EINVAL;
707         }
708
709         if (!h->alloc || !h->heap_pgalloc) {
710                 pr_err("%s query unallocated handle %p\n",
711                           current->group_leader->comm, handle);
712                 nvmap_handle_put(h);
713                 return -EINVAL;
714         }
715
716         BUG_ON(nr_page != PAGE_ALIGN(h->size) >> PAGE_SHIFT);
717
718         for (idx = 0; idx < nr_page; idx++)
719                 pages[idx] = h->pgalloc.pages[idx];
720
721         nvmap_ref_lock(client);
722         ref = __nvmap_validate_locked(client, h);
723         if (ref)
724                 __nvmap_pin(ref, &dummy);
725         nvmap_ref_unlock(client);
726
727         return 0;
728 }
729 EXPORT_SYMBOL(nvmap_acquire_page_list);
730
731 int nvmap_release_page_list(struct nvmap_client *client,
732                                 struct nvmap_handle *handle)
733 {
734         struct nvmap_handle_ref *ref;
735         struct nvmap_handle *h = NULL;
736
737         BUG_ON(!client);
738
739         nvmap_ref_lock(client);
740
741         ref = __nvmap_validate_locked(client, handle);
742         if (ref)
743                 __nvmap_unpin(ref);
744
745         nvmap_ref_unlock(client);
746
747         if (ref)
748                 h = ref->handle;
749         if (h)
750                 nvmap_handle_put(h);
751
752         return 0;
753 }
754 EXPORT_SYMBOL(nvmap_release_page_list);
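
/*
 * Usage sketch (illustrative only; error handling abbreviated): a kernel
 * client that wants direct access to a handle's pages can combine the three
 * helpers above:
 *
 *	u32 size, flags, nr_page;
 *	bool contig;
 *	struct page **pages;
 *
 *	nvmap_get_page_list_info(client, handle, &size, &flags,
 *				 &nr_page, &contig);
 *	pages = nvmap_altalloc(nr_page * sizeof(*pages));
 *	nvmap_acquire_page_list(client, handle, pages, nr_page);
 *
 *	... access the pages ...
 *
 *	nvmap_release_page_list(client, handle);
 *	nvmap_altfree(pages, nr_page * sizeof(*pages));
 */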
755
756 int __nvmap_get_handle_param(struct nvmap_client *client,
757                              struct nvmap_handle *h, u32 param, u64 *result)
758 {
759         int err = 0;
760
761         if (WARN_ON(!virt_addr_valid(h)))
762                 return -EINVAL;
763
764         switch (param) {
765         case NVMAP_HANDLE_PARAM_SIZE:
766                 *result = h->orig_size;
767                 break;
768         case NVMAP_HANDLE_PARAM_ALIGNMENT:
769                 *result = h->align;
770                 break;
771         case NVMAP_HANDLE_PARAM_BASE:
772                 if (!h->alloc || !atomic_read(&h->pin))
773                         *result = -EINVAL;
774                 else if (!h->heap_pgalloc) {
775                         mutex_lock(&h->lock);
776                         *result = h->carveout->base;
777                         mutex_unlock(&h->lock);
778                 } else if (h->attachment->priv)
779                         *result = sg_dma_address(
780                                 ((struct sg_table *)h->attachment->priv)->sgl);
781                 else
782                         *result = -EINVAL;
783                 break;
784         case NVMAP_HANDLE_PARAM_HEAP:
785                 if (!h->alloc)
786                         *result = 0;
787                 else if (!h->heap_pgalloc) {
788                         mutex_lock(&h->lock);
789                         *result = nvmap_carveout_usage(client, h->carveout);
790                         mutex_unlock(&h->lock);
791                 } else
792                         *result = NVMAP_HEAP_IOVMM;
793                 break;
794         case NVMAP_HANDLE_PARAM_KIND:
795                 *result = h->kind;
796                 break;
797         case NVMAP_HANDLE_PARAM_COMPR:
798                 /* ignored, to be removed */
799                 break;
800         default:
801                 err = -EINVAL;
802                 break;
803         }
804         return err;
805 }
806
807 int nvmap_get_handle_param(struct nvmap_client *client,
808                            struct nvmap_handle_ref *ref, u32 param, u64 *result)
809 {
810         if (WARN_ON(!virt_addr_valid(ref)) ||
811             WARN_ON(!virt_addr_valid(client)) ||
812             WARN_ON(!result))
813                 return -EINVAL;
814
815         return __nvmap_get_handle_param(client, ref->handle, param, result);
816 }