/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */

#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/dma-contiguous.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

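/*
 * Overview of the resizable coherent heaps below: a heap carved out of CMA
 * is managed as an array of fixed-size chunks (cma_chunk_size).  Each chunk
 * is represented by one anonymous struct device in heap_info->devs; a chunk
 * that is currently part of the heap has dev->dma_mem declared on it, so the
 * ordinary per-device coherent allocator can hand out pages from it.  The
 * heap grows and shrinks one chunk at a time, only at either end, and the
 * optional update_resize_cfg() notifier (e.g. VPR reconfiguration) is told
 * about every base/length change.
 */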
struct heap_info {
	char *name;
	/* number of devices pointed to by devs */
	unsigned int num_devs;
	/* devs to manage cma/coherent memory allocs, if resize allowed */
	struct device *devs;
	/* device to allocate memory from cma */
	struct device *cma_dev;
	/* lock to synchronise heap resizing */
	struct mutex resize_lock;
	/* CMA chunk size if resize supported */
	size_t cma_chunk_size;
	/* heap base */
	phys_addr_t base;
	/* heap size */
	size_t len;
	phys_addr_t cma_base;
	size_t cma_len;
	size_t rem_chunk_size;
	struct dentry *dma_debug_root;
	int (*update_resize_cfg)(phys_addr_t, size_t);
};

#define DMA_RESERVED_COUNT 8
static struct dma_coherent_reserved {
	const struct device *dev;
} dma_coherent_reserved[DMA_RESERVED_COUNT];

static unsigned dma_coherent_reserved_count;

#ifdef CONFIG_ARM_DMA_IOMMU_ALIGNMENT
#define DMA_BUF_ALIGNMENT CONFIG_ARM_DMA_IOMMU_ALIGNMENT
#else
#define DMA_BUF_ALIGNMENT 8
#endif

struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	phys_addr_t	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

static bool dma_is_coherent_dev(struct device *dev)
{
	int i;
	struct dma_coherent_reserved *r = dma_coherent_reserved;

	for (i = 0; i < dma_coherent_reserved_count; i++, r++) {
		if (dev == r->dev)
			return true;
	}
	return false;
}

static void dma_debugfs_init(struct device *dev, struct heap_info *heap)
{
	if (!heap->dma_debug_root) {
		heap->dma_debug_root = debugfs_create_dir(dev_name(dev), NULL);
		if (IS_ERR_OR_NULL(heap->dma_debug_root)) {
			dev_err(dev, "couldn't create debug files\n");
			return;
		}
	}

	debugfs_create_x32("base", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->base);
	debugfs_create_x32("size", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->len);
	debugfs_create_x32("cma_base", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->cma_base);
	debugfs_create_x32("cma_size", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->cma_len);
	debugfs_create_x32("cma_chunk_size", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->cma_chunk_size);
	debugfs_create_x32("num_cma_chunks", S_IRUGO,
		heap->dma_debug_root, (u32 *)&heap->num_devs);
}

static struct device *dma_create_dma_devs(const char *name, int num_devs)
{
	int idx = 0;
	struct device *devs;

	devs = kzalloc(num_devs * sizeof(*devs), GFP_KERNEL);
	if (!devs)
		return NULL;

	for (idx = 0; idx < num_devs; idx++)
		dev_set_name(&devs[idx], "%s-heap-%d", name, idx);

	return devs;
}

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags &
		(DMA_MEMORY_MAP | DMA_MEMORY_IO | DMA_MEMORY_NOMAP)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	if (flags & DMA_MEMORY_NOMAP)
		goto skip_mapping;

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto free2_out;
	dev->dma_mem->virt_base = mem_base;

skip_mapping:
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	if (flags & DMA_MEMORY_NOMAP)
		return DMA_MEMORY_NOMAP;

	return DMA_MEMORY_IO;

 free2_out:
	kfree(dev->dma_mem->bitmap);
 free1_out:
	kfree(dev->dma_mem);
	dev->dma_mem = NULL;
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
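
/*
 * Illustrative usage sketch (not from this tree; the device and resource
 * names are hypothetical): a driver with a dedicated on-chip memory region
 * could declare it as its per-device coherent pool so dma_alloc_coherent()
 * is satisfied from that region:
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, res->start, res->start,
 *					resource_size(res),
 *					DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE)
 *	    != DMA_MEMORY_MAP)
 *		return -ENOMEM;
 *
 * DMA_MEMORY_EXCLUSIVE keeps allocations from falling back to the generic
 * pools when the per-device area is exhausted.
 */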

static int declare_coherent_heap(struct device *dev, phys_addr_t base,
					size_t size)
{
	int err;

	BUG_ON(dev->dma_mem);
	dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	err = dma_declare_coherent_memory(dev, 0,
			base, size, DMA_MEMORY_NOMAP);
	if (err & DMA_MEMORY_NOMAP) {
		dev_dbg(dev, "dma coherent mem base (0x%pa) size (0x%zx)\n",
			&base, size);
		return 0;
	}
	dev_err(dev, "declare dma coherent_mem fail 0x%pa 0x%zx\n",
		&base, size);
	return -ENOMEM;
}

int dma_declare_coherent_resizable_cma_memory(struct device *dev,
					struct dma_declare_info *dma_info)
{
#ifdef CONFIG_CMA
	int err = 0;
	struct heap_info *heap_info = NULL;
	struct dma_contiguous_stats stats;
	struct dma_coherent_reserved *r =
			&dma_coherent_reserved[dma_coherent_reserved_count];

	if (dma_coherent_reserved_count == ARRAY_SIZE(dma_coherent_reserved)) {
		pr_err("Not enough slots for DMA Coherent reserved regions!\n");
		return -ENOSPC;
	}

	if (!dev || !dma_info || !dma_info->name || !dma_info->cma_dev)
		return -EINVAL;

	heap_info = kzalloc(sizeof(*heap_info), GFP_KERNEL);
	if (!heap_info)
		return -ENOMEM;

	heap_info->name = kmalloc(strlen(dma_info->name) + 1, GFP_KERNEL);
	if (!heap_info->name) {
		kfree(heap_info);
		return -ENOMEM;
	}

	dma_get_contiguous_stats(dma_info->cma_dev, &stats);
	pr_info("resizable heap=%s, base=0x%pa, size=0x%zx\n",
		dma_info->name, &stats.base, stats.size);
	strcpy(heap_info->name, dma_info->name);
	dev_set_name(dev, "dma-%s", heap_info->name);
	heap_info->cma_dev = dma_info->cma_dev;
	heap_info->cma_chunk_size = dma_info->size;
	heap_info->cma_base = stats.base;
	heap_info->cma_len = stats.size;
	dev_set_name(heap_info->cma_dev, "cma-%s-heap", heap_info->name);
	mutex_init(&heap_info->resize_lock);

	if (heap_info->cma_len < heap_info->cma_chunk_size) {
		dev_err(dev, "error cma_len(0x%zx) < cma_chunk_size(0x%zx)\n",
			heap_info->cma_len, heap_info->cma_chunk_size);
		err = -EINVAL;
		goto fail;
	}

	heap_info->num_devs = div_u64_rem(heap_info->cma_len,
		(u32)heap_info->cma_chunk_size, (u32 *)&heap_info->rem_chunk_size);
	if (heap_info->rem_chunk_size) {
		heap_info->num_devs++;
		dev_info(dev, "heap size is not a multiple of cma_chunk_size "
			"heap_info->num_devs (%d) rem_chunk_size(0x%zx)\n",
			heap_info->num_devs, heap_info->rem_chunk_size);
	} else {
		heap_info->rem_chunk_size = heap_info->cma_chunk_size;
	}

	heap_info->devs = dma_create_dma_devs(heap_info->name,
				heap_info->num_devs);
	if (!heap_info->devs) {
		dev_err(dev, "failed to alloc devices\n");
		err = -ENOMEM;
		goto fail;
	}
	if (dma_info->notifier.ops)
		heap_info->update_resize_cfg =
			dma_info->notifier.ops->resize;

	r->dev = dev;
	dma_coherent_reserved_count++;

	dev_set_drvdata(dev, heap_info);
	dma_debugfs_init(dev, heap_info);
	pr_info("resizable cma heap=%s created successfully\n",
		heap_info->name);
	return 0;
fail:
	kfree(heap_info->name);
	kfree(heap_info);
	return err;
#else
	return -EINVAL;
#endif
}
EXPORT_SYMBOL(dma_declare_coherent_resizable_cma_memory);
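
/*
 * Illustrative sketch of registering a resizable heap.  The dma_declare_info
 * field names below are inferred from how this file uses them (name, size as
 * the CMA chunk size, cma_dev, notifier.ops->resize); the real layout lives
 * in the Tegra-specific headers, and "vpr" and the device names are made up:
 *
 *	static struct dma_declare_info vpr_info = {
 *		.name = "vpr",
 *		.size = SZ_32M,
 *		.notifier.ops = &vpr_resize_ops,
 *	};
 *
 *	vpr_info.cma_dev = &tegra_vpr_cma_dev;
 *	err = dma_declare_coherent_resizable_cma_memory(heap_dev, &vpr_info);
 *
 * On success the heap starts out empty; CMA chunks are claimed on demand by
 * heap_resize_locked() below.
 */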

static phys_addr_t alloc_from_contiguous_heap(
				struct heap_info *h,
				phys_addr_t base, size_t len)
{
	size_t count;
	struct page *page;
	unsigned long order;

	dev_dbg(h->cma_dev, "req at base (0x%pa) size (0x%zx)\n",
		&base, len);
	order = get_order(len);
	count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	page = dma_alloc_at_from_contiguous(h->cma_dev, count, order, base);
	if (!page) {
		dev_err(h->cma_dev, "dma_alloc_at_from_contiguous failed\n");
		goto dma_alloc_err;
	}

	base = page_to_phys(page);
	dev_dbg(h->cma_dev, "allocated at base (0x%pa) size (0x%zx)\n",
		&base, len);
	BUG_ON(base < h->cma_base ||
		base - h->cma_base + len > h->cma_len);
	return base;

dma_alloc_err:
	return DMA_ERROR_CODE;
}

static void release_from_contiguous_heap(
				struct heap_info *h,
				phys_addr_t base, size_t len)
{
	struct page *page = phys_to_page(base);
	size_t count = PAGE_ALIGN(len) >> PAGE_SHIFT;

	dma_release_from_contiguous(h->cma_dev, page, count);
}

static void get_first_and_last_idx(struct heap_info *h,
				   int *first_alloc_idx, int *last_alloc_idx)
{
	int idx;
	struct device *d;

	*first_alloc_idx = -1;
	*last_alloc_idx = h->num_devs;

	for (idx = 0; idx < h->num_devs; idx++) {
		d = &h->devs[idx];
		if (d->dma_mem) {
			if (*first_alloc_idx == -1)
				*first_alloc_idx = idx;
			*last_alloc_idx = idx;
		}
	}
}

static void update_heap_base_len(struct heap_info *h)
{
	int idx;
	struct device *d;
	phys_addr_t base = 0;
	size_t len = 0;

	for (idx = 0; idx < h->num_devs; idx++) {
		d = &h->devs[idx];
		if (d->dma_mem) {
			if (!base)
				base = idx * h->cma_chunk_size + h->cma_base;
			len += (idx == h->num_devs - 1) ?
					h->rem_chunk_size : h->cma_chunk_size;
		}
	}

	h->base = base;
	h->len = len;
}

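/*
 * Grow the heap by exactly one CMA chunk.  Called with h->resize_lock held.
 * The new chunk is placed anywhere in the CMA area when the heap is empty,
 * otherwise only immediately before the first or immediately after the last
 * chunk already owned, so the heap always stays physically contiguous.  The
 * chunk is then declared as NOMAP coherent memory on its per-chunk device,
 * its pages are cache-flushed, the heap base/length are recomputed and the
 * resize notifier (if any) is informed of the new extent.
 */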
static int heap_resize_locked(struct heap_info *h)
{
	int i;
	int err = 0;
	phys_addr_t base = -1;
	size_t len = h->cma_chunk_size;
	phys_addr_t prev_base = h->base;
	size_t prev_len = h->len;
	int alloc_at_idx = 0;
	int first_alloc_idx;
	int last_alloc_idx;
	phys_addr_t start_addr = 0;

	get_first_and_last_idx(h, &first_alloc_idx, &last_alloc_idx);
	pr_debug("req resize, fi=%d,li=%d\n", first_alloc_idx, last_alloc_idx);

	/* All chunks are in use. Can't grow it. */
	if (first_alloc_idx == 0 && last_alloc_idx == h->num_devs - 1)
		return -ENOMEM;

	/* All chunks are free. Can allocate anywhere in CMA with
	 * cma_chunk_size alignment.
	 */
	if (first_alloc_idx == -1) {
		base = alloc_from_contiguous_heap(h, start_addr, len);
		if (!dma_mapping_error(h->cma_dev, base))
			goto alloc_success;
	}

	/* Free chunk before previously allocated chunk. Attempt
	 * to allocate only the immediately preceding chunk.
	 */
	if (first_alloc_idx > 0) {
		alloc_at_idx = first_alloc_idx - 1;
		start_addr = alloc_at_idx * h->cma_chunk_size + h->cma_base;
		base = alloc_from_contiguous_heap(h, start_addr, len);
		if (base == start_addr)
			goto alloc_success;
		BUG_ON(!dma_mapping_error(h->cma_dev, base));
	}

	/* Free chunk after previously allocated chunk. */
	if (last_alloc_idx < h->num_devs - 1) {
		alloc_at_idx = last_alloc_idx + 1;
		len = (alloc_at_idx == h->num_devs - 1) ?
				h->rem_chunk_size : h->cma_chunk_size;
		start_addr = alloc_at_idx * h->cma_chunk_size + h->cma_base;
		base = alloc_from_contiguous_heap(h, start_addr, len);
		if (base == start_addr)
			goto alloc_success;
		BUG_ON(!dma_mapping_error(h->cma_dev, base));
	}

	if (dma_mapping_error(h->cma_dev, base))
		dev_err(&h->devs[alloc_at_idx],
		"Failed to allocate contiguous memory on heap grow req\n");

	return -ENOMEM;

alloc_success:
	if (declare_coherent_heap(&h->devs[alloc_at_idx], base, len)) {
		dev_err(&h->devs[alloc_at_idx],
			"Failed to declare coherent memory\n");
		goto fail_declare;
	}

	for (i = 0; i < len >> PAGE_SHIFT; i++) {
		struct page *page =
			phys_to_page(base + ((phys_addr_t)i << PAGE_SHIFT));

		if (PageHighMem(page)) {
			void *ptr = kmap_atomic(page);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
		} else {
			void *ptr = page_address(page);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
		}
	}

	update_heap_base_len(h);

	/* Handle VPR configuration updates */
	if (h->update_resize_cfg) {
		err = h->update_resize_cfg(h->base, h->len);
		if (err) {
			dev_err(&h->devs[alloc_at_idx],
				"Failed to update heap resize\n");
			goto fail_update;
		}
	}

	dev_dbg(&h->devs[alloc_at_idx],
		"grow heap base from=0x%pa to=0x%pa,"
		" len from=0x%zx to=0x%zx\n",
		&prev_base, &h->base, prev_len, h->len);
	return 0;

fail_update:
	dma_release_declared_memory(&h->devs[alloc_at_idx]);
fail_declare:
	release_from_contiguous_heap(h, base, len);
	h->base = prev_base;
	h->len = prev_len;
	return -ENOMEM;
}

/* retval: !0 on success, 0 on failure */
static int dma_alloc_from_coherent_dev(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret,
				       struct dma_attrs *attrs)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;
	unsigned int count;
	unsigned long align;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;

	*dma_handle = DMA_ERROR_CODE;
	*ret = NULL;

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	if (order > DMA_BUF_ALIGNMENT)
		align = (1 << DMA_BUF_ALIGNMENT) - 1;
	else
		align = (1 << order) - 1;

	if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs))
		count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	else
		count = 1 << order;

	pageno = bitmap_find_next_zero_area(mem->bitmap, mem->size,
			0, count, align);

	if (pageno >= mem->size)
		goto err;

	bitmap_set(mem->bitmap, pageno, count);

	/*
	 * Memory was found in the per-device area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	if (!(mem->flags & DMA_MEMORY_NOMAP)) {
		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
		memset(*ret, 0, size);
	}

	return 1;

err:
	/*
	 * In the case where the allocation can not be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

/* retval: !0 on success, 0 on failure */
static int dma_alloc_from_coherent_heap_dev(struct device *dev, size_t len,
					dma_addr_t *dma_handle, void **ret,
					struct dma_attrs *attrs)
{
	int idx;
	struct heap_info *h = NULL;
	struct device *d;

	*dma_handle = DMA_ERROR_CODE;
	if (!dma_is_coherent_dev(dev))
		return 0;

	h = dev_get_drvdata(dev);
	BUG_ON(!h);
	if (!h)
		return 1;
	dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs);

	mutex_lock(&h->resize_lock);
retry_alloc:
	/* Try allocation from already existing CMA chunks */
	for (idx = 0; idx < h->num_devs; idx++) {
		d = &h->devs[idx];
		if (!d->dma_mem)
			continue;
		if (dma_alloc_from_coherent_dev(
			d, len, dma_handle, ret, attrs)) {
			dev_dbg(d, "allocated addr 0x%pa len 0x%zx\n",
				dma_handle, len);
			goto out;
		}
	}

	if (!heap_resize_locked(h))
		goto retry_alloc;
out:
	mutex_unlock(&h->resize_lock);
	return DMA_MEMORY_EXCLUSIVE;
}

/* retval: !0 on success, 0 on failure */
static int dma_release_from_coherent_dev(struct device *dev, size_t size,
					void *vaddr, struct dma_attrs *attrs)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	void *mem_addr;
	unsigned int count;
	unsigned int pageno;

	if (!mem)
		return 0;

	if (mem->flags & DMA_MEMORY_NOMAP)
		mem_addr = (void *)(uintptr_t)mem->device_base;
	else
		mem_addr = mem->virt_base;

	if (mem && vaddr >= mem_addr &&
	    vaddr - mem_addr < mem->size << PAGE_SHIFT) {

		pageno = (vaddr - mem_addr) >> PAGE_SHIFT;

		if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs))
			count = PAGE_ALIGN(size) >> PAGE_SHIFT;
		else
			count = 1 << get_order(size);

		bitmap_clear(mem->bitmap, pageno, count);

		return 1;
	}
	return 0;
}

static int dma_release_from_coherent_heap_dev(struct device *dev, size_t len,
					void *base, struct dma_attrs *attrs)
{
	int idx = 0;
	int err = 0;
	int resize_err = 0;
	void *ret = NULL;
	dma_addr_t dev_base;
	struct heap_info *h = NULL;
	size_t chunk_size;
	int first_alloc_idx;
	int last_alloc_idx;

	if (!dma_is_coherent_dev(dev))
		return 0;

	h = dev_get_drvdata(dev);
	BUG_ON(!h);
	if (!h)
		return 1;
	if ((uintptr_t)base < h->cma_base ||
	    len > h->cma_chunk_size ||
	    (uintptr_t)base - h->cma_base > h->cma_len - len) {
		BUG();
		return 1;
	}

	dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs);

	mutex_lock(&h->resize_lock);

	idx = div_u64((uintptr_t)base - h->cma_base, h->cma_chunk_size);
	dev_dbg(&h->devs[idx], "req free addr (%p) size (0x%zx) idx (%d)\n",
		base, len, idx);
	err = dma_release_from_coherent_dev(&h->devs[idx], len, base, attrs);

	if (!err)
		goto out_unlock;

check_next_chunk:
	get_first_and_last_idx(h, &first_alloc_idx, &last_alloc_idx);

	/* Check if the heap can be shrunk */
	if (idx == first_alloc_idx || idx == last_alloc_idx) {
		/* check if entire chunk is free */
		if (idx == h->num_devs - 1)
			chunk_size = h->rem_chunk_size;
		else
			chunk_size = h->cma_chunk_size;

		resize_err = dma_alloc_from_coherent_dev(&h->devs[idx],
					chunk_size,
					&dev_base, &ret, attrs);
		if (!resize_err)
			goto out_unlock;
		else {
			dev_dbg(&h->devs[idx],
				"prep to remove chunk b=0x%pa, s=0x%zx\n",
				&dev_base, chunk_size);
			resize_err = dma_release_from_coherent_dev(
				&h->devs[idx], chunk_size,
				(void *)(uintptr_t)dev_base, attrs);
			if (!resize_err) {
				dev_err(&h->devs[idx], "failed to rel mem\n");
				goto out_unlock;
			}

			dma_release_declared_memory(&h->devs[idx]);
			BUG_ON(h->devs[idx].dma_mem != NULL);
			update_heap_base_len(h);

			/* Handle VPR configuration updates */
			if (h->update_resize_cfg) {
				resize_err =
					h->update_resize_cfg(h->base, h->len);
				if (resize_err) {
					dev_err(&h->devs[idx],
						"update resize failed\n");
					/* On update failure re-declare heap */
					resize_err = declare_coherent_heap(
						&h->devs[idx], dev_base,
						chunk_size);
					if (resize_err) {
						/* on declare coherent failure
						 * release heap chunk
						 */
						release_from_contiguous_heap(h,
							dev_base, chunk_size);
						dev_err(&h->devs[idx],
							"declare failed\n");
					} else
						update_heap_base_len(h);
					goto out_unlock;
				}
			}

			idx == first_alloc_idx ? ++idx : --idx;
			release_from_contiguous_heap(h, dev_base, chunk_size);
			dev_dbg(&h->devs[idx],
				"removed chunk b=0x%pa, s=0x%zx "
				"new heap b=0x%pa, s=0x%zx\n",
				&dev_base, chunk_size, &h->base, h->len);
		}
		if (idx < h->num_devs)
			goto check_next_chunk;
	}
out_unlock:
	mutex_unlock(&h->resize_lock);
	return err;
}

void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;

	if (!(mem->flags & DMA_MEMORY_NOMAP))
		iounmap(mem->virt_base);

	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
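
/*
 * Hedged example: a driver that has declared a coherent pool but knows the
 * first 64 KiB of it are already in use (say, by firmware) could fence that
 * range off so the allocator never hands it out.  The device address below
 * is illustrative:
 *
 *	void *va = dma_mark_declared_memory_occupied(dev, dev_base, SZ_64K);
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 */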

/**
 * dma_alloc_from_coherent_attr() - try to allocate memory from the per-device
 * coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address of the
 *		allocated area.
 * @attrs:	DMA attributes
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent() should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent() should return @ret.
 */
int dma_alloc_from_coherent_attr(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret,
				       struct dma_attrs *attrs)
{
	if (!dev)
		return 0;

	if (dev->dma_mem)
		return dma_alloc_from_coherent_dev(dev, size, dma_handle, ret,
							attrs);
	else
		return dma_alloc_from_coherent_heap_dev(dev, size, dma_handle,
							ret, attrs);
}
EXPORT_SYMBOL(dma_alloc_from_coherent_attr);
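
/*
 * Hedged sketch of the expected call site.  The real hook lives in the
 * arch-specific dma_alloc_coherent()/dma_alloc_attrs() path, not here; the
 * surrounding variable names are illustrative:
 *
 *	void *cpu_addr;
 *
 *	if (dma_alloc_from_coherent_attr(dev, size, dma_handle,
 *					 &cpu_addr, attrs))
 *		return cpu_addr;
 *
 * A zero return means the caller should continue with the generic allocator.
 */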

/**
 * dma_release_from_coherent_attr() - try to free the memory allocated from
 * per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @size:	size of the memory area to free
 * @vaddr:	virtual address of allocated pages
 * @attrs:	DMA attributes
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent_attr() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent_attr(struct device *dev, size_t size, void *vaddr,
				struct dma_attrs *attrs)
{
	if (!dev)
		return 0;

	if (dev->dma_mem)
		return dma_release_from_coherent_dev(dev, size, vaddr, attrs);
	else
		return dma_release_from_coherent_heap_dev(dev, size, vaddr,
			attrs);
}
EXPORT_SYMBOL(dma_release_from_coherent_attr);
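
/*
 * Hedged sketch of the matching free path in arch code:
 *
 *	if (dma_release_from_coherent_attr(dev, size, cpu_addr, attrs))
 *		return;
 *
 * Otherwise the caller falls through to the generic free path.  Note that
 * for DMA_MEMORY_NOMAP heaps the "vaddr" passed here is really the device
 * address cast to a pointer, which is what
 * dma_release_from_coherent_heap_dev() above expects.
 */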

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_coherent
 * @size:	size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	void *mem_addr;

	if (!mem)
		return 0;

	if (mem->flags & DMA_MEMORY_NOMAP)
		mem_addr = (void *)(uintptr_t)mem->device_base;
	else
		mem_addr = mem->virt_base;

	if (mem && vaddr >= mem_addr && vaddr + size <=
		   (mem_addr + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem_addr) >> PAGE_SHIFT;
		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		int count = size >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
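
/*
 * Hedged sketch of how an arch dma_mmap implementation typically consumes
 * this helper (the surrounding function name and fallback are illustrative):
 *
 *	int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *			  void *cpu_addr, dma_addr_t handle, size_t size,
 *			  struct dma_attrs *attrs)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		return generic_mmap_fallback(dev, vma, cpu_addr, handle,
 *					     size, attrs);
 *	}
 */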