/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */

#define pr_fmt(fmt) "%s:%d: " fmt, __func__, __LINE__
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/dma-contiguous.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>

#define RESIZE_MAGIC 0xC11A900d
struct heap_info {
        int magic;
        char *name;
        /* number of chunks the memory is managed in */
        unsigned int num_chunks;
        /* dev to manage cma/coherent memory allocs, if resize allowed */
        struct device dev;
        /* device to allocate memory from cma */
        struct device *cma_dev;
        /* lock to synchronise heap resizing */
        struct mutex resize_lock;
        /* CMA chunk size if resize supported */
        size_t cma_chunk_size;
        /* heap current base */
        phys_addr_t curr_base;
        /* heap current length */
        size_t curr_len;
        /* heap lowest base */
        phys_addr_t cma_base;
        /* heap max length */
        size_t cma_len;
        size_t rem_chunk_size;
        struct dentry *dma_debug_root;
        int (*update_resize_cfg)(phys_addr_t, size_t);
};

#ifdef CONFIG_ARM_DMA_IOMMU_ALIGNMENT
#define DMA_BUF_ALIGNMENT CONFIG_ARM_DMA_IOMMU_ALIGNMENT
#else
#define DMA_BUF_ALIGNMENT 8
#endif

struct dma_coherent_mem {
        void            *virt_base;
        dma_addr_t      device_base;
        phys_addr_t     pfn_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

static bool dma_is_coherent_dev(struct device *dev)
{
        struct heap_info *h;

        if (!dev)
                return false;
        h = dev_get_drvdata(dev);
        if (!h)
                return false;
        if (h->magic != RESIZE_MAGIC)
                return false;
        return true;
}

static void dma_debugfs_init(struct device *dev, struct heap_info *heap)
{
        if (!heap->dma_debug_root) {
                heap->dma_debug_root = debugfs_create_dir(dev_name(dev), NULL);
                if (IS_ERR_OR_NULL(heap->dma_debug_root)) {
                        dev_err(dev, "couldn't create debug files\n");
                        return;
                }
        }

        debugfs_create_x32("curr_base", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->curr_base);
        debugfs_create_x32("curr_len", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->curr_len);
        debugfs_create_x32("cma_base", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->cma_base);
        debugfs_create_x32("cma_size", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->cma_len);
        debugfs_create_x32("cma_chunk_size", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->cma_chunk_size);
        debugfs_create_x32("num_cma_chunks", S_IRUGO,
                heap->dma_debug_root, (u32 *)&heap->num_chunks);
}

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags &
                (DMA_MEMORY_MAP | DMA_MEMORY_IO | DMA_MEMORY_NOMAP)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        if (flags & DMA_MEMORY_NOMAP)
                goto skip_mapping;

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto free2_out;
        dev->dma_mem->virt_base = mem_base;

skip_mapping:
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        if (flags & DMA_MEMORY_NOMAP)
                return DMA_MEMORY_NOMAP;

        return DMA_MEMORY_IO;

 free2_out:
        kfree(dev->dma_mem->bitmap);
 free1_out:
        kfree(dev->dma_mem);
        dev->dma_mem = NULL;
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
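
/*
 * Example (illustrative sketch, not part of this file): a platform driver
 * could reserve a fixed physical window as per-device coherent memory and
 * have dma_alloc_coherent() served from it. The base address and size used
 * here are hypothetical.
 *
 *      if (dma_declare_coherent_memory(&pdev->dev, 0x80000000, 0x80000000,
 *                                      SZ_1M, DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *              dev_warn(&pdev->dev, "coherent memory declaration failed\n");
 */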

static int declare_coherent_heap(struct device *dev, phys_addr_t base,
                                        size_t size)
{
        int err;

        BUG_ON(dev->dma_mem);
        dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
        err = dma_declare_coherent_memory(dev, 0,
                        base, size, DMA_MEMORY_NOMAP);
        if (err & DMA_MEMORY_NOMAP) {
                dev_dbg(dev, "dma coherent mem base (0x%pa) size (0x%zx)\n",
                        &base, size);
                return 0;
        }
        dev_err(dev, "declare dma coherent_mem fail 0x%pa 0x%zx\n",
                &base, size);
        return -ENOMEM;
}

int dma_declare_coherent_resizable_cma_memory(struct device *dev,
                                        struct dma_declare_info *dma_info)
{
#ifdef CONFIG_CMA
        int err = 0;
        struct heap_info *heap_info = NULL;
        struct dma_contiguous_stats stats;

        if (!dev || !dma_info || !dma_info->name || !dma_info->cma_dev)
                return -EINVAL;

        heap_info = kzalloc(sizeof(*heap_info), GFP_KERNEL);
        if (!heap_info)
                return -ENOMEM;

        heap_info->magic = RESIZE_MAGIC;
        heap_info->name = kmalloc(strlen(dma_info->name) + 1, GFP_KERNEL);
        if (!heap_info->name) {
                kfree(heap_info);
                return -ENOMEM;
        }

        dma_get_contiguous_stats(dma_info->cma_dev, &stats);
        pr_info("resizable heap=%s, base=0x%pa, size=0x%zx\n",
                dma_info->name, &stats.base, stats.size);
        strcpy(heap_info->name, dma_info->name);
        dev_set_name(dev, "dma-%s", heap_info->name);
        heap_info->cma_dev = dma_info->cma_dev;
        heap_info->cma_chunk_size = dma_info->size ? : stats.size;
        heap_info->cma_base = stats.base;
        heap_info->cma_len = stats.size;
        heap_info->curr_base = stats.base;
        dev_set_name(heap_info->cma_dev, "cma-%s-heap", heap_info->name);
        mutex_init(&heap_info->resize_lock);

        if (heap_info->cma_len < heap_info->cma_chunk_size) {
                dev_err(dev, "error cma_len(0x%zx) < cma_chunk_size(0x%zx)\n",
                        heap_info->cma_len, heap_info->cma_chunk_size);
                err = -EINVAL;
                goto fail;
        }

        heap_info->num_chunks = div_u64_rem(heap_info->cma_len,
                (u32)heap_info->cma_chunk_size, (u32 *)&heap_info->rem_chunk_size);
        if (heap_info->rem_chunk_size) {
                heap_info->num_chunks++;
                dev_info(dev, "heap size is not a multiple of cma_chunk_size "
                        "heap_info->num_chunks (%d) rem_chunk_size(0x%zx)\n",
                        heap_info->num_chunks, heap_info->rem_chunk_size);
        } else {
                heap_info->rem_chunk_size = heap_info->cma_chunk_size;
        }

        dev_set_name(&heap_info->dev, "%s-heap", heap_info->name);

        if (dma_info->notifier.ops)
                heap_info->update_resize_cfg =
                        dma_info->notifier.ops->resize;

        dev_set_drvdata(dev, heap_info);
        dma_debugfs_init(dev, heap_info);

        if (declare_coherent_heap(&heap_info->dev,
                                  heap_info->cma_base, heap_info->cma_len)) {
                err = -ENOMEM;
                goto declare_fail;
        }
        heap_info->dev.dma_mem->size = 0;

        pr_info("resizable cma heap=%s created successfully\n", heap_info->name);
        return 0;
declare_fail:
        kfree(heap_info->name);
fail:
        kfree(heap_info);
        return err;
#else
        return -EINVAL;
#endif
}
EXPORT_SYMBOL(dma_declare_coherent_resizable_cma_memory);
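
/*
 * Example (illustrative sketch): a platform setup path might register a
 * resizable heap roughly as follows. Only the dma_declare_info fields
 * referenced above (name, size, cma_dev, notifier) are assumed; the "vpr"
 * device names and sizes are hypothetical.
 *
 *      static struct dma_declare_info vpr_info = {
 *              .name           = "vpr",
 *              .size           = SZ_16M,
 *              .cma_dev        = &tegra_vpr_cma_dev,
 *      };
 *
 *      if (dma_declare_coherent_resizable_cma_memory(&vpr_dev, &vpr_info))
 *              pr_err("failed to declare resizable vpr heap\n");
 */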

static phys_addr_t alloc_from_contiguous_heap(
                                struct heap_info *h,
                                phys_addr_t base, size_t len)
{
        size_t count;
        struct page *page;
        unsigned long order;

        dev_dbg(h->cma_dev, "req at base (0x%pa) size (0x%zx)\n",
                &base, len);
        order = get_order(len);
        count = PAGE_ALIGN(len) >> PAGE_SHIFT;
        page = dma_alloc_at_from_contiguous(h->cma_dev, count, order, base);
        if (!page) {
                dev_err(h->cma_dev, "dma_alloc_at_from_contiguous failed\n");
                goto dma_alloc_err;
        }

        base = page_to_phys(page);
        dev_dbg(h->cma_dev, "allocated at base (0x%pa) size (0x%zx)\n",
                &base, len);
        BUG_ON(base < h->cma_base ||
                base - h->cma_base + len > h->cma_len);
        return base;

dma_alloc_err:
        return DMA_ERROR_CODE;
}

static void release_from_contiguous_heap(
                                struct heap_info *h,
                                phys_addr_t base, size_t len)
{
        struct page *page = phys_to_page(base);
        size_t count = PAGE_ALIGN(len) >> PAGE_SHIFT;

        dma_release_from_contiguous(h->cma_dev, page, count);
        dev_dbg(h->cma_dev, "released at base (0x%pa) size (0x%zx)\n",
                &base, len);
}

static void get_first_and_last_idx(struct heap_info *h,
                                   int *first_alloc_idx, int *last_alloc_idx)
{
        if (!h->curr_len) {
                *first_alloc_idx = -1;
                *last_alloc_idx = h->num_chunks;
        } else {
                *first_alloc_idx = div_u64(h->curr_base - h->cma_base,
                                           h->cma_chunk_size);
                *last_alloc_idx = div_u64(h->curr_base - h->cma_base +
                                          h->curr_len + h->cma_chunk_size -
                                          h->rem_chunk_size,
                                          h->cma_chunk_size) - 1;
        }
}
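
/*
 * Worked example of the chunk bookkeeping above (numbers are illustrative,
 * not taken from any particular platform): with cma_base = 0x80000000,
 * cma_len = 40 MiB and cma_chunk_size = 16 MiB, num_chunks = 3 and the last
 * chunk is a rem_chunk_size = 8 MiB tail. If the heap currently spans
 * curr_base = 0x81000000 with curr_len = 24 MiB (chunks 1 and 2), then
 * first_alloc_idx = (0x81000000 - 0x80000000) / 16M = 1 and
 * last_alloc_idx = (16M + 24M + 16M - 8M) / 16M - 1 = 2.
 */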

static void update_alloc_range(struct heap_info *h)
{
        if (!h->curr_len)
                h->dev.dma_mem->size = 0;
        else
                h->dev.dma_mem->size = (h->curr_base - h->cma_base +
                                        h->curr_len) >> PAGE_SHIFT;
}

static int heap_resize_locked(struct heap_info *h)
{
        int i;
        int err = 0;
        phys_addr_t base = -1;
        size_t len = h->cma_chunk_size;
        phys_addr_t prev_base = h->curr_base;
        size_t prev_len = h->curr_len;
        int alloc_at_idx = 0;
        int first_alloc_idx;
        int last_alloc_idx;
        phys_addr_t start_addr = 0;

        get_first_and_last_idx(h, &first_alloc_idx, &last_alloc_idx);
        pr_debug("req resize, fi=%d,li=%d\n", first_alloc_idx, last_alloc_idx);

        /* All chunks are in use. Can't grow it. */
        if (first_alloc_idx == 0 && last_alloc_idx == h->num_chunks - 1)
                return -ENOMEM;

        /* All chunks are free. Can allocate anywhere in CMA with
         * cma_chunk_size alignment.
         */
        if (first_alloc_idx == -1) {
                base = alloc_from_contiguous_heap(h, start_addr, len);
                if (!dma_mapping_error(h->cma_dev, base))
                        goto alloc_success;
        }

        /* Free chunk before previously allocated chunk. Attempt
         * to allocate only immediate previous chunk.
         */
        if (first_alloc_idx > 0) {
                alloc_at_idx = first_alloc_idx - 1;
                start_addr = alloc_at_idx * h->cma_chunk_size + h->cma_base;
                base = alloc_from_contiguous_heap(h, start_addr, len);
                if (base == start_addr)
                        goto alloc_success;
                BUG_ON(!dma_mapping_error(h->cma_dev, base));
        }

        /* Free chunk after previously allocated chunk. */
        if (last_alloc_idx < h->num_chunks - 1) {
                alloc_at_idx = last_alloc_idx + 1;
                len = (alloc_at_idx == h->num_chunks - 1) ?
                                h->rem_chunk_size : h->cma_chunk_size;
                start_addr = alloc_at_idx * h->cma_chunk_size + h->cma_base;
                base = alloc_from_contiguous_heap(h, start_addr, len);
                if (base == start_addr)
                        goto alloc_success;
                BUG_ON(!dma_mapping_error(h->cma_dev, base));
        }

        if (dma_mapping_error(h->cma_dev, base))
                dev_err(&h->dev,
                "Failed to allocate contiguous memory on heap grow req\n");

        return -ENOMEM;

alloc_success:
        if (!h->curr_len || h->curr_base > base)
                h->curr_base = base;
        h->curr_len += len;

        /* Flush the CPU cache over the newly added range, page by page. */
        for (i = 0; i < (len >> PAGE_SHIFT); i++) {
                struct page *page = phys_to_page(base + (i << PAGE_SHIFT));

                if (PageHighMem(page)) {
                        void *ptr = kmap_atomic(page);
                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
                        kunmap_atomic(ptr);
                } else {
                        void *ptr = page_address(page);
                        dmac_flush_range(ptr, ptr + PAGE_SIZE);
                }
        }

        /* Handle VPR configuration updates */
        if (h->update_resize_cfg) {
                err = h->update_resize_cfg(h->curr_base, h->curr_len);
                if (err) {
                        dev_err(&h->dev, "Failed to update heap resize\n");
                        goto fail_update;
                }
                dev_dbg(&h->dev, "update vpr base to %pa, size=%zx\n",
                        &h->curr_base, h->curr_len);
        }

        update_alloc_range(h);
        dev_dbg(&h->dev,
                "grow heap base from=0x%pa to=0x%pa, len from=0x%zx to=0x%zx\n",
                &prev_base, &h->curr_base, prev_len, h->curr_len);
        return 0;

fail_update:
        release_from_contiguous_heap(h, base, len);
        h->curr_base = prev_base;
        h->curr_len = prev_len;
        return -ENOMEM;
}

/* retval: !0 on success, 0 on failure */
static int dma_alloc_from_coherent_dev_at(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret,
                                       struct dma_attrs *attrs, ulong start)
{
        struct dma_coherent_mem *mem;
        int order = get_order(size);
        int pageno;
        unsigned int count;
        unsigned long align;

        if (!dev)
                return 0;
        mem = dev->dma_mem;
        if (!mem)
                return 0;

        *dma_handle = DMA_ERROR_CODE;
        *ret = NULL;

        if (unlikely(size > (mem->size << PAGE_SHIFT)))
                goto err;

        if (order > DMA_BUF_ALIGNMENT)
                align = (1 << DMA_BUF_ALIGNMENT) - 1;
        else
                align = (1 << order) - 1;

        if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs))
                count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        else
                count = 1 << order;

        pageno = bitmap_find_next_zero_area(mem->bitmap, mem->size,
                        start, count, align);

        if (pageno >= mem->size)
                goto err;

        bitmap_set(mem->bitmap, pageno, count);

        /*
         * Memory was found in the per-device area.
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        if (!(mem->flags & DMA_MEMORY_NOMAP)) {
                *ret = mem->virt_base + (pageno << PAGE_SHIFT);
                memset(*ret, 0, size);
        }

        return 1;

err:
        /*
         * In the case where the allocation can not be satisfied from the
         * per-device area, try to fall back to generic memory if the
         * constraints allow it.
         */
        return mem->flags & DMA_MEMORY_EXCLUSIVE;
}

static int dma_alloc_from_coherent_dev(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret,
                                       struct dma_attrs *attrs)
{
        return dma_alloc_from_coherent_dev_at(dev, size, dma_handle,
                                              ret, attrs, 0);
}

/* retval: !0 on success, 0 on failure */
static int dma_alloc_from_coherent_heap_dev(struct device *dev, size_t len,
                                        dma_addr_t *dma_handle, void **ret,
                                        struct dma_attrs *attrs)
{
        struct heap_info *h = NULL;

        *dma_handle = DMA_ERROR_CODE;
        if (!dma_is_coherent_dev(dev))
                return 0;

        h = dev_get_drvdata(dev);
        BUG_ON(!h);
        if (!h)
                return DMA_MEMORY_EXCLUSIVE;
        dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs);

        mutex_lock(&h->resize_lock);
retry_alloc:
        /* Try allocation from already existing CMA chunks */
        if (dma_alloc_from_coherent_dev_at(
                &h->dev, len, dma_handle, ret, attrs,
                (h->curr_base - h->cma_base) >> PAGE_SHIFT)) {
                dev_dbg(&h->dev, "allocated addr 0x%pa len 0x%zx\n",
                        dma_handle, len);
                goto out;
        }

        if (!heap_resize_locked(h))
                goto retry_alloc;
out:
        mutex_unlock(&h->resize_lock);
        return DMA_MEMORY_EXCLUSIVE;
}

/* retval: !0 on success, 0 on failure */
static int dma_release_from_coherent_dev(struct device *dev, size_t size,
                                        void *vaddr, struct dma_attrs *attrs)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        void *mem_addr;
        unsigned int count;
        unsigned int pageno;

        if (!mem)
                return 0;

        if (mem->flags & DMA_MEMORY_NOMAP)
                mem_addr = (void *)(uintptr_t)mem->device_base;
        else
                mem_addr = mem->virt_base;

        if (mem && vaddr >= mem_addr &&
            vaddr - mem_addr < mem->size << PAGE_SHIFT) {

                pageno = (vaddr - mem_addr) >> PAGE_SHIFT;

                if (dma_get_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs))
                        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
                else
                        count = 1 << get_order(size);

                bitmap_clear(mem->bitmap, pageno, count);

                return 1;
        }
        return 0;
}

static int dma_release_from_coherent_heap_dev(struct device *dev, size_t len,
                                        void *base, struct dma_attrs *attrs)
{
        int idx = 0;
        int err = 0;
        int resize_err = 0;
        void *ret = NULL;
        dma_addr_t dev_base;
        struct heap_info *h = NULL;
        size_t chunk_size;
        int first_alloc_idx;
        int last_alloc_idx;

        if (!dma_is_coherent_dev(dev))
                return 0;

        h = dev_get_drvdata(dev);
        BUG_ON(!h);
        if (!h)
                return 1;
        if ((uintptr_t)base < h->cma_base ||
            len > h->cma_chunk_size ||
            (uintptr_t)base - h->cma_base > h->cma_len - len) {
                BUG();
                return 1;
        }

        dma_set_attr(DMA_ATTR_ALLOC_EXACT_SIZE, attrs);

        mutex_lock(&h->resize_lock);

        idx = div_u64((uintptr_t)base - h->cma_base, h->cma_chunk_size);
        dev_dbg(&h->dev, "req free addr (%p) size (0x%zx) idx (%d)\n",
                base, len, idx);
        err = dma_release_from_coherent_dev(&h->dev, len, base, attrs);

        if (!err)
                goto out_unlock;

check_next_chunk:
        get_first_and_last_idx(h, &first_alloc_idx, &last_alloc_idx);

        /* Check if the heap can be shrunk */
        if (idx == first_alloc_idx || idx == last_alloc_idx) {
                /* check if the entire chunk is free */
                chunk_size = (idx == h->num_chunks - 1) ? h->rem_chunk_size :
                                                          h->cma_chunk_size;
                resize_err = dma_alloc_from_coherent_dev_at(&h->dev,
                                        chunk_size, &dev_base, &ret, attrs,
                                        idx * h->cma_chunk_size >> PAGE_SHIFT);
                if (!resize_err) {
                        goto out_unlock;
                } else if (dev_base != h->cma_base + idx * h->cma_chunk_size) {
                        resize_err = dma_release_from_coherent_dev(
                                        &h->dev, chunk_size,
                                        (void *)(uintptr_t)dev_base, attrs);
                        BUG_ON(!resize_err);
                        goto out_unlock;
                } else {
                        dev_dbg(&h->dev,
                                "prep to remove chunk b=0x%pa, s=0x%zx\n",
                                &dev_base, chunk_size);
                        resize_err = dma_release_from_coherent_dev(
                                        &h->dev, chunk_size,
                                        (void *)(uintptr_t)dev_base, attrs);
                        BUG_ON(!resize_err);
                        if (!resize_err) {
                                dev_err(&h->dev, "failed to rel mem\n");
                                goto out_unlock;
                        }

                        /* Handle VPR configuration updates */
                        if (h->update_resize_cfg) {
                                phys_addr_t new_base = h->curr_base;
                                size_t new_len = h->curr_len - chunk_size;

                                if (h->curr_base == dev_base)
                                        new_base += chunk_size;
                                dev_dbg(&h->dev, "update vpr base to %pa, size=%zx\n",
                                        &new_base, new_len);
                                resize_err =
                                        h->update_resize_cfg(new_base, new_len);
                                if (resize_err) {
                                        dev_err(&h->dev,
                                                "update resize failed\n");
                                        goto out_unlock;
                                }
                        }

                        if (h->curr_base == dev_base)
                                h->curr_base += chunk_size;
                        h->curr_len -= chunk_size;
                        update_alloc_range(h);
                        idx == first_alloc_idx ? ++idx : --idx;
                        release_from_contiguous_heap(h, dev_base, chunk_size);
                        dev_dbg(&h->dev, "removed chunk b=0x%pa, s=0x%zx new heap b=0x%pa, s=0x%zx\n",
                                &dev_base, chunk_size, &h->curr_base, h->curr_len);
                }
                if (idx < h->num_chunks)
                        goto check_next_chunk;
        }
out_unlock:
        mutex_unlock(&h->resize_lock);
        return err;
}

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;

        if (!(mem->flags & DMA_MEMORY_NOMAP))
                iounmap(mem->virt_base);

        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;

        size += device_addr & ~PAGE_MASK;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent_attr() - try to allocate memory from the per-device
 * coherent area
 *
 * @dev:        device from which we allocate memory
 * @size:       size of requested memory area
 * @dma_handle: This will be filled with the correct dma handle
 * @ret:        This pointer will be filled with the virtual address
 *              of the allocated area.
 * @attrs:      DMA attributes
 *
 * This function should only be called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent_attr should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent_attr(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret,
                                       struct dma_attrs *attrs)
{
        if (!dev)
                return 0;

        if (dev->dma_mem)
                return dma_alloc_from_coherent_dev(dev, size, dma_handle, ret,
                                                        attrs);
        else
                return dma_alloc_from_coherent_heap_dev(dev, size, dma_handle,
                                                        ret, attrs);
}
EXPORT_SYMBOL(dma_alloc_from_coherent_attr);
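
/*
 * Example (illustrative sketch): this is how an arch dma_alloc_coherent()
 * implementation is expected to consult the per-device pool before falling
 * back to the generic allocator. The surrounding arch helper and its
 * generic-pool fallback are hypothetical.
 *
 *      void *arch_dma_alloc(struct device *dev, size_t size,
 *                           dma_addr_t *handle, gfp_t gfp,
 *                           struct dma_attrs *attrs)
 *      {
 *              void *cpu_addr;
 *
 *              if (dma_alloc_from_coherent_attr(dev, size, handle,
 *                                               &cpu_addr, attrs))
 *                      return cpu_addr;
 *
 *              return alloc_from_generic_pools(dev, size, handle, gfp);
 *      }
 */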

/**
 * dma_release_from_coherent_attr() - try to free the memory allocated from
 * per-device coherent memory pool
 * @dev:        device from which the memory was allocated
 * @size:       size of the memory area to free
 * @vaddr:      virtual address of allocated pages
 * @attrs:      DMA attributes
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent_attr() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent_attr(struct device *dev, size_t size, void *vaddr,
                                struct dma_attrs *attrs)
{
        if (!dev)
                return 0;

        if (dev->dma_mem)
                return dma_release_from_coherent_dev(dev, size, vaddr, attrs);
        else
                return dma_release_from_coherent_heap_dev(dev, size, vaddr,
                        attrs);
}
EXPORT_SYMBOL(dma_release_from_coherent_attr);
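
/*
 * Example (illustrative sketch): the matching free path in an arch
 * dma_free_coherent() implementation. The fallback helper is hypothetical.
 *
 *      void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 *                         dma_addr_t handle, struct dma_attrs *attrs)
 *      {
 *              if (dma_release_from_coherent_attr(dev, size, cpu_addr, attrs))
 *                      return;
 *
 *              free_to_generic_pools(dev, size, cpu_addr, handle);
 *      }
 */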

/**
 * dma_mmap_from_coherent() - try to mmap the memory allocated from
 * per-device coherent memory pool to userspace
 * @dev:        device from which the memory was allocated
 * @vma:        vm_area for the userspace memory
 * @vaddr:      cpu address returned by dma_alloc_from_coherent
 * @size:       size of the memory buffer allocated by dma_alloc_from_coherent
 * @ret:        result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if we correctly mapped the memory, or 0 if the caller should
 * proceed with mapping memory from generic pools.
 */
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *vaddr, size_t size, int *ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        void *mem_addr;

        if (!mem)
                return 0;

        if (mem->flags & DMA_MEMORY_NOMAP)
                mem_addr = (void *)(uintptr_t)mem->device_base;
        else
                mem_addr = mem->virt_base;

        if (mem && vaddr >= mem_addr && vaddr + size <=
                   (mem_addr + (mem->size << PAGE_SHIFT))) {
                unsigned long off = vma->vm_pgoff;
                int start = (vaddr - mem_addr) >> PAGE_SHIFT;
                int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
                int count = size >> PAGE_SHIFT;

                *ret = -ENXIO;
                if (off < count && user_count <= count - off) {
                        unsigned pfn = mem->pfn_base + start + off;
                        *ret = remap_pfn_range(vma, vma->vm_start, pfn,
                                               user_count << PAGE_SHIFT,
                                               vma->vm_page_prot);
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
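
/*
 * Example (illustrative sketch): an arch dma_mmap() implementation typically
 * checks the per-device pool first, as the kernel-doc above describes, and
 * only falls back to the generic path when 0 is returned. The fallback call
 * shown here is hypothetical.
 *
 *      int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 *                        void *cpu_addr, dma_addr_t handle, size_t size,
 *                        struct dma_attrs *attrs)
 *      {
 *              int ret;
 *
 *              if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 *                      return ret;
 *
 *              return generic_dma_mmap(dev, vma, cpu_addr, handle, size);
 *      }
 */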