{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
- if (dma_release_from_coherent_attr(dev, size, cpu_addr, attrs))
+ if (dma_release_from_coherent_attr(dev, size, cpu_addr,
+ attrs, handle))
return;
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
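The hunk above (the ARM coherent free path) now forwards the DMA handle alongside the CPU address. A minimal, hypothetical driver-side sketch of the alloc/free pairing that exercises this path; the function and variable names are invented, and the "both zero means failure" check is only illustrative of the DMA_MEMORY_NOMAP case this series targets:

	#include <linux/dma-mapping.h>

	static void *example_cpu;
	static dma_addr_t example_handle;

	static int example_buf_get(struct device *dev, size_t size)
	{
		example_cpu = dma_alloc_coherent(dev, size, &example_handle,
						 GFP_KERNEL);
		/*
		 * On a device whose coherent region was declared with
		 * DMA_MEMORY_NOMAP, example_cpu may legitimately be NULL
		 * while example_handle is valid, so this sketch only treats
		 * the "both zero" case as a failed allocation.
		 */
		if (!example_cpu && !example_handle)
			return -ENOMEM;
		return 0;
	}

	static void example_buf_put(struct device *dev, size_t size)
	{
		/* example_cpu may be NULL; the handle identifies the buffer */
		dma_free_coherent(dev, size, example_cpu, example_handle);
	}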
* generic pools.
*/
int dma_release_from_coherent_attr(struct device *dev, size_t size, void *vaddr,
- struct dma_attrs *attrs)
+ struct dma_attrs *attrs, dma_addr_t dma_handle)
{
if (!dev)
return 0;
+ if (!vaddr)
+ /*
+ * The only valid case where vaddr can be NULL is when
+ * dma_alloc_attrs() is called on a coherent dev that was
+ * initialized with DMA_MEMORY_NOMAP.
+ */
+ vaddr = (void *)dma_handle;
+
if (dev->dma_mem)
return dma_release_from_coherent_dev(dev, size, vaddr, attrs);
else
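The NULL-vaddr fallback above only applies to per-device coherent memory set up with DMA_MEMORY_NOMAP. A hedged platform-side sketch of how such a region would be declared; the base address and size are made-up values, and the dma_declare_coherent_memory() signature and nonzero-on-success return convention are assumed to match kernels of this struct dma_attrs vintage:

	#include <linux/dma-mapping.h>

	static int example_platform_setup(struct device *dev)
	{
		/*
		 * Hand the device a coherent region with no kernel mapping
		 * (DMA_MEMORY_NOMAP). Allocations from it come back with a
		 * valid dma_addr_t but a NULL CPU pointer, which is the
		 * vaddr == NULL case handled in the hunk above.
		 */
		if (!dma_declare_coherent_memory(dev, 0x80000000, 0x80000000,
						 0x100000, DMA_MEMORY_NOMAP))
			return -ENOMEM;
		return 0;
	}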
dma_addr_t *dma_handle, void **ret,
struct dma_attrs *attrs);
int dma_release_from_coherent_attr(struct device *dev, size_t size, void *vaddr,
- struct dma_attrs *attrs);
+ struct dma_attrs *attrs,
+ dma_addr_t dma_handle);
#define dma_alloc_from_coherent(d, s, h, r) \
dma_alloc_from_coherent_attr(d, s, h, r, NULL)
#define dma_release_from_coherent(d, s, v) \
- dma_release_from_coherent_attr(d, s, v, NULL)
+ dma_release_from_coherent_attr(d, s, v, NULL, 0)
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
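For reference, how the updated compatibility macro keeps unconverted call sites building; dev, size and cpu_addr are placeholders:

	/* Unconverted call site, source unchanged: */
	dma_release_from_coherent(dev, size, cpu_addr);

	/* ...which after the macro change expands to: */
	dma_release_from_coherent_attr(dev, size, cpu_addr, NULL, 0);

Legacy users therefore keep their behaviour (a valid cpu_addr, no attrs, handle defaulting to 0), while converted callers such as the ARM hunk above pass the handle explicitly to cover the NULL cpu_addr case.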