spin_unlock_irqrestore(&iommu_mapping_list_lock, flags);
}
-static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
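+/*
+ * Return how many extra IOVA pages (prefetch and gap pages) need to be
+ * reserved past the end of a mapping. Mappings that carry no alignment
+ * property fall back to the legacy fixed PG_PAGES reservation, unless
+ * the caller asked to skip the IOVA gap entirely.
+ */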
+static inline int iommu_get_num_pf_pages(struct dma_iommu_mapping *mapping,
+ struct dma_attrs *attrs)
+{
+ int count = 0;
+
+	/*
+	 * XXX: assume that the alignment property is present if and only
+	 * if the prefetch and gap page properties are correctly filled.
+	 */
+ if (!mapping->alignment) {
+ if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ return PG_PAGES;
+ return 0;
+ }
+
+ /* XXX: currently we support only 1 prefetch page */
+ WARN_ON(mapping->num_pf_page > prefetch_page_count);
+
+ count += mapping->num_pf_page;
+ count += mapping->gap_page ? gap_page_count : 0;
+ return count;
+}
+
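+/*
+ * Map @len bytes of @phys at @iova, plus a trailing prefetch/gap page
+ * when the mapping requires one; the gap page is unwound if the main
+ * mapping fails.
+ */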
+static int pg_iommu_map(struct dma_iommu_mapping *mapping, unsigned long iova,
phys_addr_t phys, size_t len, unsigned long prot)
{
int err;
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
err = iommu_map(domain, iova + len, iova_gap_phys,
PF_PAGES_SIZE, prot);
		if (err)
			return err;
	}
err = iommu_map(domain, iova, phys, len, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ if (err && need_prefetch_page)
iommu_unmap(domain, iova + len, PF_PAGES_SIZE);
return err;
}
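+/*
+ * Unmap a region mapped by pg_iommu_map(), including the trailing
+ * prefetch/gap page when one was mapped.
+ */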
-static size_t pg_iommu_unmap(struct iommu_domain *domain,
+static size_t pg_iommu_unmap(struct dma_iommu_mapping *mapping,
unsigned long iova, size_t len, ulong prot)
{
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
phys_addr_t phys_addr;
		phys_addr = iommu_iova_to_phys(domain, iova + len);
		BUG_ON(phys_addr != iova_gap_phys);
		iommu_unmap(domain, iova + len, PF_PAGES_SIZE);
	}

	return iommu_unmap(domain, iova, len);
}
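+/* Like pg_iommu_map(), but maps an array of @count pages at @iova. */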
-static int pg_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+static int pg_iommu_map_pages(struct dma_iommu_mapping *mapping, unsigned long iova,
struct page **pages, size_t count, unsigned long prot)
{
int err;
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
err = iommu_map(domain, iova + (count << PAGE_SHIFT),
iova_gap_phys, PF_PAGES_SIZE, prot);
		if (err)
			return err;
	}
err = iommu_map_pages(domain, iova, pages, count, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ if (err && need_prefetch_page)
iommu_unmap(domain, iova + (count << PAGE_SHIFT), PF_PAGES_SIZE);
return err;
}
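+/* Like pg_iommu_map(), but maps an @nents-long scatterlist at @iova. */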
-static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+static int pg_iommu_map_sg(struct dma_iommu_mapping *mapping, unsigned long iova,
struct scatterlist *sgl, int nents, unsigned long prot)
{
int err;
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
err = iommu_map(domain, iova + (nents << PAGE_SHIFT),
iova_gap_phys, PF_PAGES_SIZE, prot);
		if (err)
			return err;
	}
err = iommu_map_sg(domain, iova, sgl, nents, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ if (err && need_prefetch_page)
iommu_unmap(domain, iova + (nents << PAGE_SHIFT), PF_PAGES_SIZE);
	return err;
}
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
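+	/* reserve room for this mapping's prefetch/gap pages */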
+ count += iommu_get_num_pf_pages(mapping, attrs);
if (order > mapping->order)
align = (1 << (order - mapping->order)) - 1;
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
+ count += iommu_get_num_pf_pages(mapping, attrs);
bytes = count << (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			     (1 << mapping->order) - 1) >> mapping->order;
unsigned long flags;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
+ count += iommu_get_num_pf_pages(mapping, attrs);
spin_lock_irqsave(&mapping->lock, flags);
bitmap_clear(mapping->bitmap, start, count);
break;
len = (j - i) << PAGE_SHIFT;
- ret = pg_iommu_map(mapping->domain, iova, phys, len,
+ ret = pg_iommu_map(mapping, iova, phys, len,
(ulong)attrs);
if (ret < 0)
goto fail;
}
return dma_addr;
fail:
- pg_iommu_unmap(mapping->domain, dma_addr, iova-dma_addr, (ulong)attrs);
+	pg_iommu_unmap(mapping, dma_addr, iova - dma_addr, (ulong)attrs);
__free_iova(mapping, dma_addr, size, attrs);
return DMA_ERROR_CODE;
}
size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
iova &= PAGE_MASK;
- pg_iommu_unmap(mapping->domain, iova, size, (ulong)attrs);
+ pg_iommu_unmap(mapping, iova, size, (ulong)attrs);
__free_iova(mapping, iova, size, attrs);
return 0;
}
skip_cmaint:
count = size >> PAGE_SHIFT;
- ret = pg_iommu_map_sg(mapping->domain, iova_base, sg, count,
+ ret = pg_iommu_map_sg(mapping, iova_base, sg, count,
(ulong)attrs);
if (WARN_ON(ret < 0))
goto fail;
return 0;
fail:
- pg_iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE,
+ pg_iommu_unmap(mapping, iova_base, count * PAGE_SIZE,
(ulong)attrs);
__free_iova(mapping, iova_base, size, attrs);
return ret;
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- ret = pg_iommu_map(mapping->domain, dma_addr,
+ ret = pg_iommu_map(mapping, dma_addr,
page_to_phys(page), len, (ulong)attrs);
if (ret < 0)
goto fail;
if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
- ret = pg_iommu_map(mapping->domain, dma_addr,
+ ret = pg_iommu_map(mapping, dma_addr,
page_to_phys(page), len, (ulong)attrs);
if (ret < 0)
return DMA_ERROR_CODE;
__dma_page_cpu_to_dev(pages[i], 0, PAGE_SIZE, dir);
}
- ret = pg_iommu_map_pages(mapping->domain, dma_handle, pages, count,
+ ret = pg_iommu_map_pages(mapping, dma_handle, pages, count,
(ulong)attrs);
if (ret < 0)
return DMA_ERROR_CODE;
trace_dmadebug_unmap_page(dev, handle, size,
phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
- pg_iommu_unmap(mapping->domain, iova, len, (ulong)attrs);
+ pg_iommu_unmap(mapping, iova, len, (ulong)attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
__free_iova(mapping, iova, len, attrs);
}
trace_dmadebug_unmap_page(dev, handle, size,
phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
- pg_iommu_unmap(mapping->domain, iova, len, (ulong)attrs);
+ pg_iommu_unmap(mapping, iova, len, (ulong)attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
__free_iova(mapping, iova, len, attrs);
}