ARM64: dma-mapping: get gap/prefetch from mapping
author    Sri Krishna chowdary <schowdary@nvidia.com>
          Thu, 22 May 2014 14:48:57 +0000 (20:18 +0530)
committer Hiroshi Doyu <hdoyu@nvidia.com>
          Fri, 1 Aug 2014 07:39:22 +0000 (00:39 -0700)
If prefetch or gap pages are specified in the mapping, use the
mapping's properties. Otherwise, assume that the mapping has not
been updated yet and fall back to the DMA_ATTR_SKIP_IOVA_GAP
attribute.

Bug 1463797

Change-Id: I4cef73adc5ffbb43389f7d31f6684d82f9fefa16
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/413269
GVS: Gerrit_Virtual_Submit
Reviewed-by: Hiroshi Doyu <hdoyu@nvidia.com>
Tested-by: Hiroshi Doyu <hdoyu@nvidia.com>
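
For readers skimming the patch, here is a small stand-alone sketch (not part of the change) of the decision the new iommu_get_num_pf_pages() helper below makes: a mapping whose alignment is set is trusted to carry its own prefetch/gap page counts, while a legacy mapping still falls back to DMA_ATTR_SKIP_IOVA_GAP. The struct fields and constant names mirror the diff; the concrete values, the assumed composition of PG_PAGES, and the plain-C scaffolding are illustrative only.

#include <stdbool.h>
#include <stdio.h>

#define PREFETCH_PAGE_COUNT	1	/* stand-in for prefetch_page_count */
#define GAP_PAGE_COUNT		1	/* stand-in for gap_page_count */
/* PG_PAGES assumed here to be prefetch + gap pages */
#define PG_PAGES	(PREFETCH_PAGE_COUNT + GAP_PAGE_COUNT)

struct mapping_props {			/* subset of struct dma_iommu_mapping */
	unsigned long alignment;	/* non-zero => prefetch/gap fields are valid */
	int num_pf_page;		/* number of prefetch pages (currently 0 or 1) */
	int gap_page;			/* boolean: append one gap page */
};

static int num_pf_pages(const struct mapping_props *m, bool skip_iova_gap)
{
	/* Legacy mapping: properties not filled in, honour the DMA attribute. */
	if (!m->alignment)
		return skip_iova_gap ? 0 : PG_PAGES;

	/* Updated mapping: counts come from the mapping itself. */
	return m->num_pf_page + (m->gap_page ? GAP_PAGE_COUNT : 0);
}

int main(void)
{
	struct mapping_props legacy  = { 0, 0, 0 };
	struct mapping_props updated = { 4096, 1, 1 };

	printf("legacy, attr not set : %d extra pages\n", num_pf_pages(&legacy, false));
	printf("legacy, SKIP_IOVA_GAP: %d extra pages\n", num_pf_pages(&legacy, true));
	printf("updated mapping      : %d extra pages\n", num_pf_pages(&updated, false));
	return 0;
}
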
arch/arm64/mm/dma-mapping.c

index 119b88cea4b6fdbbb5b9a6cd0de6428360fa1e2b..f0f4ad8f2664462b3e8ec7e1a48ea8c733479120 100644
@@ -1229,13 +1229,38 @@ static void iommu_mapping_list_del(struct dma_iommu_mapping *mapping)
        spin_unlock_irqrestore(&iommu_mapping_list_lock, flags);
 }
 
-static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
+static inline int iommu_get_num_pf_pages(struct dma_iommu_mapping *mapping,
+                                        struct dma_attrs *attrs)
+{
+       int count = 0;
+
+       /*
+        * XXX: assume alignment property's presence <=> prefetch and gap
+        * properties are correctly filled
+        */
+       if (!mapping->alignment) {
+               if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+                       return PG_PAGES;
+               return 0;
+       }
+
+       /* XXX: currently we support only 1 prefetch page */
+       WARN_ON(mapping->num_pf_page > prefetch_page_count);
+
+       count += mapping->num_pf_page;
+       count += mapping->gap_page ? gap_page_count : 0;
+       return count;
+}
+
+static int pg_iommu_map(struct dma_iommu_mapping *mapping, unsigned long iova,
                        phys_addr_t phys, size_t len, unsigned long prot)
 {
        int err;
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                err = iommu_map(domain, iova + len, iova_gap_phys,
                                PF_PAGES_SIZE, prot);
                if (err)
@@ -1243,18 +1268,20 @@ static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
        }
 
        err = iommu_map(domain, iova, phys, len, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+       if (err && need_prefetch_page)
                iommu_unmap(domain, iova + len, PF_PAGES_SIZE);
 
        return err;
 }
 
-static size_t pg_iommu_unmap(struct iommu_domain *domain,
+static size_t pg_iommu_unmap(struct dma_iommu_mapping *mapping,
                             unsigned long iova, size_t len, ulong prot)
 {
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                phys_addr_t phys_addr;
 
                phys_addr = iommu_iova_to_phys(domain, iova + len);
@@ -1265,13 +1292,15 @@ static size_t pg_iommu_unmap(struct iommu_domain *domain,
        return iommu_unmap(domain, iova, len);
 }
 
-static int pg_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+static int pg_iommu_map_pages(struct dma_iommu_mapping *mapping, unsigned long iova,
                    struct page **pages, size_t count, unsigned long prot)
 {
        int err;
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                err = iommu_map(domain, iova + (count << PAGE_SHIFT),
                                iova_gap_phys, PF_PAGES_SIZE, prot);
                if (err)
@@ -1279,19 +1308,21 @@ static int pg_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
        }
 
        err = iommu_map_pages(domain, iova, pages, count, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+       if (err && need_prefetch_page)
                iommu_unmap(domain, iova + (count << PAGE_SHIFT), PF_PAGES_SIZE);
 
        return err;
 }
 
-static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+static int pg_iommu_map_sg(struct dma_iommu_mapping *mapping, unsigned long iova,
                 struct scatterlist *sgl, int nents, unsigned long prot)
 {
        int err;
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                err = iommu_map(domain, iova + (nents << PAGE_SHIFT),
                                iova_gap_phys, PF_PAGES_SIZE, prot);
                if (err)
@@ -1299,7 +1330,7 @@ static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        }
 
        err = iommu_map_sg(domain, iova, sgl, nents, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+       if (err && need_prefetch_page)
                iommu_unmap(domain, iova + (nents << PAGE_SHIFT), PF_PAGES_SIZE);
 
        return err;
@@ -1367,8 +1398,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                 (1 << mapping->order) - 1) >> mapping->order;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
 
        if (order > mapping->order)
                align = (1 << (order - mapping->order)) - 1;
@@ -1398,8 +1428,7 @@ static dma_addr_t __alloc_iova_at(struct dma_iommu_mapping *mapping,
        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                 (1 << mapping->order) - 1) >> mapping->order;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
 
        bytes = count << (mapping->order + PAGE_SHIFT);
 
@@ -1455,8 +1484,7 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
                              (1 << mapping->order) - 1) >> mapping->order;
        unsigned long flags;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
 
        spin_lock_irqsave(&mapping->lock, flags);
        bitmap_clear(mapping->bitmap, start, count);
@@ -1632,7 +1660,7 @@ ____iommu_create_mapping(struct device *dev, dma_addr_t *req,
                                break;
 
                len = (j - i) << PAGE_SHIFT;
-               ret = pg_iommu_map(mapping->domain, iova, phys, len,
+               ret = pg_iommu_map(mapping, iova, phys, len,
                                   (ulong)attrs);
                if (ret < 0)
                        goto fail;
@@ -1641,7 +1669,7 @@ ____iommu_create_mapping(struct device *dev, dma_addr_t *req,
        }
        return dma_addr;
 fail:
-       pg_iommu_unmap(mapping->domain, dma_addr, iova-dma_addr, (ulong)attrs);
+       pg_iommu_unmap(mapping, dma_addr, iova-dma_addr, (ulong)attrs);
        __free_iova(mapping, dma_addr, size, attrs);
        return DMA_ERROR_CODE;
 }
@@ -1665,7 +1693,7 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
        size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
        iova &= PAGE_MASK;
 
-       pg_iommu_unmap(mapping->domain, iova, size, (ulong)attrs);
+       pg_iommu_unmap(mapping, iova, size, (ulong)attrs);
        __free_iova(mapping, iova, size, attrs);
        return 0;
 }
@@ -1869,7 +1897,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 
 skip_cmaint:
        count = size >> PAGE_SHIFT;
-       ret = pg_iommu_map_sg(mapping->domain, iova_base, sg, count,
+       ret = pg_iommu_map_sg(mapping, iova_base, sg, count,
                              (ulong)attrs);
        if (WARN_ON(ret < 0))
                goto fail;
@@ -1878,7 +1906,7 @@ skip_cmaint:
 
        return 0;
 fail:
-       pg_iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE,
+       pg_iommu_unmap(mapping, iova_base, count * PAGE_SIZE,
                       (ulong)attrs);
        __free_iova(mapping, iova_base, size, attrs);
        return ret;
@@ -2080,7 +2108,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       ret = pg_iommu_map(mapping->domain, dma_addr,
+       ret = pg_iommu_map(mapping, dma_addr,
                           page_to_phys(page), len, (ulong)attrs);
        if (ret < 0)
                goto fail;
@@ -2122,7 +2150,7 @@ static dma_addr_t arm_iommu_map_page_at(struct device *dev, struct page *page,
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);
 
-       ret = pg_iommu_map(mapping->domain, dma_addr,
+       ret = pg_iommu_map(mapping, dma_addr,
                           page_to_phys(page), len, (ulong)attrs);
        if (ret < 0)
                return DMA_ERROR_CODE;
@@ -2146,7 +2174,7 @@ static dma_addr_t arm_iommu_map_pages(struct device *dev, struct page **pages,
                        __dma_page_cpu_to_dev(pages[i], 0, PAGE_SIZE, dir);
        }
 
-       ret = pg_iommu_map_pages(mapping->domain, dma_handle, pages, count,
+       ret = pg_iommu_map_pages(mapping, dma_handle, pages, count,
                                 (ulong)attrs);
        if (ret < 0)
                return DMA_ERROR_CODE;
@@ -2179,7 +2207,7 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 
        trace_dmadebug_unmap_page(dev, handle, size,
                  phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
-       pg_iommu_unmap(mapping->domain, iova, len, (ulong)attrs);
+       pg_iommu_unmap(mapping, iova, len, (ulong)attrs);
        if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
                __free_iova(mapping, iova, len, attrs);
 }
@@ -2211,7 +2239,7 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 
        trace_dmadebug_unmap_page(dev, handle, size,
                  phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
-       pg_iommu_unmap(mapping->domain, iova, len, (ulong)attrs);
+       pg_iommu_unmap(mapping, iova, len, (ulong)attrs);
        if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
                __free_iova(mapping, iova, len, attrs);
 }
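
Note how the allocator hunks above (__alloc_iova, __alloc_iova_at and __free_iova) all take the extra-page count from the same helper, so reservation and release of IOVA space stay symmetric whichever source supplies the count. A minimal sketch of that slot arithmetic follows, with simplified PAGE_ALIGN/PAGE_SHIFT stand-ins and a hypothetical extra-page value; it is not kernel code.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/*
 * Mirror of the bitmap-slot computation in __alloc_iova()/__free_iova():
 * round the request up to whole pages, convert to allocation-order units,
 * then add the prefetch/gap pages returned by iommu_get_num_pf_pages().
 */
static unsigned long iova_slots(size_t size, unsigned int order, int extra_pages)
{
	unsigned long count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
			       (1UL << order) - 1) >> order;
	return count + extra_pages;
}

int main(void)
{
	/* 6000-byte buffer, order 0, two extra pages (prefetch + gap). */
	printf("slots: %lu\n", iova_slots(6000, 0, 2)); /* 2 data pages + 2 = 4 */
	return 0;
}
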