rtime.felk.cvut.cz Git - zynq/linux.git/commitdiff
iommu/intel: small map_page cleanup
author: Christoph Hellwig <hch@lst.de>
Wed, 21 Nov 2018 18:32:03 +0000 (19:32 +0100)
committer: Christoph Hellwig <hch@lst.de>
Thu, 6 Dec 2018 14:56:47 +0000 (06:56 -0800)
Pass the page + offset to the low-level __iommu_map_single helper
(which gets renamed to fit the new calling conventions) as both
callers have the page at hand.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
drivers/iommu/intel-iommu.c

index 41a4b8808802b8bcc30106c37b748eb3507943c0..66b4444398aefc93baab78ce671bee923ed2cbda 100644 (file)
@@ -3597,9 +3597,11 @@ static int iommu_no_mapping(struct device *dev)
        return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
-                                    size_t size, int dir, u64 dma_mask)
+static dma_addr_t __intel_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size, int dir,
+                                  u64 dma_mask)
 {
+       phys_addr_t paddr = page_to_phys(page) + offset;
        struct dmar_domain *domain;
        phys_addr_t start_paddr;
        unsigned long iova_pfn;
@@ -3661,8 +3663,7 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-       return __intel_map_single(dev, page_to_phys(page) + offset, size,
-                                 dir, *dev->dma_mask);
+       return __intel_map_page(dev, page, offset, size, dir, *dev->dma_mask);
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
@@ -3753,9 +3754,8 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                return NULL;
        memset(page_address(page), 0, size);
 
-       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
-                                        DMA_BIDIRECTIONAL,
-                                        dev->coherent_dma_mask);
+       *dma_handle = __intel_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL,
+                                      dev->coherent_dma_mask);
        if (*dma_handle)
                return page_address(page);
        if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))