rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
iommu/tegra: modify SMMU_EX_PTBL_PAGE to avoid pfn
author: Sri Krishna chowdary K <schowdary@nvidia.com>
Thu, 22 Jan 2015 12:32:56 +0000 (04:32 -0800)
committer: Sri Krishna Chowdary <schowdary@nvidia.com>
Sat, 7 Feb 2015 06:54:36 +0000 (22:54 -0800)
SMMU_MK_PDE converts a physical address into an SMMU PDE.
So, when extracting the page table page from a PDE, first recover the
physical address it encodes and then convert that to a struct page with
phys_to_page. This makes the macro independent of PAGE_SIZE.

If this is not done, locate_pte() gives us invalid virtual address
and kernel panic occurs when dereferencing it.

Bug 1526131

Change-Id: Id44031939df88eeb2593ed68442bef03f5383f0a
Signed-off-by: Sri Krishna chowdary <schowdary@nvidia.com>
Reviewed-on: http://git-master/r/676518
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
drivers/iommu/tegra-smmu.h

index 99dcdf73be7a345bf12f94baa2b2349bb4e04642..86360f91895a28746bb3d637dd09b7e5d08c51e1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2014-2015, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
                                                ((page_to_phys(page) >> SMMU_PDIR_SHIFT) | (attr))
 #define SMMU_MK_PDE(page, attr)                        \
                                                (u32)((page_to_phys(page) >> SMMU_PDE_SHIFT) | (attr))
-#define SMMU_EX_PTBL_PAGE(pde)                 \
-                                               pfn_to_page((u32)(pde) & SMMU_PFN_MASK)
+#define SMMU_EX_PTBL_PAGE(pde)                 phys_to_page((phys_addr_t)(pde & SMMU_PFN_MASK) << SMMU_PDE_SHIFT)
 #define SMMU_PFN_TO_PTE(pfn, attr)             (u32)((pfn) | (attr))
 
 #define SMMU_ASID_ENABLE(asid, idx)            (((asid) << (idx * 8)) | (1 << 31))