ARM: 6464/2: fix spinlock recursion in adjust_pte()
author Mika Westerberg <mika.westerberg@iki.fi>
Thu, 28 Oct 2010 10:45:22 +0000 (11:45 +0100)
committer Greg Kroah-Hartman <gregkh@suse.de>
Thu, 9 Dec 2010 21:33:20 +0000 (13:33 -0800)
commit 4e54d93d3c9846ba1c2644ad06463dafa690d1b7 upstream.

When running the following code on a machine which has VIVT caches and
where USE_SPLIT_PTLOCKS is not defined:

  fd = open("/etc/passwd", O_RDONLY);
  addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
  addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

  v = *((int *)addr);

we will hang in spinlock recursion in the page fault handler:

  BUG: spinlock recursion on CPU#0, mmap_test/717
  lock: c5e295d8, .magic: dead4ead, .owner: mmap_test/717,
                  .owner_cpu: 0
  [<c0026604>] (unwind_backtrace+0x0/0xec)
  [<c014ee48>] (do_raw_spin_lock+0x40/0x140)
  [<c0027f68>] (update_mmu_cache+0x208/0x250)
  [<c0079db4>] (__do_fault+0x320/0x3ec)
  [<c007af7c>] (handle_mm_fault+0x2f0/0x6d8)
  [<c0027834>] (do_page_fault+0xdc/0x1cc)
  [<c00202d0>] (do_DataAbort+0x34/0x94)
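
For completeness, here is the trigger as a compilable program (a
hypothetical reproducer; the headers, declarations and return path are
assumptions not present in the original snippet):

  #include <fcntl.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
          int fd, v;
          void *addr, *addr2;

          fd = open("/etc/passwd", O_RDONLY);

          /* two shared mappings of the same file page */
          addr  = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
          addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

          /*
           * The read faults on addr; on VIVT the fault path walks the
           * other shared mapping (addr2) of the same page and ends up
           * taking the page table lock a second time in adjust_pte().
           */
          v = *((int *)addr);

          close(fd);
          return v;
  }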

This comes from the fact that when USE_SPLIT_PTLOCKS is not defined,
the only lock protecting the page tables is mm->page_table_lock,
which is already held by the time update_mmu_cache() is called.
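
The recursion is easy to see from the generic pte_lockptr() definition in
include/linux/mm.h (a paraphrased sketch for kernels of this era; the real
macro goes through a __pte_lockptr() helper):

  #if USE_SPLIT_PTLOCKS
  /* per page table lock, embedded in the struct page of the PTE page */
  #define pte_lockptr(mm, pmd)  ({(void)(mm); &pmd_page(*(pmd))->ptl;})
  #else
  /* a single lock protects all page tables of the mm */
  #define pte_lockptr(mm, pmd)  ({(void)(pmd); &(mm)->page_table_lock;})
  #endif

In the latter case, the spin_lock(ptl) in adjust_pte() tries to re-acquire
the very lock the fault handler is already holding, which is exactly the
recursion reported above.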

Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
arch/arm/mm/fault-armv.c

index 9b906dec1ca1abc0ec308472dc39a7684c6c3b01..56036ff04deb5b7d329e10f47ed246ef1dfb03db 100644 (file)
@@ -65,6 +65,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
        return ret;
 }
 
+#if USE_SPLIT_PTLOCKS
+/*
+ * If we are using split PTE locks, then we need to take the per-page
+ * PTE lock here.  Otherwise we are using the shared mm->page_table_lock,
+ * which is already held, so we must not take it again.
+ */
+static inline void do_pte_lock(spinlock_t *ptl)
+{
+       /*
+        * Use the nested variant here to tell lockdep that we already
+        * hold another spinlock of the same class.
+        */
+       spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
+}
+
+static inline void do_pte_unlock(spinlock_t *ptl)
+{
+       spin_unlock(ptl);
+}
+#else /* !USE_SPLIT_PTLOCKS */
+static inline void do_pte_lock(spinlock_t *ptl) {}
+static inline void do_pte_unlock(spinlock_t *ptl) {}
+#endif /* USE_SPLIT_PTLOCKS */
+
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
        unsigned long pfn)
 {
@@ -89,11 +113,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
         */
        ptl = pte_lockptr(vma->vm_mm, pmd);
        pte = pte_offset_map_nested(pmd, address);
-       spin_lock(ptl);
+       do_pte_lock(ptl);
 
        ret = do_adjust_pte(vma, address, pfn, pte);
 
-       spin_unlock(ptl);
+       do_pte_unlock(ptl);
        pte_unmap_nested(pte);
 
        return ret;
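
In the USE_SPLIT_PTLOCKS case the PTE lock taken here is a different
spinlock from the one the fault path holds, but both belong to the same
lockdep class, so a plain spin_lock() would still trigger a "possible
recursive locking" report from the lock validator. spin_lock_nested() with
SINGLE_DEPTH_NESTING marks the second acquisition as deliberate. A minimal
sketch of the pattern (hypothetical struct node and helpers, not taken from
the kernel sources):

  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct node {
          spinlock_t lock;
          int value;
  };

  static struct node *node_alloc(void)
  {
          struct node *n = kmalloc(sizeof(*n), GFP_KERNEL);

          if (!n)
                  return NULL;
          /* one init site: lockdep gives every node lock the same class */
          spin_lock_init(&n->lock);
          n->value = 0;
          return n;
  }

  static void node_move_value(struct node *from, struct node *to)
  {
          spin_lock(&from->lock);
          /* same class, so a plain spin_lock(&to->lock) would be flagged */
          spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
          to->value = from->value;
          from->value = 0;
          spin_unlock(&to->lock);
          spin_unlock(&from->lock);
  }

Note that the annotation only informs lockdep; the caller must still
guarantee a consistent acquisition order to avoid an actual deadlock.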