tegra: nvmap: fix the possible dead lock
author     Xianhui Wang <xianhuiw@nvidia.com>
           Wed, 19 Aug 2015 04:26:42 +0000 (12:26 +0800)
committer  mobile promotions <svcmobile_promotions@nvidia.com>
           Mon, 14 Sep 2015 20:55:28 +0000 (13:55 -0700)
bug 200107851

When a process calls nvmap_iovmm_get_client_mss() it holds mutex_lock(&h->lock)
and then tries to take down_read(&mm->mmap_sem). If the related process is in
nvmap_vma_close() at the same time, it already holds mm->mmap_sem for write
(taken on entry to vm_munmap) and is trying to take mutex_lock(&h->lock).
The two processes deadlock against each other, many other processes end up in
uninterruptible sleep behind them, and bugreport no longer works.

Fix the problem by having nvmap_iovmm_get_client_mss() take mmap_sem first,
so that its lock ordering matches nvmap_vma_close().
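
Below is a minimal user-space sketch of the lock ordering this patch establishes.
It is not part of the change: the pthread objects mmap_sem and handle_lock stand
in for mm->mmap_sem and h->lock, and both functions are illustrative only. The
point is that after the fix both paths acquire the locks in the same order, so
neither can block while holding the lock the other one needs.

/*
 * Sketch only: "mmap_sem" stands in for mm->mmap_sem and "handle_lock"
 * for h->lock; neither is the real kernel object.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t handle_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Old nvmap_iovmm_get_client_mss() order: handle_lock, then mmap_sem.
 * New order (below) matches the munmap path: mmap_sem, then handle_lock.
 */
static void *get_client_mss_path(void *unused)
{
	pthread_rwlock_rdlock(&mmap_sem);	/* taken first after the fix */
	pthread_mutex_lock(&handle_lock);	/* then the per-handle lock */
	/* ... walk the handle's VMAs here ... */
	pthread_mutex_unlock(&handle_lock);
	pthread_rwlock_unlock(&mmap_sem);
	return NULL;
}

/* vm_munmap -> nvmap_vma_close already uses this order. */
static void *munmap_path(void *unused)
{
	pthread_rwlock_wrlock(&mmap_sem);	/* mmap_sem held for write */
	pthread_mutex_lock(&handle_lock);	/* nvmap_vma_close takes h->lock */
	/* ... remove the vma from h->vmas here ... */
	pthread_mutex_unlock(&handle_lock);
	pthread_rwlock_unlock(&mmap_sem);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, get_client_mss_path, NULL);
	pthread_create(&b, NULL, munmap_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("both paths finished: lock order is consistent");
	return 0;
}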

The call stacks:

<6>[68949.696132] memory_usage.sh D ffffffc000086b1c    0 2771     1 0x00000000
<7>[68949.696140] Call trace:
<7>[68949.696146] [<ffffffc000086b1c>] __switch_to+0x3c/0x48
<7>[68949.696151] [<ffffffc000b90410>] __schedule+0x4b4/0x5e0
<7>[68949.696156] [<ffffffc000b90588>] schedule+0x4c/0x68
<7>[68949.696161] [<ffffffc000b91234>] __down_read+0xb8/0xe0
<7>[68949.696166] [<ffffffc000b8f85c>] down_read+0x28/0x38
<7>[68949.696174] [<ffffffc0004705b8>] nvmap_iovmm_get_client_mss+0xf8/0x1ac
<7>[68949.696180] [<ffffffc0004706fc>] nvmap_debug_iovmm_procrank_show+0x90/0x124
<7>[68949.696185] [<ffffffc0001c8b94>] seq_read+0x178/0x3d4
<7>[68949.696190] [<ffffffc0001a30c8>] vfs_read+0x94/0x158
<7>[68949.696195] [<ffffffc0001a3a78>] SyS_read+0xbc/0x16c
<6>[68949.696870] nvtest64       D ffffffc000086b1c    0 9220     1 0x00000000
<7>[68949.696877] Call trace:
<7>[68949.696883] [<ffffffc000086b1c>] __switch_to+0x3c/0x48
<7>[68949.696888] [<ffffffc000b90410>] __schedule+0x4b4/0x5e0
<7>[68949.696893] [<ffffffc000b90588>] schedule+0x4c/0x68
<7>[68949.696898] [<ffffffc000b90a04>] schedule_preempt_disabled+0x10/0x24
<7>[68949.696904] [<ffffffc000b8f42c>] __mutex_lock_slowpath+0x19c/0x264
<7>[68949.696909] [<ffffffc000b8f534>] mutex_lock+0x40/0x60
<7>[68949.696914] [<ffffffc00046ede8>] nvmap_vma_close+0x64/0x234
<7>[68949.696919] [<ffffffc000182e64>] remove_vma+0x3c/0x6c
<7>[68949.696924] [<ffffffc000184110>] remove_vma_list+0x68/0x9c
<7>[68949.696928] [<ffffffc000184b60>] do_munmap+0x1d8/0x250
<7>[68949.696932] [<ffffffc000184c18>] vm_munmap+0x40/0x64
<7>[68949.696937] [<ffffffc000184cc8>] SyS_munmap+0x8c/0x11c

Change-Id: Icee67329c2dbbd05b10a7ddf0dcc4167191623d3
Signed-off-by: Xianhui Wang <xianhuiw@nvidia.com>
Reviewed-on: http://git-master/r/785721
(cherry picked from commit 306af7c0b904d04300c8a6e202bdf7eea6acd5af)
Reviewed-on: http://git-master/r/788685
Reviewed-by: Maneet Maneet Singh <mmaneetsingh@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
drivers/video/tegra/nvmap/nvmap_dev.c

index 9b895e5ac2c987ed8454c953ff4d2afa5e147b5c..70d99245bc4619275b8e8f40dc67bbb99ef08da4 100644 (file)
@@ -1360,11 +1360,17 @@ static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                .private = &mss,
        };
        struct mm_struct *mm;
-       bool is_mm_accessed = false;
 
        memset(&mss, 0, sizeof(mss));
        *pss = *total = 0;
 
+       mm = mm_access(client->task,
+                       PTRACE_MODE_READ);
+       if (!mm || IS_ERR(mm)) return;
+
+       down_read(&mm->mmap_sem);
+       procrank_walk.mm = mm;
+
        nvmap_ref_lock(client);
        n = rb_first(&client->handle_refs);
        for (; n != NULL; n = rb_next(n)) {
@@ -1378,20 +1384,6 @@ static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                mutex_lock(&h->lock);
                list_for_each_entry(tmp, &h->vmas, list) {
                        if (client->task->pid == tmp->pid) {
-
-                               if (!is_mm_accessed) {
-                                       is_mm_accessed = true;
-                                       mm = mm_access(client->task,
-                                                       PTRACE_MODE_READ);
-                                       if (!mm || IS_ERR(mm)) {
-                                               mutex_unlock(&h->lock);
-                                               nvmap_ref_unlock(client);
-                                               return;
-                                       }
-                                       down_read(&mm->mmap_sem);
-                                       procrank_walk.mm = tmp->vma->vm_mm;
-                               }
-
                                mss.vma = tmp->vma;
                                walk_page_range(tmp->vma->vm_start,
                                                tmp->vma->vm_end,
@@ -1399,15 +1391,12 @@ static void nvmap_iovmm_get_client_mss(struct nvmap_client *client, u64 *pss,
                        }
                }
                mutex_unlock(&h->lock);
-
                *total += h->size / atomic_read(&h->share_count);
        }
 
-       if (is_mm_accessed) {
-               up_read(&mm->mmap_sem);
-               mmput(mm);
-               *pss = (mss.pss >> PSS_SHIFT);
-       }
+       up_read(&mm->mmap_sem);
+       mmput(mm);
+       *pss = (mss.pss >> PSS_SHIFT);
        nvmap_ref_unlock(client);
 }