rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: gk20a: fix PDE update sequence
author: Deepak Nibade <dnibade@nvidia.com>
Mon, 1 Dec 2014 10:55:41 +0000 (16:25 +0530)
committer: Winnie Hsu <whsu@nvidia.com>
Mon, 19 Jan 2015 23:53:03 +0000 (15:53 -0800)
Current sequence:
- delete page tables memory
- update PDE entry and mark above page tables invalid

With this sequence, it is possible to have valid PDE entries
with already freed page table and this could lead us to
invalid memory accesses.

Fix this by switching the sequence as follows:
- update PDE entry and mark page tables invalid
- delete page tables memory

Bug 1577947

Change-Id: Icc3a8c74bbf1bf59e41e0322cfc279d15690aa9d
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/670323
(cherry-picked from commit 56f738b4c4ee188ec1f69b91615cd9728ff18cf0)
Reviewed-on: http://git-master/r/671196
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit

drivers/gpu/nvgpu/gk20a/mm_gk20a.c

index 52c0f3c5978e122b1e9657edf0ba05fb8995389d..d8fa08ff4971e0252cb1797ee437bef1b45cbaf6 100644 (file)
@@ -1797,6 +1797,8 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                unmap_gmmu_pages(pte->ref, pte->sgt, pte_kv_cur);
 
                if (pte->ref_cnt == 0) {
+                       void *pte_ref_ptr = pte->ref;
+
                        /* It can make sense to keep around one page table for
                         * each flavor (empty)... in case a new map is coming
                         * right back to alloc (and fill it in) again.
@@ -1804,13 +1806,15 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                         * unmap/map/unmap/map cases where we'd trigger pte
                         * free/alloc/free/alloc.
                         */
-                       free_gmmu_pages(vm, pte->ref, pte->sgt,
-                               vm->mm->page_table_sizing[pgsz_idx].order,
-                               pte->size);
                        pte->ref = NULL;
 
                        /* rewrite pde */
                        update_gmmu_pde_locked(vm, pde_i);
+
+                       free_gmmu_pages(vm, pte_ref_ptr, pte->sgt,
+                               vm->mm->page_table_sizing[pgsz_idx].order,
+                               pte->size);
+
                }
 
        }