KVM: MMU: Simplify spte fetch() function
author    Avi Kivity <avi@redhat.com>
          Tue, 13 Jul 2010 11:27:09 +0000 (14:27 +0300)
committer Avi Kivity <avi@redhat.com>
          Mon, 2 Aug 2010 03:40:45 +0000 (06:40 +0300)
Partition the function into three sections:

- fetching indirect shadow pages (host_level > guest_level)
- fetching direct shadow pages (page_level < host_level <= guest_level)
- the final spte (page_level == host_level)

Instead of the current spaghetti.
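
For reference, the new shape of FNAME(fetch) condenses to the sketch
below (identifiers are the ones in the hunk that follows; bodies and
error handling are elided, so this is an outline, not the literal
patched source):

    /* 1: fetch indirect shadow pages, iterator.level > gw->level */
    for (shadow_walk_init(&iterator, vcpu, addr);
         shadow_walk_okay(&iterator) && iterator.level > gw->level;
         shadow_walk_next(&iterator)) {
            /* shadow the guest page table itself:
             * kvm_mmu_get_page(..., direct = false, ...) */
    }

    /* 2: fetch direct shadow pages, hlevel < iterator.level <= gw->level
     * (a large guest page backed by smaller host pages) */
    for (; shadow_walk_okay(&iterator) && iterator.level > hlevel;
         shadow_walk_next(&iterator)) {
            /* direct mapping: kvm_mmu_get_page(..., direct = true, ...) */
    }

    /* 3: install the final spte at iterator.level == hlevel */
    mmu_set_spte(vcpu, iterator.sptep, ...);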

A slight change from the original code is that we call validate_direct_spte()
more often: previously we called it only for gw->level, now we also call it for
lower levels.  The change should have no effect.
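
Concretely: the old code called validate_direct_spte() once, behind a
level check, while the new middle loop calls it at every direct level
it walks (both fragments are taken from the hunks below):

    /* before: only at the guest mapping level */
    if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
        && level == gw->level)
            validate_direct_spte(vcpu, sptep, direct_access);

    /* after: once per direct level, gw->level down to hlevel + 1 */
    for (; shadow_walk_okay(&iterator) && iterator.level > hlevel;
         shadow_walk_next(&iterator)) {
            validate_direct_spte(vcpu, iterator.sptep, direct_access);
            /* ... rest of the direct-page fetch elided ... */
    }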

[xiao: fix regression caused by validate_direct_spte() called too late]

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e1c1f9eb1cc15778b0d32e8c439075fa0d0c898c..368e4cb6233b2160d800c8ca0c370ddd11a49552 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -321,9 +321,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *sp;
        u64 *sptep = NULL;
-       int direct;
-       gfn_t table_gfn;
-       int level;
+       int uninitialized_var(level);
        bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
        unsigned direct_access;
        struct kvm_shadow_walk_iterator iterator;
@@ -335,61 +333,68 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        if (!dirty)
                direct_access &= ~ACC_WRITE_MASK;
 
-       for_each_shadow_entry(vcpu, addr, iterator) {
+       for (shadow_walk_init(&iterator, vcpu, addr);
+            shadow_walk_okay(&iterator) && iterator.level > gw->level;
+            shadow_walk_next(&iterator)) {
+               gfn_t table_gfn;
+
                level = iterator.level;
                sptep = iterator.sptep;
-               if (iterator.level == hlevel) {
-                       mmu_set_spte(vcpu, sptep, access,
-                                    gw->pte_access & access,
-                                    user_fault, write_fault,
-                                    dirty, ptwrite, level,
-                                    gw->gfn, pfn, false, true);
-                       break;
-               }
-
-               if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)
-                   && level == gw->level)
-                       validate_direct_spte(vcpu, sptep, direct_access);
 
                drop_large_spte(vcpu, sptep);
 
                if (is_shadow_present_pte(*sptep))
                        continue;
 
-               if (level <= gw->level) {
-                       direct = 1;
-                       access = direct_access;
-
-                       /*
-                        * It is a large guest pages backed by small host pages,
-                        * So we set @direct(@sp->role.direct)=1, and set
-                        * @table_gfn(@sp->gfn)=the base page frame for linear
-                        * translations.
-                        */
-                       table_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
-                       access &= gw->pte_access;
-               } else {
-                       direct = 0;
-                       table_gfn = gw->table_gfn[level - 2];
-               }
+               table_gfn = gw->table_gfn[level - 2];
                sp = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-                                              direct, access, sptep);
-               if (!direct)
-                       /*
-                        * Verify that the gpte in the page we've just write
-                        * protected is still there.
-                        */
-                       if (FNAME(gpte_changed)(vcpu, gw, level - 1)) {
-                               kvm_mmu_put_page(sp, sptep);
-                               kvm_release_pfn_clean(pfn);
-                               sptep = NULL;
-                               break;
-                       }
+                                     false, access, sptep);
+
+               /*
+                * Verify that the gpte in the page we've just write
+                * protected is still there.
+                */
+               if (FNAME(gpte_changed)(vcpu, gw, level - 1))
+                       goto out_gpte_changed;
+
+               link_shadow_page(sptep, sp);
+       }
+
+       for (;
+            shadow_walk_okay(&iterator) && iterator.level > hlevel;
+            shadow_walk_next(&iterator)) {
+               gfn_t direct_gfn;
 
+               level = iterator.level;
+               sptep = iterator.sptep;
+
+               validate_direct_spte(vcpu, sptep, direct_access);
+
+               drop_large_spte(vcpu, sptep);
+
+               if (is_shadow_present_pte(*sptep))
+                       continue;
+
+               direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+
+               sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, level-1,
+                                     true, direct_access, sptep);
                link_shadow_page(sptep, sp);
        }
 
+       sptep = iterator.sptep;
+       level = iterator.level;
+
+       mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+                    user_fault, write_fault, dirty, ptwrite, level,
+                    gw->gfn, pfn, false, true);
+
        return sptep;
+
+out_gpte_changed:
+       kvm_mmu_put_page(sp, sptep);
+       kvm_release_pfn_clean(pfn);
+       return NULL;
 }
 
 /*