From 7e71a59b250330fd52ee7293eb9d31952f16682e Mon Sep 17 00:00:00 2001
From: Kai Huang
Date: Fri, 9 Jan 2015 16:44:30 +0800
Subject: [PATCH] KVM: x86: flush TLB when D bit is manually changed.

When software changes the D bit (either from 1 to 0, or 0 to 1), the
corresponding TLB entry in the hardware won't be updated immediately. We
should flush it to guarantee the consistency of the D bit between the TLB
and the MMU page table in memory. This is especially important when
clearing the D bit, since stale TLB entries may cause false negatives in
reporting dirtiness.

A sanity test was done on my machine with an Intel processor.

Signed-off-by: Kai Huang
[Check A bit too. - Paolo]
Signed-off-by: Paolo Bonzini
---
 arch/x86/kvm/mmu.c | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a0985ebb5512..0d0fdd6f002c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -532,6 +532,11 @@ static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
 	return (old_spte & bit_mask) && !(new_spte & bit_mask);
 }
 
+static bool spte_is_bit_changed(u64 old_spte, u64 new_spte, u64 bit_mask)
+{
+	return (old_spte & bit_mask) != (new_spte & bit_mask);
+}
+
 /* Rules for using mmu_spte_set:
  * Set the sptep from nonpresent to present.
  * Note: the sptep being assigned *must* be either not present
@@ -582,6 +587,14 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
 	if (!shadow_accessed_mask)
 		return ret;
 
+	/*
+	 * Flush TLB when accessed/dirty bits are changed in the page tables,
+	 * to guarantee consistency between TLB and page tables.
+	 */
+	if (spte_is_bit_changed(old_spte, new_spte,
+				shadow_accessed_mask | shadow_dirty_mask))
+		ret = true;
+
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
 		kvm_set_pfn_accessed(spte_to_pfn(old_spte));
 	if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
-- 
2.11.0
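
The check added by the patch can be tried in isolation. Below is a minimal
standalone sketch (not part of the patch) of the spte_is_bit_changed()
helper and the flush decision made in mmu_spte_update(); the mask values
are illustrative placeholders only, since the real shadow_accessed_mask
and shadow_dirty_mask in KVM depend on the paging mode in use.

/*
 * Standalone sketch of the accessed/dirty-change check.  The mask
 * values below are placeholders for illustration; they are not the
 * actual KVM shadow_accessed_mask/shadow_dirty_mask definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACCESSED_MASK (1ULL << 5)	/* placeholder A-bit position */
#define DIRTY_MASK    (1ULL << 6)	/* placeholder D-bit position */

static bool spte_is_bit_changed(uint64_t old_spte, uint64_t new_spte,
				uint64_t bit_mask)
{
	/* True when any bit selected by bit_mask differs between the two. */
	return (old_spte & bit_mask) != (new_spte & bit_mask);
}

int main(void)
{
	uint64_t old_spte = ACCESSED_MASK | DIRTY_MASK;	/* A=1, D=1 */
	uint64_t new_spte = ACCESSED_MASK;		/* D cleared by software */

	/* The caller would request a TLB flush when this returns true. */
	if (spte_is_bit_changed(old_spte, new_spte,
				ACCESSED_MASK | DIRTY_MASK))
		printf("A/D bits changed: TLB flush needed\n");
	return 0;
}

As in the patch, the point of the helper is that any difference in the
accessed or dirty bits, in either direction, is enough to require a flush,
whereas spte_is_bit_cleared() only catches the 1-to-0 transition.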