
KVM: VMX: Macrofy the MSR bitmap getters and setters
author		Sean Christopherson <seanjc@google.com>
		Tue, 9 Nov 2021 01:30:46 +0000 (01:30 +0000)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Thu, 11 Nov 2021 15:56:23 +0000 (10:56 -0500)
Add builder macros to generate the MSR bitmap helpers to reduce the
amount of copy-paste code, especially with respect to all the magic
numbers needed to calc the correct bit location.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211109013047.2041518-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
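
To see what the builder macros produce, expanding BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test) for the write case gives roughly the following (a sketch of the cleaned-up preprocessor output, assuming the kernel's test_bit() and u32 definitions are in scope). It matches the open-coded vmx_test_msr_bitmap_write() that the patch removes, modulo 0x800 + 0x400 being written out instead of 0xc00, which is why no functional change is expected:

/* Sketch of the expansion of __BUILD_VMX_MSR_BITMAP_HELPER(bool, test, test, write, 0x800) */
static inline bool vmx_test_msr_bitmap_write(unsigned long *bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, bitmap + 0x800 / f);
	else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
		return test_bit(msr & 0x1fff, bitmap + (0x800 + 0x400) / f);
	return (bool)true;
}
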
arch/x86/kvm/vmx/vmx.h

index d51311f..86c093d 100644 (file)
@@ -400,68 +400,33 @@ static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
 
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 
-static inline bool vmx_test_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               return test_bit(msr, msr_bitmap + 0x000 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               return test_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-       return true;
-}
-
-static inline bool vmx_test_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               return test_bit(msr, msr_bitmap + 0x800 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               return test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-       return true;
-}
-
-static inline void vmx_clear_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               __clear_bit(msr, msr_bitmap + 0x000 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               __clear_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_clear_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               __clear_bit(msr, msr_bitmap + 0x800 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               __clear_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
-}
-
-static inline void vmx_set_msr_bitmap_read(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               __set_bit(msr, msr_bitmap + 0x000 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               __set_bit(msr & 0x1fff, msr_bitmap + 0x400 / f);
-}
-
-static inline void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
-{
-       int f = sizeof(unsigned long);
-
-       if (msr <= 0x1fff)
-               __set_bit(msr, msr_bitmap + 0x800 / f);
-       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
-               __set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
+/*
+ * Note, early Intel manuals have the write-low and read-high bitmap offsets
+ * the wrong way round.  The bitmaps control MSRs 0x00000000-0x00001fff and
+ * 0xc0000000-0xc0001fff.  The former (low) uses bytes 0-0x3ff for reads and
+ * 0x800-0xbff for writes.  The latter (high) uses 0x400-0x7ff for reads and
+ * 0xc00-0xfff for writes.  MSRs not covered by either of the ranges always
+ * VM-Exit.
+ */
+#define __BUILD_VMX_MSR_BITMAP_HELPER(rtype, action, bitop, access, base)      \
+static inline rtype vmx_##action##_msr_bitmap_##access(unsigned long *bitmap,  \
+                                                      u32 msr)                \
+{                                                                             \
+       int f = sizeof(unsigned long);                                         \
+                                                                              \
+       if (msr <= 0x1fff)                                                     \
+               return bitop##_bit(msr, bitmap + base / f);                    \
+       else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))                   \
+               return bitop##_bit(msr & 0x1fff, bitmap + (base + 0x400) / f); \
+       return (rtype)true;                                                    \
 }
+#define BUILD_VMX_MSR_BITMAP_HELPERS(ret_type, action, bitop)                 \
+       __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, read,  0x0)     \
+       __BUILD_VMX_MSR_BITMAP_HELPER(ret_type, action, bitop, write, 0x800)
 
+BUILD_VMX_MSR_BITMAP_HELPERS(bool, test, test)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, clear, __clear)
+BUILD_VMX_MSR_BITMAP_HELPERS(void, set, __set)
 
 static inline u8 vmx_get_rvi(void)
 {
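
As a quick sanity check of the offset math described in the comment above, the following standalone sketch (a hypothetical userspace helper, not part of the patch) computes the byte within the 4 KiB MSR bitmap that holds a given MSR's read or write intercept bit, using the same layout: low MSRs use bytes 0x0-0x3ff for reads and 0x800-0xbff for writes, high MSRs use 0x400-0x7ff for reads and 0xc00-0xfff for writes.

#include <stdint.h>
#include <stdio.h>

/*
 * Return the byte offset into the MSR bitmap holding the intercept bit for
 * @msr, or -1 if the MSR falls outside both ranges (such MSRs always VM-Exit).
 */
static int msr_bitmap_byte(uint32_t msr, int write)
{
	unsigned int base = write ? 0x800 : 0x0;

	if (msr <= 0x1fff)
		return base + msr / 8;
	if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		return base + 0x400 + (msr & 0x1fff) / 8;
	return -1;
}

int main(void)
{
	/* IA32_EFER (0xc0000080) is a "high" MSR: reads land in 0x400-0x7ff. */
	printf("EFER read byte:  0x%x\n", msr_bitmap_byte(0xc0000080, 0)); /* 0x410 */
	printf("EFER write byte: 0x%x\n", msr_bitmap_byte(0xc0000080, 1)); /* 0xc10 */
	return 0;
}
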