
x86/paravirt: Clean up native_patch()
author     Borislav Petkov <bp@alien8.de>
           Tue, 11 Sep 2018 09:15:10 +0000 (11:15 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 11 Sep 2018 10:45:14 +0000 (12:45 +0200)
When CONFIG_PARAVIRT_SPINLOCKS=n, the build generates this warning:

  arch/x86/kernel/paravirt_patch_64.c: In function ‘native_patch’:
  arch/x86/kernel/paravirt_patch_64.c:89:1: warning: label ‘patch_site’ defined but not used [-Wunused-label]
   patch_site:

... but those labels can simply be removed by calling the respective
patching functions directly at each case.

Get rid of the local variables too, while at it, and simplify the
function flow for better readability.
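
For reference, with the reworked PATCH_SITE() macro a patch site such as

  PATCH_SITE(irq, irq_disable);

now expands to roughly:

  case PARAVIRT_PATCH(irq.irq_disable):
          return paravirt_patch_insns(ibuf, len, start_irq_irq_disable,
                                      end_irq_irq_disable);

i.e. each native patch site returns the result of paravirt_patch_insns()
directly, so neither the patch_site/patch_default labels nor the
start/end/ret locals are needed anymore.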

Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180911091510.GA12094@zn.tnic
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c

arch/x86/kernel/paravirt_patch_32.c
index d460cbc..6368c22 100644
@@ -34,14 +34,10 @@ extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
-       const unsigned char *start, *end;
-       unsigned ret;
-
 #define PATCH_SITE(ops, x)                                     \
-               case PARAVIRT_PATCH(ops.x):                     \
-                       start = start_##ops##_##x;              \
-                       end = end_##ops##_##x;                  \
-                       goto patch_site
+       case PARAVIRT_PATCH(ops.x):                             \
+               return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
+
        switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
                PATCH_SITE(irq, irq_disable);
@@ -54,32 +50,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
                PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-               case PARAVIRT_PATCH(lock.queued_spin_unlock):
-                       if (pv_is_native_spin_unlock()) {
-                               start = start_lock_queued_spin_unlock;
-                               end   = end_lock_queued_spin_unlock;
-                               goto patch_site;
-                       }
-                       goto patch_default;
+       case PARAVIRT_PATCH(lock.queued_spin_unlock):
+               if (pv_is_native_spin_unlock())
+                       return paravirt_patch_insns(ibuf, len,
+                                                   start_lock_queued_spin_unlock,
+                                                   end_lock_queued_spin_unlock);
+               break;
 
-               case PARAVIRT_PATCH(lock.vcpu_is_preempted):
-                       if (pv_is_native_vcpu_is_preempted()) {
-                               start = start_lock_vcpu_is_preempted;
-                               end   = end_lock_vcpu_is_preempted;
-                               goto patch_site;
-                       }
-                       goto patch_default;
+       case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+               if (pv_is_native_vcpu_is_preempted())
+                       return paravirt_patch_insns(ibuf, len,
+                                                   start_lock_vcpu_is_preempted,
+                                                   end_lock_vcpu_is_preempted);
+               break;
 #endif
 
        default:
-patch_default: __maybe_unused
-               ret = paravirt_patch_default(type, ibuf, addr, len);
-               break;
-
-patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
 #undef PATCH_SITE
-       return ret;
+       return paravirt_patch_default(type, ibuf, addr, len);
 }
arch/x86/kernel/paravirt_patch_64.c
index 5ad5bcd..7ca9cb7 100644
@@ -42,15 +42,11 @@ extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
 {
-       const unsigned char *start, *end;
-       unsigned ret;
-
 #define PATCH_SITE(ops, x)                                     \
-               case PARAVIRT_PATCH(ops.x):                     \
-                       start = start_##ops##_##x;              \
-                       end = end_##ops##_##x;                  \
-                       goto patch_site
-       switch(type) {
+       case PARAVIRT_PATCH(ops.x):                             \
+               return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
+
+       switch (type) {
 #ifdef CONFIG_PARAVIRT_XXL
                PATCH_SITE(irq, restore_fl);
                PATCH_SITE(irq, save_fl);
@@ -64,32 +60,24 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
                PATCH_SITE(mmu, write_cr3);
 #endif
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
-               case PARAVIRT_PATCH(lock.queued_spin_unlock):
-                       if (pv_is_native_spin_unlock()) {
-                               start = start_lock_queued_spin_unlock;
-                               end   = end_lock_queued_spin_unlock;
-                               goto patch_site;
-                       }
-                       goto patch_default;
+       case PARAVIRT_PATCH(lock.queued_spin_unlock):
+               if (pv_is_native_spin_unlock())
+                       return paravirt_patch_insns(ibuf, len,
+                                                   start_lock_queued_spin_unlock,
+                                                   end_lock_queued_spin_unlock);
+               break;
 
-               case PARAVIRT_PATCH(lock.vcpu_is_preempted):
-                       if (pv_is_native_vcpu_is_preempted()) {
-                               start = start_lock_vcpu_is_preempted;
-                               end   = end_lock_vcpu_is_preempted;
-                               goto patch_site;
-                       }
-                       goto patch_default;
+       case PARAVIRT_PATCH(lock.vcpu_is_preempted):
+               if (pv_is_native_vcpu_is_preempted())
+                       return paravirt_patch_insns(ibuf, len,
+                                                   start_lock_vcpu_is_preempted,
+                                                   end_lock_vcpu_is_preempted);
+               break;
 #endif
 
        default:
-patch_default: __maybe_unused
-               ret = paravirt_patch_default(type, ibuf, addr, len);
-               break;
-
-patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
                break;
        }
 #undef PATCH_SITE
-       return ret;
+       return paravirt_patch_default(type, ibuf, addr, len);
 }
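
For readability, this is roughly what the paravirt_patch_64.c version of
native_patch() looks like with the hunks above applied (the PATCH_SITE()
entries that fall between the two hunks are elided):

  unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len)
  {
  #define PATCH_SITE(ops, x)                                     \
          case PARAVIRT_PATCH(ops.x):                            \
                  return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)

          switch (type) {
  #ifdef CONFIG_PARAVIRT_XXL
                  PATCH_SITE(irq, restore_fl);
                  PATCH_SITE(irq, save_fl);
                  /* ... remaining PATCH_SITE() entries elided ... */
                  PATCH_SITE(mmu, write_cr3);
  #endif
  #if defined(CONFIG_PARAVIRT_SPINLOCKS)
          case PARAVIRT_PATCH(lock.queued_spin_unlock):
                  if (pv_is_native_spin_unlock())
                          return paravirt_patch_insns(ibuf, len,
                                                      start_lock_queued_spin_unlock,
                                                      end_lock_queued_spin_unlock);
                  break;

          case PARAVIRT_PATCH(lock.vcpu_is_preempted):
                  if (pv_is_native_vcpu_is_preempted())
                          return paravirt_patch_insns(ibuf, len,
                                                      start_lock_vcpu_is_preempted,
                                                      end_lock_vcpu_is_preempted);
                  break;
  #endif

          default:
                  break;
          }
  #undef PATCH_SITE
          return paravirt_patch_default(type, ibuf, addr, len);
  }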