
Merge tag 'kvm-s390-next-20150508' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Paolo Bonzini <pbonzini@redhat.com>
           Mon, 11 May 2015 12:06:32 +0000 (14:06 +0200)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 11 May 2015 12:06:32 +0000 (14:06 +0200)
KVM: s390: Fixes and features for 4.2 (kvm/next)

Mostly a bunch of fixes, reworks and optimizations for s390.
There is one new feature (EDAT-2 inside the guest), which boils
down to support for 2GB pages.
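
For orientation: EDAT-2 is announced to guests as STFLE facility bit 78, and
s390 facility bits are numbered from the most-significant bit of each
facility-list word. A minimal sketch of such a test (fac_test_bit is a
hypothetical name; the kernel's own test_facility() helpers work byte-wise,
but the MSB-first numbering is the same):

	static inline int fac_test_bit(const unsigned long *fac_list,
				       unsigned long nr)
	{
		/* word nr>>6 holds facilities (nr & ~63UL)..(nr | 63), MSB first */
		return (fac_list[nr >> 6] >> (63 - (nr & 63))) & 1;
	}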

arch/s390/kvm/kvm-s390.c

diff --combined arch/s390/kvm/kvm-s390.c
@@@ -110,7 -110,7 +110,7 @@@ struct kvm_stats_debugfs_item debugfs_e
  /* upper facilities limit for kvm */
  unsigned long kvm_s390_fac_list_mask[] = {
        0xffe6fffbfcfdfc40UL,
-       0x005c800000000000UL,
+       0x005e800000000000UL,
  };
  
  unsigned long kvm_s390_fac_list_mask_size(void)
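  
  The one-nibble change above (0x005c... to 0x005e...) is what newly offers a
  single facility to guests. A throwaway program to check which bit the change
  flips (illustrative only; word 1 of the mask covers facilities 64-127,
  numbered MSB-first):
  
	#include <stdio.h>

	int main(void)
	{
		unsigned long d = 0x005e800000000000UL ^ 0x005c800000000000UL;
		/* offset from the MSB, within word 1 (facilities 64..127) */
		int fac = 64 + (63 - __builtin_ctzl(d));
		printf("%d\n", fac);	/* prints 78: enhanced-DAT 2 (EDAT-2) */
		return 0;
	}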
@@@ -454,10 -454,10 +454,10 @@@ static int kvm_s390_set_tod_low(struct 
  
        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
-       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-               exit_sie(cur_vcpu);
-       }
+       kvm_s390_vcpu_unblock_all(kvm);
        mutex_unlock(&kvm->lock);
        return 0;
  }
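  
  kvm_s390_vcpu_block_all()/kvm_s390_vcpu_unblock_all() are introduced
  elsewhere in this series; a plausible sketch, assuming they simply fan out
  the per-vcpu block/unblock helpers (shown renamed further down) to all
  vcpus under kvm->lock:
  
	static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
	{
		int i;
		struct kvm_vcpu *vcpu;

		/* caller is expected to hold kvm->lock, as set_tod_low does */
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvm_s390_vcpu_block(vcpu);
	}
  
  Blocking every vcpu once up front, writing all epochs, then unblocking
  avoids kicking each vcpu out of SIE individually while the epoch update is
  still in progress.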
@@@ -1311,8 -1311,13 +1311,13 @@@ int kvm_arch_vcpu_setup(struct kvm_vcp
  
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
-                                                   CPUSTAT_STOPPED |
-                                                   CPUSTAT_GED);
+                                                   CPUSTAT_STOPPED);
+       if (test_kvm_facility(vcpu->kvm, 78))
+               atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+       else if (test_kvm_facility(vcpu->kvm, 8))
+               atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
        kvm_s390_vcpu_setup_model(vcpu);
  
        vcpu->arch.sie_block->ecb   = 6;
@@@ -1409,16 -1414,26 +1414,26 @@@ int kvm_arch_vcpu_runnable(struct kvm_v
        return kvm_s390_vcpu_has_irq(vcpu, 0);
  }
  
- void s390_vcpu_block(struct kvm_vcpu *vcpu)
+ void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
  {
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
  }
  
- void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+ void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
  {
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
  }
  
+ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+ {
+       atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ }
+ 
+ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+ {
+       atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+ }
+ 
  /*
   * Kick a guest cpu out of SIE and wait until SIE is not running.
   * If the CPU is not running (e.g. waiting as idle) the function will
@@@ -1430,10 -1445,11 +1445,11 @@@ void exit_sie(struct kvm_vcpu *vcpu
                cpu_relax();
  }
  
- /* Kick a guest cpu out of SIE and prevent SIE-reentry */
- void exit_sie_sync(struct kvm_vcpu *vcpu)
+ /* Kick a guest cpu out of SIE to process a request synchronously */
+ void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
  {
-       s390_vcpu_block(vcpu);
+       kvm_make_request(req, vcpu);
+       kvm_s390_vcpu_request(vcpu);
        exit_sie(vcpu);
  }
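  
  kvm_s390_sync_request() folds the old two-step idiom into one helper: it
  marks the request, sets PROG_REQUEST so SIE is not re-entered before the
  request is processed, and kicks the cpu. The remaining hunks are mostly
  mechanical conversions of this pattern:
  
	/* before: two separate, easy-to-misorder steps */
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);

	/* after: one call does request + block + kick */
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);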
  
@@@ -1447,8 -1463,7 +1463,7 @@@ static void kvm_gmap_notifier(struct gm
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-                       kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-                       exit_sie_sync(vcpu);
+                       kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
                }
        }
  }
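  
  The ~0x1000UL mask is what the "both prefix pages" comment refers to: the
  s390 prefix area is two consecutive 4KB pages, so clearing bit 12 of the
  notified address makes either page compare equal to the stored prefix. A
  standalone illustration (hits_prefix_area is a hypothetical name):
  
	#include <assert.h>
	#include <stdbool.h>

	static bool hits_prefix_area(unsigned long prefix, unsigned long address)
	{
		return prefix == (address & ~0x1000UL);
	}

	int main(void)
	{
		unsigned long prefix = 0x20000UL;
		assert(hits_prefix_area(prefix, 0x20000UL));	/* first page */
		assert(hits_prefix_area(prefix, 0x21000UL));	/* second page */
		assert(!hits_prefix_area(prefix, 0x22000UL));	/* outside */
		return 0;
	}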
@@@ -1720,8 -1735,10 +1735,10 @@@ static bool ibs_enabled(struct kvm_vcp
  
  static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
  {
+       if (!vcpu->requests)
+               return 0;
  retry:
-       s390_vcpu_unblock(vcpu);
+       kvm_s390_vcpu_request_handled(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
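  
  The new early return is a cheap fast path: vcpu->requests is a word of
  pending KVM_REQ_* bits, so when it reads as zero there is nothing to handle
  and the per-request atomic test-and-clear operations can be skipped
  entirely. The consumer pattern, for reference (the handler call is
  illustrative):
  
	if (!vcpu->requests)	/* nothing pending, skip the atomics */
		return 0;
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))	/* test-and-clear */
		rearm_prefix_notifier(vcpu);	/* hypothetical handler name */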
@@@ -1993,14 -2010,12 +2010,14 @@@ static int __vcpu_run(struct kvm_vcpu *
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
                 */
 -              preempt_disable();
 -              kvm_guest_enter();
 -              preempt_enable();
 +              local_irq_disable();
 +              __kvm_guest_enter();
 +              local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
 -              kvm_guest_exit();
 +              local_irq_disable();
 +              __kvm_guest_exit();
 +              local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  
                rc = vcpu_post_run(vcpu, exit_reason);
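  
  The switch from kvm_guest_enter()/kvm_guest_exit() to the double-underscore
  variants moves interrupt disabling to the caller. One plausible reading of
  the matching generic-KVM change in this cycle, sketched as a comment:
  
	/*
	 * Assumed relationship: kvm_guest_enter() is approximately
	 *
	 *	local_irq_save(flags);
	 *	__kvm_guest_enter();
	 *	local_irq_restore(flags);
	 *
	 * so open-coding local_irq_disable()/local_irq_enable() around the
	 * __ variants avoids the flags save/restore on this hot path, at the
	 * cost of the caller guaranteeing that irqs are off.
	 */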
@@@ -2208,8 -2223,7 +2225,7 @@@ int kvm_s390_vcpu_store_adtl_status(str
  static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
  {
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-       kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-       exit_sie_sync(vcpu);
+       kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
  }
  
  static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
  static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
  {
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-       kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-       exit_sie_sync(vcpu);
+       kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
  }
  
  void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)