OSDN Git Service

Merge tag 'kvm-s390-next-20150508' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 11 May 2015 12:06:32 +0000 (14:06 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 11 May 2015 12:06:32 +0000 (14:06 +0200)
KVM: s390: Fixes and features for 4.2 (kvm/next)

Mostly a bunch of fixes, reworks and optimizations for s390.
There is one new feature (EDAT-2 inside the guest), which boils
down to 2GB pages.

arch/s390/include/asm/kvm_host.h
arch/s390/kernel/entry.S
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c

index d01fc58..444c412 100644 (file)
@@ -80,6 +80,7 @@ struct sca_block {
 #define CPUSTAT_MCDS       0x00000100
 #define CPUSTAT_SM         0x00000080
 #define CPUSTAT_IBS        0x00000040
+#define CPUSTAT_GED2       0x00000010
 #define CPUSTAT_G          0x00000008
 #define CPUSTAT_GED        0x00000004
 #define CPUSTAT_J          0x00000002
@@ -95,7 +96,8 @@ struct kvm_s390_sie_block {
 #define PROG_IN_SIE (1<<0)
        __u32   prog0c;                 /* 0x000c */
        __u8    reserved10[16];         /* 0x0010 */
-#define PROG_BLOCK_SIE 0x00000001
+#define PROG_BLOCK_SIE (1<<0)
+#define PROG_REQUEST   (1<<1)
        atomic_t prog20;                /* 0x0020 */
        __u8    reserved24[4];          /* 0x0024 */
        __u64   cputm;                  /* 0x0028 */
index 99b44ac..3238893 100644 (file)
@@ -1005,7 +1005,7 @@ ENTRY(sie64a)
 .Lsie_gmap:
        lg      %r14,__SF_EMPTY(%r15)           # get control block pointer
        oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
-       tm      __SIE_PROG20+3(%r14),1          # last exit...
+       tm      __SIE_PROG20+3(%r14),3          # last exit...
        jnz     .Lsie_done
        LPP     __SF_EMPTY(%r15)                # set guest id
        sie     0(%r14)
index 9e3779e..7365e8a 100644 (file)
@@ -241,21 +241,6 @@ static int handle_prog(struct kvm_vcpu *vcpu)
        return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
-static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
-{
-       int rc, rc2;
-
-       vcpu->stat.exit_instr_and_program++;
-       rc = handle_instruction(vcpu);
-       rc2 = handle_prog(vcpu);
-
-       if (rc == -EOPNOTSUPP)
-               vcpu->arch.sie_block->icptcode = 0x04;
-       if (rc)
-               return rc;
-       return rc2;
-}
-
 /**
  * handle_external_interrupt - used for external interruption interceptions
  *
@@ -355,7 +340,6 @@ static const intercept_handler_t intercept_funcs[] = {
        [0x00 >> 2] = handle_noop,
        [0x04 >> 2] = handle_instruction,
        [0x08 >> 2] = handle_prog,
-       [0x0C >> 2] = handle_instruction_and_prog,
        [0x10 >> 2] = handle_noop,
        [0x14 >> 2] = handle_external_interrupt,
        [0x18 >> 2] = handle_noop,
index 9de4726..322ef9c 100644 (file)
@@ -134,6 +134,8 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
        active_mask = pending_local_irqs(vcpu);
        active_mask |= pending_floating_irqs(vcpu);
+       if (!active_mask)
+               return 0;
 
        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
@@ -941,12 +943,9 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        if (cpu_timer_irq_pending(vcpu))
                set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
 
-       do {
-               irqs = deliverable_irqs(vcpu);
+       while ((irqs = deliverable_irqs(vcpu)) && !rc) {
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
-               if (irq_type == IRQ_PEND_COUNT)
-                       break;
                if (is_ioirq(irq_type)) {
                        rc = __deliver_io(vcpu, irq_type);
                } else {
@@ -958,9 +957,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                        }
                        rc = func(vcpu);
                }
-               if (rc)
-                       break;
-       } while (!rc);
+       }
 
        set_intercept_indicators(vcpu);
 
@@ -1061,7 +1058,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
        if (sclp_has_sigpif())
                return __inject_extcall_sigpif(vcpu, src_id);
 
-       if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+       if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
@@ -1340,12 +1337,54 @@ static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
        return 0;
 }
 
-static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+/*
+ * Find a destination VCPU for a floating irq and kick it.
+ */
+static void __floating_irq_kick(struct kvm *kvm, u64 type)
 {
+       struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
+       struct kvm_vcpu *dst_vcpu;
+       int sigcpu, online_vcpus, nr_tries = 0;
+
+       online_vcpus = atomic_read(&kvm->online_vcpus);
+       if (!online_vcpus)
+               return;
+
+       /* find idle VCPUs first, then round robin */
+       sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
+       if (sigcpu == online_vcpus) {
+               do {
+                       sigcpu = fi->next_rr_cpu;
+                       fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
+                       /* avoid endless loops if all vcpus are stopped */
+                       if (nr_tries++ >= online_vcpus)
+                               return;
+               } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+       }
+       dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+
+       /* make the VCPU drop out of the SIE, or wake it up if sleeping */
+       li = &dst_vcpu->arch.local_int;
+       spin_lock(&li->lock);
+       switch (type) {
+       case KVM_S390_MCHK:
+               atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+               break;
+       case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
+               atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+               break;
+       default:
+               atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+               break;
+       }
+       spin_unlock(&li->lock);
+       kvm_s390_vcpu_wakeup(dst_vcpu);
+}
+
+static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
+{
        struct kvm_s390_float_interrupt *fi;
-       struct kvm_vcpu *dst_vcpu = NULL;
-       int sigcpu;
        u64 type = READ_ONCE(inti->type);
        int rc;
 
@@ -1373,32 +1412,8 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
        if (rc)
                return rc;
 
-       sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
-       if (sigcpu == KVM_MAX_VCPUS) {
-               do {
-                       sigcpu = fi->next_rr_cpu++;
-                       if (sigcpu == KVM_MAX_VCPUS)
-                               sigcpu = fi->next_rr_cpu = 0;
-               } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
-       }
-       dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
-       li = &dst_vcpu->arch.local_int;
-       spin_lock(&li->lock);
-       switch (type) {
-       case KVM_S390_MCHK:
-               atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
-               break;
-       case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-               atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
-               break;
-       default:
-               atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-               break;
-       }
-       spin_unlock(&li->lock);
-       kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
+       __floating_irq_kick(kvm, type);
        return 0;
-
 }
 
 int kvm_s390_inject_vm(struct kvm *kvm,
index 2be391b..d461f8a 100644 (file)
@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
        0xffe6fffbfcfdfc40UL,
-       0x005c800000000000UL,
+       0x005e800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
@@ -454,10 +454,10 @@ static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 
        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
-       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-               exit_sie(cur_vcpu);
-       }
+       kvm_s390_vcpu_unblock_all(kvm);
        mutex_unlock(&kvm->lock);
        return 0;
 }
@@ -1311,8 +1311,13 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
-                                                   CPUSTAT_STOPPED |
-                                                   CPUSTAT_GED);
+                                                   CPUSTAT_STOPPED);
+
+       if (test_kvm_facility(vcpu->kvm, 78))
+               atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+       else if (test_kvm_facility(vcpu->kvm, 8))
+               atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+
        kvm_s390_vcpu_setup_model(vcpu);
 
        vcpu->arch.sie_block->ecb   = 6;
@@ -1409,16 +1414,26 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
-void s390_vcpu_block(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
+static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
+{
+       atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
+static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
+{
+       atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+}
+
 /*
  * Kick a guest cpu out of SIE and wait until SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
@@ -1430,10 +1445,11 @@ void exit_sie(struct kvm_vcpu *vcpu)
                cpu_relax();
 }
 
-/* Kick a guest cpu out of SIE and prevent SIE-reentry */
-void exit_sie_sync(struct kvm_vcpu *vcpu)
+/* Kick a guest cpu out of SIE to process a request synchronously */
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
 {
-       s390_vcpu_block(vcpu);
+       kvm_make_request(req, vcpu);
+       kvm_s390_vcpu_request(vcpu);
        exit_sie(vcpu);
 }
 
@@ -1447,8 +1463,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
-                       kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
-                       exit_sie_sync(vcpu);
+                       kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
                }
        }
 }
@@ -1720,8 +1735,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
 
 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 {
+       if (!vcpu->requests)
+               return 0;
 retry:
-       s390_vcpu_unblock(vcpu);
+       kvm_s390_vcpu_request_handled(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
@@ -2208,8 +2225,7 @@ int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
-       kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
-       exit_sie_sync(vcpu);
+       kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 }
 
 static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
@@ -2225,8 +2241,7 @@ static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
 static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
-       kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
-       exit_sie_sync(vcpu);
+       kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
 }
 
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
index ca108b9..c570478 100644 (file)
@@ -211,10 +211,10 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
-void s390_vcpu_block(struct kvm_vcpu *vcpu);
-void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
 void exit_sie(struct kvm_vcpu *vcpu);
-void exit_sie_sync(struct kvm_vcpu *vcpu);
+void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
 /* is cmma enabled */
@@ -228,6 +228,25 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
 int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                             struct kvm_s390_pgm_info *pgm_info);
 
+static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       WARN_ON(!mutex_is_locked(&kvm->lock));
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_s390_vcpu_block(vcpu);
+}
+
+static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               kvm_s390_vcpu_unblock(vcpu);
+}
+
 /**
  * kvm_s390_inject_prog_cond - conditionally inject a program check
  * @vcpu: virtual cpu
index d22d8ee..ad42422 100644 (file)
@@ -698,10 +698,14 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
-       /* We dont support EDAT2
        case 0x00002000:
+               /* only support 2G frame size if EDAT2 is available and we are
+                  not in 24-bit addressing mode */
+               if (!test_kvm_facility(vcpu->kvm, 78) ||
+                   psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
+                       return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
-               break;*/
+               break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }