KVM: s390: fix external call injection without sigp interpretation
arch/s390/kvm/interrupt.c [android-x86/kernel.git]
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

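/*
 * All non-I/O floating interrupt types use type values in the
 * 0xfffe0000/0xffff0000 range; any other type value encodes a
 * subchannel (cssid/ssid/schid or adapter indicator) and is therefore
 * an I/O interrupt.
 */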
static int is_ioint(u64 type)
{
        return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
            (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
                return 0;
        return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
        if (psw_extint_disabled(vcpu) ||
            !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                return 0;
        if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
                /* No timer interrupts when single stepping */
                return 0;
        return 1;
}

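/*
 * The interruption subclass (ISC) sits in bits 2-4 of the I/O
 * interruption word.  Convert it to the single mask bit that the
 * guest's CR6 I/O interruption subclass mask uses for that ISC.
 */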
static u64 int_word_to_isc_bits(u32 int_word)
{
        u8 isc = (int_word & 0x38000000) >> 27;

        return (0x80 >> isc) << 24;
}

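/*
 * Decide whether an interrupt can be delivered right now: external
 * interrupts are gated by the PSW external mask plus a subclass bit in
 * CR0, I/O interrupts by CR6 and machine checks by CR14, while program
 * and sigp interrupts are always deliverable.
 */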
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_EXTERNAL_CALL:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_EMERGENCY:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
                        return 1;
                return 0;
        case KVM_S390_INT_CLOCK_COMP:
                return ckc_interrupts_enabled(vcpu);
        case KVM_S390_INT_CPU_TIMER:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
                        return 1;
                return 0;
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_INIT:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
                        return 1;
                return 0;
        case KVM_S390_PROGRAM_INT:
        case KVM_S390_SIGP_STOP:
        case KVM_S390_SIGP_SET_PREFIX:
        case KVM_S390_RESTART:
                return 1;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
                        return 1;
                return 0;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        return 0;
                if (vcpu->arch.sie_block->gcr[6] &
                    int_word_to_isc_bits(inti->io.io_int_word))
                        return 1;
                return 0;
        default:
                printk(KERN_WARNING "illegal interrupt type %llx\n",
                       inti->type);
                BUG();
        }
        return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.local_int.pending_irqs;
}

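/*
 * Mask out pending local interrupts that the guest is currently not
 * enabled for: external interrupts are gated by the PSW external mask
 * and the per-subclass bits in CR0, machine checks by the PSW machine
 * check mask.
 */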
static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
        unsigned long active_mask = pending_local_irqs(vcpu);

        if (psw_extint_disabled(vcpu))
                active_mask &= ~IRQ_PEND_EXT_MASK;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                __clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
                __clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
                __clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
        if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
                __clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
        if (psw_mchk_disabled(vcpu))
                active_mask &= ~IRQ_PEND_MCHK_MASK;

        /*
         * STOP irqs will never be actively delivered. They are triggered via
         * intercept requests and cleared when the stop intercept is performed.
         */
        __clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

        return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
        clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
                          &vcpu->arch.sie_block->cpuflags);
        vcpu->arch.sie_block->lctl = 0x0000;
        vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

        if (guestdbg_enabled(vcpu)) {
                vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
        atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
                return;
        if (psw_extint_disabled(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
        if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
                return;
        if (psw_mchk_disabled(vcpu))
                vcpu->arch.sie_block->ictl |= ICTL_LPSW;
        else
                vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
        if (kvm_s390_is_stop_irq_pending(vcpu))
                __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
        set_intercept_indicators_ext(vcpu);
        set_intercept_indicators_mchk(vcpu);
        set_intercept_indicators_stop(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_interrupt_info *inti)
{
        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
        case KVM_S390_INT_PFAULT_DONE:
        case KVM_S390_INT_VIRTIO:
                if (psw_extint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR0;
                break;
        case KVM_S390_MCHK:
                if (psw_mchk_disabled(vcpu))
                        vcpu->arch.sie_block->ictl |= ICTL_LPSW;
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR14;
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                if (psw_ioint_disabled(vcpu))
                        __set_cpuflag(vcpu, CPUSTAT_IO_INT);
                else
                        vcpu->arch.sie_block->lctl |= LCTL_CR6;
                break;
        default:
                BUG();
        }
}

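/*
 * Derive the instruction length code for a program interruption.  For
 * instruction interceptions the length follows from the first two
 * opcode bits in the ipa field (00 -> 2 bytes, 01/10 -> 4, 11 -> 6);
 * for program interruption intercepts the hardware already provides it.
 */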
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
        const unsigned short table[] = { 2, 4, 4, 6 };

        switch (vcpu->arch.sie_block->icptcode) {
        case ICPT_INST:
        case ICPT_INSTPROGI:
        case ICPT_OPEREXC:
        case ICPT_PARTEXEC:
        case ICPT_IOINST:
                /* last instruction only stored for these icptcodes */
                return table[vcpu->arch.sie_block->ipa >> 14];
        case ICPT_PROGI:
                return vcpu->arch.sie_block->pgmilc;
        default:
                return 0;
        }
}

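/*
 * The __deliver_* functions below all follow the architected delivery
 * pattern: store the interruption code and parameters into the guest
 * lowcore, save the current PSW as the old PSW and load the new PSW
 * for the interruption class.
 */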
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                         0, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
                           (u16 __user *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_ext_info ext;
        int rc;

        spin_lock(&li->lock);
        ext = li->irq.ext;
        clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        li->irq.ext.ext_params2 = 0;
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
                   0, ext.ext_params2);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_INIT,
                                         0, ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info mchk;
        int rc;

        spin_lock(&li->lock);
        mchk = li->irq.mchk;
        /*
         * If there was an exigent machine check pending, then any repressible
         * machine checks that might have been pending are indicated along
         * with it, so always clear both bits
         */
        clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        memset(&li->irq.mchk, 0, sizeof(mchk));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk.mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk.cr14, mchk.mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk.mcic,
                           (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
                           (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk.fixed_logout, sizeof(mchk.fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
        vcpu->stat.deliver_restart_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

        rc  = write_guest_lc(vcpu,
                             offsetof(struct _lowcore, restart_old_psw),
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info prefix;

        spin_lock(&li->lock);
        prefix = li->irq.prefix;
        li->irq.prefix.address = 0;
        clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
        vcpu->stat.deliver_prefix_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_SIGP_SET_PREFIX,
                                         prefix.address, 0);

        kvm_s390_set_prefix(vcpu, prefix.address);
        return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        int rc;
        int cpu_addr;

        spin_lock(&li->lock);
        cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        clear_bit(cpu_addr, li->sigp_emerg_pending);
        if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
                clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
        vcpu->stat.deliver_emergency_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                         cpu_addr, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info extcall;
        int rc;

        spin_lock(&li->lock);
        extcall = li->irq.extcall;
        li->irq.extcall.code = 0;
        clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
        vcpu->stat.deliver_external_call++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_EXTERNAL_CALL,
                                         extcall.code, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
                           (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
                            sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0;
        u16 ilc = get_ilc(vcpu);

        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
        clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);

        VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
                   pgm_info.code, ilc);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);

        switch (pgm_info.code & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
        case PGM_ASX_TRANSLATION:
        case PGM_EX_TRANSLATION:
        case PGM_LFX_TRANSLATION:
        case PGM_LSTE_SEQUENCE:
        case PGM_LSX_TRANSLATION:
        case PGM_LX_TRANSLATION:
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
        case PGM_ASTE_INSTANCE:
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
                rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
                                  (u8 *)__LC_EXC_ACCESS_ID);
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
        case PGM_REGION_FIRST_TRANS:
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
                                   (u8 *)__LC_OP_ACCESS_ID);
                break;
        case PGM_MONITOR:
                rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
                                  (u16 *)__LC_MON_CLASS_NR);
                rc |= put_guest_lc(vcpu, pgm_info.mon_code,
                                   (u64 *)__LC_MON_CODE);
                break;
        case PGM_DATA:
                rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
                                  (u32 *)__LC_DATA_EXC_CODE);
                break;
        case PGM_PROTECTION:
                rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
                                  (u64 *)__LC_TRANS_EXC_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
                                   (u8 *)__LC_EXC_ACCESS_ID);
                break;
        }

        if (pgm_info.code & PGM_PER) {
                rc |= put_guest_lc(vcpu, pgm_info.per_code,
                                   (u8 *) __LC_PER_CODE);
                rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
                                   (u8 *)__LC_PER_ATMID);
                rc |= put_guest_lc(vcpu, pgm_info.per_address,
                                   (u64 *) __LC_PER_ADDRESS);
                rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
                                   (u8 *) __LC_PER_ACCESS_ID);
        }

        rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, pgm_info.code,
                           (u16 *)__LC_PGM_INT_CODE);
        rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
                                          struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
                   inti->ext.ext_params);
        vcpu->stat.deliver_service_signal++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params, 0);

        rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
                                         KVM_S390_INT_PFAULT_DONE, 0,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
                                         struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
                   inti->ext.ext_params, inti->ext.ext_params2);
        vcpu->stat.deliver_virtio_interrupt++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         inti->ext.ext_params,
                                         inti->ext.ext_params2);

        rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
        rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
        rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= put_guest_lc(vcpu, inti->ext.ext_params,
                           (u32 *)__LC_EXT_PARAMS);
        rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
                           (u64 *)__LC_EXT_PARAMS2);
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
                                     struct kvm_s390_interrupt_info *inti)
{
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
        vcpu->stat.deliver_io_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
                                         ((__u32)inti->io.subchannel_id << 16) |
                                                inti->io.subchannel_nr,
                                         ((__u64)inti->io.io_int_parm << 32) |
                                                inti->io.io_int_word);

        rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
                           (u16 *)__LC_SUBCHANNEL_ID);
        rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
                           (u16 *)__LC_SUBCHANNEL_NR);
        rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
                           (u32 *)__LC_IO_INT_PARM);
        rc |= put_guest_lc(vcpu, inti->io.io_int_word,
                           (u32 *)__LC_IO_INT_WORD);
        rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        struct kvm_s390_mchk_info *mchk = &inti->mchk;
        int rc;

        VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
                   mchk->mcic);
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
                                         mchk->cr14, mchk->mcic);

        rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
        rc |= put_guest_lc(vcpu, mchk->mcic,
                        (u64 __user *) __LC_MCCK_CODE);
        rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
                        (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
        rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
                             &mchk->fixed_logout, sizeof(mchk->fixed_logout));
        rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
                             &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
                            &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
        return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
        [IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
        [IRQ_PEND_PROG]           = __deliver_prog,
        [IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
        [IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
        [IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
        [IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
        [IRQ_PEND_RESTART]        = __deliver_restart,
        [IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
        [IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_interrupt_info *inti)
{
        int rc;

        switch (inti->type) {
        case KVM_S390_INT_SERVICE:
                rc = __deliver_service(vcpu, inti);
                break;
        case KVM_S390_INT_PFAULT_DONE:
                rc = __deliver_pfault_done(vcpu, inti);
                break;
        case KVM_S390_INT_VIRTIO:
                rc = __deliver_virtio(vcpu, inti);
                break;
        case KVM_S390_MCHK:
                rc = __deliver_mchk_floating(vcpu, inti);
                break;
        case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
                rc = __deliver_io(vcpu, inti);
                break;
        default:
                BUG();
        }

        return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

        if (!sclp_has_sigpif())
                return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

        return (sigp_ctrl & SIGP_CTRL_C) &&
               (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *inti;
        int rc;

        rc = !!deliverable_local_irqs(vcpu);

        if ((!rc) && atomic_read(&fi->active)) {
                spin_lock(&fi->lock);
                list_for_each_entry(inti, &fi->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
                spin_unlock(&fi->lock);
        }

        if (!rc && kvm_cpu_has_pending_timer(vcpu))
                rc = 1;

        /* external call pending and deliverable */
        if (!rc && kvm_s390_ext_call_pending(vcpu) &&
            !psw_extint_disabled(vcpu) &&
            (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
                rc = 1;

        if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
                rc = 1;

        return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        if (!(vcpu->arch.sie_block->ckc <
              get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                return 0;
        if (!ckc_interrupts_enabled(vcpu))
                return 0;
        return 1;
}

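/*
 * Block the vcpu in an enabled wait: refuse a disabled wait, and only
 * arm the clock comparator hrtimer when ckc interrupts can actually be
 * delivered to the guest.
 */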
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
        u64 now, sltime;

        vcpu->stat.exit_wait_state++;

        /* fast path */
        if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
                return 0;

        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
                return -EOPNOTSUPP; /* disabled wait */
        }

        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                __set_cpu_idle(vcpu);
                goto no_timer;
        }

        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /* underflow */
        if (vcpu->arch.sie_block->ckc < now)
                return 0;

        __set_cpu_idle(vcpu);
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq)) {
                /*
                 * The vcpu gave up the cpu voluntarily, mark it as a good
                 * yield-candidate.
                 */
                vcpu->preempted = true;
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;
        u64 now, sltime;

        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

        /*
         * If the monotonic clock runs faster than the tod clock we might be
         * woken up too early and have to go back to sleep to avoid deadlocks.
         */
        if (vcpu->arch.sie_block->ckc > now &&
            hrtimer_forward_now(timer, ns_to_ktime(sltime)))
                return HRTIMER_RESTART;
        kvm_s390_vcpu_wakeup(vcpu);
        return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        li->pending_irqs = 0;
        bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
        memset(&li->irq, 0, sizeof(li->irq));
        spin_unlock(&li->lock);

        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
        deliver_irq_t func;
        int deliver;
        int rc = 0;
        unsigned long irq_type;
        unsigned long deliverable_irqs;

        __reset_intercept_indicators(vcpu);

        /* pending ckc conditions might have been invalidated */
        clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        if (kvm_cpu_has_pending_timer(vcpu))
                set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

        do {
                deliverable_irqs = deliverable_local_irqs(vcpu);
                /* bits are in the order of interrupt priority */
                irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
                if (irq_type == IRQ_PEND_COUNT)
                        break;
                func = deliver_irq_funcs[irq_type];
                if (!func) {
                        WARN_ON_ONCE(func == NULL);
                        clear_bit(irq_type, &li->pending_irqs);
                        continue;
                }
                rc = func(vcpu);
        } while (!rc && irq_type != IRQ_PEND_COUNT);

        set_intercept_indicators_local(vcpu);

        if (!rc && atomic_read(&fi->active)) {
                do {
                        deliver = 0;
                        spin_lock(&fi->lock);
                        list_for_each_entry_safe(inti, n, &fi->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
                                        fi->irq_count--;
                                        deliver = 1;
                                        break;
                                }
                                __set_intercept_indicator(vcpu, inti);
                        }
                        if (list_empty(&fi->list))
                                atomic_set(&fi->active, 0);
                        spin_unlock(&fi->lock);
                        if (deliver) {
                                rc = __deliver_floating_interrupt(vcpu, inti);
                                kfree(inti);
                        }
                } while (!rc && deliver);
        }

        return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        li->irq.pgm = irq->u.pgm;
        set_bit(IRQ_PEND_PROG, &li->pending_irqs);
        return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;

        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
                                   0, 1);
        spin_lock(&li->lock);
        irq.u.pgm.code = code;
        __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                             struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_irq irq;
        int rc;

        VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
                   pgm_info->code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   pgm_info->code, 0, 1);
        spin_lock(&li->lock);
        irq.u.pgm = *pgm_info;
        rc = __inject_prog(vcpu, &irq);
        BUG_ON(waitqueue_active(li->wq));
        spin_unlock(&li->lock);
        return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
                   irq->u.ext.ext_params, irq->u.ext.ext_params2);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
                                   irq->u.ext.ext_params,
                                   irq->u.ext.ext_params2, 2);

        li->irq.ext = irq->u.ext;
        set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

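/*
 * With the sigp interpretation facility the external call is recorded
 * in the vcpu's SCA entry and delivered by the hardware.  The cmpxchg
 * only succeeds while no other external call is pending, matching the
 * architected "one pending external call per cpu" rule.
 */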
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
        unsigned char new_val, old_val;
        uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

        new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
        old_val = *sigp_ctrl & ~SIGP_CTRL_C;
        if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
                /* another external call is pending */
                return -EBUSY;
        }
        atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
        return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
        uint16_t src_id = irq->u.extcall.code;

        VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
                   src_id);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
                                   src_id, 0, 2);

        /* sending vcpu invalid */
        if (src_id >= KVM_MAX_VCPUS ||
            kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
                return -EINVAL;

        if (sclp_has_sigpif())
                return __inject_extcall_sigpif(vcpu, src_id);

        if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
                return -EBUSY;
        *extcall = irq->u.extcall;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

        VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
                   irq->u.prefix.address);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
                                   irq->u.prefix.address, 0, 2);

        if (!is_vcpu_stopped(vcpu))
                return -EBUSY;

        *prefix = irq->u.prefix;
        set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
        return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_stop_info *stop = &li->irq.stop;
        int rc = 0;

        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

        if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
                return -EINVAL;

        if (is_vcpu_stopped(vcpu)) {
                if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
                        rc = kvm_s390_store_status_unloaded(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
                return rc;
        }

        if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
                return -EBUSY;
        stop->flags = irq->u.stop.flags;
        __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
        return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
                                 struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

        set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
        return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
                   irq->u.emerg.code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
                                   irq->u.emerg.code, 0, 2);

        set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
        set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

        VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
                   irq->u.mchk.mcic);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
                                   irq->u.mchk.mcic, 2);

        /*
         * Because repressible machine checks can be indicated along with
         * exigent machine checks (PoP, Chapter 11, Interruption action)
         * we need to combine cr14, mcic and external damage code.
         * Failing storage address and the logout area should not be or'ed
         * together, we just indicate the last occurrence of the corresponding
         * machine check
         */
        mchk->cr14 |= irq->u.mchk.cr14;
        mchk->mcic |= irq->u.mchk.mcic;
        mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
        mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
        memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
               sizeof(mchk->fixed_logout));
        if (mchk->mcic & MCHK_EX_MASK)
                set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
        else if (mchk->mcic & MCHK_REP_MASK)
                set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
        return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
                                   0, 0, 2);

        set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        return 0;
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 cr6, u64 schid)
{
        struct kvm_s390_float_interrupt *fi;
        struct kvm_s390_interrupt_info *inti, *iter;

        if ((!schid && !cr6) || (schid && cr6))
                return NULL;
        fi = &kvm->arch.float_int;
        spin_lock(&fi->lock);
        inti = NULL;
        list_for_each_entry(iter, &fi->list, list) {
                if (!is_ioint(iter->type))
                        continue;
                if (cr6 &&
                    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
                        continue;
                if (schid) {
                        if (((schid & 0x00000000ffff0000) >> 16) !=
                            iter->io.subchannel_id)
                                continue;
                        if ((schid & 0x000000000000ffff) !=
                            iter->io.subchannel_nr)
                                continue;
                }
                inti = iter;
                break;
        }
        if (inti) {
                list_del_init(&inti->list);
                fi->irq_count--;
        }
        if (list_empty(&fi->list))
                atomic_set(&fi->active, 0);
        spin_unlock(&fi->lock);
        return inti;
}

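/*
 * Queue a floating interrupt and kick a vcpu that can take it: prefer
 * an idle vcpu, otherwise pick the next vcpu round-robin.  I/O
 * interrupts are kept sorted by interruption subclass.
 */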
1215 static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1216 {
1217         struct kvm_s390_local_interrupt *li;
1218         struct kvm_s390_float_interrupt *fi;
1219         struct kvm_s390_interrupt_info *iter;
1220         struct kvm_vcpu *dst_vcpu = NULL;
1221         int sigcpu;
1222         int rc = 0;
1223
1224         fi = &kvm->arch.float_int;
1225         spin_lock(&fi->lock);
1226         if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
1227                 rc = -EINVAL;
1228                 goto unlock_fi;
1229         }
1230         fi->irq_count++;
1231         if (!is_ioint(inti->type)) {
1232                 list_add_tail(&inti->list, &fi->list);
1233         } else {
1234                 u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);
1235
1236                 /* Keep I/O interrupts sorted in isc order. */
1237                 list_for_each_entry(iter, &fi->list, list) {
1238                         if (!is_ioint(iter->type))
1239                                 continue;
1240                         if (int_word_to_isc_bits(iter->io.io_int_word)
1241                             <= isc_bits)
1242                                 continue;
1243                         break;
1244                 }
1245                 list_add_tail(&inti->list, &iter->list);
1246         }
1247         atomic_set(&fi->active, 1);
1248         if (atomic_read(&kvm->online_vcpus) == 0)
1249                 goto unlock_fi;
1250         sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
1251         if (sigcpu == KVM_MAX_VCPUS) {
1252                 do {
1253                         sigcpu = fi->next_rr_cpu++;
1254                         if (sigcpu == KVM_MAX_VCPUS)
1255                                 sigcpu = fi->next_rr_cpu = 0;
1256                 } while (kvm_get_vcpu(kvm, sigcpu) == NULL);
1257         }
1258         dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1259         li = &dst_vcpu->arch.local_int;
1260         spin_lock(&li->lock);
1261         switch (inti->type) {
1262         case KVM_S390_MCHK:
1263                 atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
1264                 break;
1265         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1266                 atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
1267                 break;
1268         default:
1269                 atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
1270                 break;
1271         }
1272         spin_unlock(&li->lock);
1273         kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
1274 unlock_fi:
1275         spin_unlock(&fi->lock);
1276         return rc;
1277 }
1278
1279 int kvm_s390_inject_vm(struct kvm *kvm,
1280                        struct kvm_s390_interrupt *s390int)
1281 {
1282         struct kvm_s390_interrupt_info *inti;
1283         int rc;
1284
1285         inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1286         if (!inti)
1287                 return -ENOMEM;
1288
1289         inti->type = s390int->type;
1290         switch (inti->type) {
1291         case KVM_S390_INT_VIRTIO:
1292                 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
1293                          s390int->parm, s390int->parm64);
1294                 inti->ext.ext_params = s390int->parm;
1295                 inti->ext.ext_params2 = s390int->parm64;
1296                 break;
1297         case KVM_S390_INT_SERVICE:
1298                 VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
1299                 inti->ext.ext_params = s390int->parm;
1300                 break;
1301         case KVM_S390_INT_PFAULT_DONE:
1302                 inti->ext.ext_params2 = s390int->parm64;
1303                 break;
1304         case KVM_S390_MCHK:
1305                 VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
1306                          s390int->parm64);
1307                 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1308                 inti->mchk.mcic = s390int->parm64;
1309                 break;
1310         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1311                 if (inti->type & IOINT_AI_MASK)
1312                         VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
1313                 else
1314                         VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
1315                                  s390int->type & IOINT_CSSID_MASK,
1316                                  s390int->type & IOINT_SSID_MASK,
1317                                  s390int->type & IOINT_SCHID_MASK);
1318                 inti->io.subchannel_id = s390int->parm >> 16;
1319                 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1320                 inti->io.io_int_parm = s390int->parm64 >> 32;
1321                 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1322                 break;
1323         default:
1324                 kfree(inti);
1325                 return -EINVAL;
1326         }
1327         trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1328                                  2);
1329
1330         rc = __inject_vm(kvm, inti);
1331         if (rc)
1332                 kfree(inti);
1333         return rc;
1334 }
1335
1336 int kvm_s390_reinject_io_int(struct kvm *kvm,
1337                               struct kvm_s390_interrupt_info *inti)
1338 {
1339         return __inject_vm(kvm, inti);
1340 }
1341
1342 int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1343                        struct kvm_s390_irq *irq)
1344 {
1345         irq->type = s390int->type;
1346         switch (irq->type) {
1347         case KVM_S390_PROGRAM_INT:
1348                 if (s390int->parm & 0xffff0000)
1349                         return -EINVAL;
1350                 irq->u.pgm.code = s390int->parm;
1351                 break;
1352         case KVM_S390_SIGP_SET_PREFIX:
1353                 irq->u.prefix.address = s390int->parm;
1354                 break;
1355         case KVM_S390_SIGP_STOP:
1356                 irq->u.stop.flags = s390int->parm;
1357                 break;
1358         case KVM_S390_INT_EXTERNAL_CALL:
1359                 if (s390int->parm & 0xffff0000)
1360                         return -EINVAL;
1361                 irq->u.extcall.code = s390int->parm;
1362                 break;
1363         case KVM_S390_INT_EMERGENCY:
1364                 if (s390int->parm & 0xffff0000)
1365                         return -EINVAL;
1366                 irq->u.emerg.code = s390int->parm;
1367                 break;
1368         case KVM_S390_MCHK:
1369                 irq->u.mchk.mcic = s390int->parm64;
1370                 break;
1371         }
1372         return 0;
1373 }
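/*
 * Example: the legacy pair {type = KVM_S390_INT_EMERGENCY, parm = 3}
 * is converted to {type = KVM_S390_INT_EMERGENCY, u.emerg.code = 3};
 * parm values with bits set outside the 16-bit code are rejected with
 * -EINVAL before any interrupt state is touched.
 */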
1374
1375 int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1376 {
1377         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1378
1379         return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1380 }
1381
1382 void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1383 {
1384         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1385
1386         spin_lock(&li->lock);
1387         li->irq.stop.flags = 0;
1388         clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1389         spin_unlock(&li->lock);
1390 }
1391
1392 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1393 {
1394         struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1395         int rc;
1396
1397         spin_lock(&li->lock);
1398         switch (irq->type) {
1399         case KVM_S390_PROGRAM_INT:
1400                 VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
1401                            irq->u.pgm.code);
1402                 rc = __inject_prog(vcpu, irq);
1403                 break;
1404         case KVM_S390_SIGP_SET_PREFIX:
1405                 rc = __inject_set_prefix(vcpu, irq);
1406                 break;
1407         case KVM_S390_SIGP_STOP:
1408                 rc = __inject_sigp_stop(vcpu, irq);
1409                 break;
1410         case KVM_S390_RESTART:
1411                 rc = __inject_sigp_restart(vcpu, irq);
1412                 break;
1413         case KVM_S390_INT_CLOCK_COMP:
1414                 rc = __inject_ckc(vcpu);
1415                 break;
1416         case KVM_S390_INT_CPU_TIMER:
1417                 rc = __inject_cpu_timer(vcpu);
1418                 break;
1419         case KVM_S390_INT_EXTERNAL_CALL:
1420                 rc = __inject_extcall(vcpu, irq);
1421                 break;
1422         case KVM_S390_INT_EMERGENCY:
1423                 rc = __inject_sigp_emergency(vcpu, irq);
1424                 break;
1425         case KVM_S390_MCHK:
1426                 rc = __inject_mchk(vcpu, irq);
1427                 break;
1428         case KVM_S390_INT_PFAULT_INIT:
1429                 rc = __inject_pfault_init(vcpu, irq);
1430                 break;
1431         case KVM_S390_INT_VIRTIO:
1432         case KVM_S390_INT_SERVICE:
1433         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1434         default:
1435                 rc = -EINVAL;
1436         }
1437         spin_unlock(&li->lock);
1438         if (!rc)
1439                 kvm_s390_vcpu_wakeup(vcpu);
1440         return rc;
1441 }
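/*
 * Usage sketch (illustrative, assuming a kernel that exposes the
 * KVM_S390_IRQ vcpu ioctl and an open vcpu fd in "vcpu_fd"): injecting
 * an emergency signal from CPU 1 into the local interrupt state.
 *
 *        struct kvm_s390_irq irq = {
 *                .type = KVM_S390_INT_EMERGENCY,
 *                .u.emerg.code = 1,
 *        };
 *
 *        ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */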
1442
1443 void kvm_s390_clear_float_irqs(struct kvm *kvm)
1444 {
1445         struct kvm_s390_float_interrupt *fi;
1446         struct kvm_s390_interrupt_info  *n, *inti = NULL;
1447
1448         fi = &kvm->arch.float_int;
1449         spin_lock(&fi->lock);
1450         list_for_each_entry_safe(inti, n, &fi->list, list) {
1451                 list_del(&inti->list);
1452                 kfree(inti);
1453         }
1454         fi->irq_count = 0;
1455         atomic_set(&fi->active, 0);
1456         spin_unlock(&fi->lock);
1457 }
1458
1459 static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1460                        struct kvm_s390_irq *irq)
1461 {
1462         irq->type = inti->type;
1463         switch (inti->type) {
1464         case KVM_S390_INT_PFAULT_INIT:
1465         case KVM_S390_INT_PFAULT_DONE:
1466         case KVM_S390_INT_VIRTIO:
1467         case KVM_S390_INT_SERVICE:
1468                 irq->u.ext = inti->ext;
1469                 break;
1470         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1471                 irq->u.io = inti->io;
1472                 break;
1473         case KVM_S390_MCHK:
1474                 irq->u.mchk = inti->mchk;
1475                 break;
1476         }
1477 }
1478
1479 static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1480 {
1481         struct kvm_s390_interrupt_info *inti;
1482         struct kvm_s390_float_interrupt *fi;
1483         struct kvm_s390_irq *buf;
1484         int max_irqs;
1485         int ret = 0;
1486         int n = 0;
1487
1488         if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1489                 return -EINVAL;
1490
1491         /*
1492          * We already use -ENOMEM to signal that userspace may retry
1493          * with a bigger buffer, so an allocation failure here has to
1494          * return something else (-ENOBUFS).
1495          */
1496         buf = vzalloc(len);
1497         if (!buf)
1498                 return -ENOBUFS;
1499
1500         max_irqs = len / sizeof(struct kvm_s390_irq);
1501
1502         fi = &kvm->arch.float_int;
1503         spin_lock(&fi->lock);
1504         list_for_each_entry(inti, &fi->list, list) {
1505                 if (n == max_irqs) {
1506                         /* signal userspace to try again */
1507                         ret = -ENOMEM;
1508                         break;
1509                 }
1510                 inti_to_irq(inti, &buf[n]);
1511                 n++;
1512         }
1513         spin_unlock(&fi->lock);
1514         if (!ret && n > 0) {
1515                 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1516                         ret = -EFAULT;
1517         }
1518         vfree(buf);
1519
1520         return ret < 0 ? ret : n;
1521 }
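/*
 * Usage sketch (illustrative, "flic_fd" is an assumed FLIC device fd):
 * reading the pending floating interrupts.  On success the ioctl
 * returns the number of interrupts copied; -ENOMEM asks userspace to
 * retry with a bigger buffer, -ENOBUFS reports a kernel allocation
 * failure.
 *
 *        struct kvm_s390_irq buf[64];
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *                .attr  = sizeof(buf),
 *                .addr  = (__u64)(unsigned long)buf,
 *        };
 *
 *        int n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */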
1522
1523 static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1524 {
1525         int r;
1526
1527         switch (attr->group) {
1528         case KVM_DEV_FLIC_GET_ALL_IRQS:
1529                 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
1530                                           attr->attr);
1531                 break;
1532         default:
1533                 r = -EINVAL;
1534         }
1535
1536         return r;
1537 }
1538
1539 static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1540                                      u64 addr)
1541 {
1542         struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1543         void *target = NULL;
1544         void __user *source;
1545         u64 size;
1546
1547         if (get_user(inti->type, (u64 __user *)addr))
1548                 return -EFAULT;
1549
1550         switch (inti->type) {
1551         case KVM_S390_INT_PFAULT_INIT:
1552         case KVM_S390_INT_PFAULT_DONE:
1553         case KVM_S390_INT_VIRTIO:
1554         case KVM_S390_INT_SERVICE:
1555                 target = (void *) &inti->ext;
1556                 source = &uptr->u.ext;
1557                 size = sizeof(inti->ext);
1558                 break;
1559         case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1560                 target = (void *) &inti->io;
1561                 source = &uptr->u.io;
1562                 size = sizeof(inti->io);
1563                 break;
1564         case KVM_S390_MCHK:
1565                 target = (void *) &inti->mchk;
1566                 source = &uptr->u.mchk;
1567                 size = sizeof(inti->mchk);
1568                 break;
1569         default:
1570                 return -EINVAL;
1571         }
1572
1573         if (copy_from_user(target, source, size))
1574                 return -EFAULT;
1575
1576         return 0;
1577 }
1578
1579 static int enqueue_floating_irq(struct kvm_device *dev,
1580                                 struct kvm_device_attr *attr)
1581 {
1582         struct kvm_s390_interrupt_info *inti = NULL;
1583         int r = 0;
1584         int len = attr->attr;
1585
1586         if (len % sizeof(struct kvm_s390_irq) != 0)
1587                 return -EINVAL;
1588         if (len > KVM_S390_FLIC_MAX_BUFFER)
1589                 return -EINVAL;
1590
1591         while (len >= sizeof(struct kvm_s390_irq)) {
1592                 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1593                 if (!inti)
1594                         return -ENOMEM;
1595
1596                 r = copy_irq_from_user(inti, attr->addr);
1597                 if (r) {
1598                         kfree(inti);
1599                         return r;
1600                 }
1601                 r = __inject_vm(dev->kvm, inti);
1602                 if (r) {
1603                         kfree(inti);
1604                         return r;
1605                 }
1606                 len -= sizeof(struct kvm_s390_irq);
1607                 attr->addr += sizeof(struct kvm_s390_irq);
1608         }
1609
1610         return r;
1611 }
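/*
 * Usage sketch (illustrative): queueing one floating interrupt via
 * KVM_DEV_FLIC_ENQUEUE.  attr.attr carries the buffer length in bytes
 * and must be a multiple of sizeof(struct kvm_s390_irq); "flic_fd" and
 * "param" are assumed.
 *
 *        struct kvm_s390_irq irq = {
 *                .type = KVM_S390_INT_SERVICE,
 *                .u.ext.ext_params = param,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ENQUEUE,
 *                .attr  = sizeof(irq),
 *                .addr  = (__u64)(unsigned long)&irq,
 *        };
 *
 *        ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */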
1612
1613 static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1614 {
1615         if (id >= MAX_S390_IO_ADAPTERS)
1616                 return NULL;
1617         return kvm->arch.adapters[id];
1618 }
1619
1620 static int register_io_adapter(struct kvm_device *dev,
1621                                struct kvm_device_attr *attr)
1622 {
1623         struct s390_io_adapter *adapter;
1624         struct kvm_s390_io_adapter adapter_info;
1625
1626         if (copy_from_user(&adapter_info,
1627                            (void __user *)attr->addr, sizeof(adapter_info)))
1628                 return -EFAULT;
1629
1630         if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1631             (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1632                 return -EINVAL;
1633
1634         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1635         if (!adapter)
1636                 return -ENOMEM;
1637
1638         INIT_LIST_HEAD(&adapter->maps);
1639         init_rwsem(&adapter->maps_lock);
1640         atomic_set(&adapter->nr_maps, 0);
1641         adapter->id = adapter_info.id;
1642         adapter->isc = adapter_info.isc;
1643         adapter->maskable = adapter_info.maskable;
1644         adapter->masked = false;
1645         adapter->swap = adapter_info.swap;
1646         dev->kvm->arch.adapters[adapter->id] = adapter;
1647
1648         return 0;
1649 }
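/*
 * Usage sketch (illustrative): registering adapter 0 on interruption
 * subclass 3, maskable, without bit swapping; "flic_fd" is assumed.
 *
 *        struct kvm_s390_io_adapter ad = {
 *                .id = 0, .isc = 3, .maskable = 1, .swap = 0,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *                .addr  = (__u64)(unsigned long)&ad,
 *        };
 *
 *        ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */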
1650
1651 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1652 {
1653         int ret;
1654         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1655
1656         if (!adapter || !adapter->maskable)
1657                 return -EINVAL;
1658         ret = adapter->masked;
1659         adapter->masked = masked;
1660         return ret;
1661 }
1662
1663 static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1664 {
1665         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1666         struct s390_map_info *map;
1667         int ret;
1668
1669         if (!adapter || !addr)
1670                 return -EINVAL;
1671
1672         map = kzalloc(sizeof(*map), GFP_KERNEL);
1673         if (!map) {
1674                 ret = -ENOMEM;
1675                 goto out;
1676         }
1677         INIT_LIST_HEAD(&map->list);
1678         map->guest_addr = addr;
1679         map->addr = gmap_translate(kvm->arch.gmap, addr);
1680         if (map->addr == -EFAULT) {
1681                 ret = -EFAULT;
1682                 goto out;
1683         }
1684         ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1685         if (ret < 0)
1686                 goto out;
1687         BUG_ON(ret != 1);
1688         down_write(&adapter->maps_lock);
1689         if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1690                 list_add_tail(&map->list, &adapter->maps);
1691                 ret = 0;
1692         } else {
                     /* over the map limit: undo the increment above */
                     atomic_dec(&adapter->nr_maps);
1693                 put_page(map->page);
1694                 ret = -EINVAL;
1695         }
1696         up_write(&adapter->maps_lock);
1697 out:
1698         if (ret)
1699                 kfree(map);
1700         return ret;
1701 }
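/*
 * Each mapped indicator page is translated once with gmap_translate()
 * and pinned with get_user_pages_fast(), so adapter_indicators_set()
 * below can flip bits through page_address() without faulting.  At
 * most MAX_S390_ADAPTER_MAPS pages can be pinned per adapter; an
 * unmap drops the page reference again.
 */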
1702
1703 static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1704 {
1705         struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1706         struct s390_map_info *map, *tmp;
1707         int found = 0;
1708
1709         if (!adapter || !addr)
1710                 return -EINVAL;
1711
1712         down_write(&adapter->maps_lock);
1713         list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1714                 if (map->guest_addr == addr) {
1715                         found = 1;
1716                         atomic_dec(&adapter->nr_maps);
1717                         list_del(&map->list);
1718                         put_page(map->page);
1719                         kfree(map);
1720                         break;
1721                 }
1722         }
1723         up_write(&adapter->maps_lock);
1724
1725         return found ? 0 : -EINVAL;
1726 }
1727
1728 void kvm_s390_destroy_adapters(struct kvm *kvm)
1729 {
1730         int i;
1731         struct s390_map_info *map, *tmp;
1732
1733         for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1734                 if (!kvm->arch.adapters[i])
1735                         continue;
1736                 list_for_each_entry_safe(map, tmp,
1737                                          &kvm->arch.adapters[i]->maps, list) {
1738                         list_del(&map->list);
1739                         put_page(map->page);
1740                         kfree(map);
1741                 }
1742                 kfree(kvm->arch.adapters[i]);
1743         }
1744 }
1745
1746 static int modify_io_adapter(struct kvm_device *dev,
1747                              struct kvm_device_attr *attr)
1748 {
1749         struct kvm_s390_io_adapter_req req;
1750         struct s390_io_adapter *adapter;
1751         int ret;
1752
1753         if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1754                 return -EFAULT;
1755
1756         adapter = get_io_adapter(dev->kvm, req.id);
1757         if (!adapter)
1758                 return -EINVAL;
1759         switch (req.type) {
1760         case KVM_S390_IO_ADAPTER_MASK:
1761                 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1762                 if (ret > 0)
1763                         ret = 0;
1764                 break;
1765         case KVM_S390_IO_ADAPTER_MAP:
1766                 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1767                 break;
1768         case KVM_S390_IO_ADAPTER_UNMAP:
1769                 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1770                 break;
1771         default:
1772                 ret = -EINVAL;
1773         }
1774
1775         return ret;
1776 }
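/*
 * Usage sketch (illustrative): mapping an indicator page for adapter 0
 * via KVM_DEV_FLIC_ADAPTER_MODIFY; "ind_gpa" is an assumed guest
 * address of the indicator area, "flic_fd" an assumed device fd.
 *
 *        struct kvm_s390_io_adapter_req req = {
 *                .id   = 0,
 *                .type = KVM_S390_IO_ADAPTER_MAP,
 *                .addr = ind_gpa,
 *        };
 *        struct kvm_device_attr attr = {
 *                .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *                .addr  = (__u64)(unsigned long)&req,
 *        };
 *
 *        ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */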
1777
1778 static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1779 {
1780         int r = 0;
1781         unsigned int i;
1782         struct kvm_vcpu *vcpu;
1783
1784         switch (attr->group) {
1785         case KVM_DEV_FLIC_ENQUEUE:
1786                 r = enqueue_floating_irq(dev, attr);
1787                 break;
1788         case KVM_DEV_FLIC_CLEAR_IRQS:
1789                 kvm_s390_clear_float_irqs(dev->kvm);
1790                 break;
1791         case KVM_DEV_FLIC_APF_ENABLE:
1792                 dev->kvm->arch.gmap->pfault_enabled = 1;
1793                 break;
1794         case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1795                 dev->kvm->arch.gmap->pfault_enabled = 0;
1796                 /*
1797                  * Make sure no async faults are in transit while we
1798                  * clear the queues, so that we do not need to worry
1799                  * about late-arriving workers.
1800                  */
1801                 synchronize_srcu(&dev->kvm->srcu);
1802                 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1803                         kvm_clear_async_pf_completion_queue(vcpu);
1804                 break;
1805         case KVM_DEV_FLIC_ADAPTER_REGISTER:
1806                 r = register_io_adapter(dev, attr);
1807                 break;
1808         case KVM_DEV_FLIC_ADAPTER_MODIFY:
1809                 r = modify_io_adapter(dev, attr);
1810                 break;
1811         default:
1812                 r = -EINVAL;
1813         }
1814
1815         return r;
1816 }
1817
1818 static int flic_create(struct kvm_device *dev, u32 type)
1819 {
1820         if (!dev)
1821                 return -EINVAL;
1822         if (dev->kvm->arch.flic)
1823                 return -EINVAL;
1824         dev->kvm->arch.flic = dev;
1825         return 0;
1826 }
1827
1828 static void flic_destroy(struct kvm_device *dev)
1829 {
1830         dev->kvm->arch.flic = NULL;
1831         kfree(dev);
1832 }
1833
1834 /* s390 floating irq controller (flic) */
1835 struct kvm_device_ops kvm_flic_ops = {
1836         .name = "kvm-flic",
1837         .get_attr = flic_get_attr,
1838         .set_attr = flic_set_attr,
1839         .create = flic_create,
1840         .destroy = flic_destroy,
1841 };
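/*
 * Usage sketch (illustrative, "vm_fd" is an assumed open VM fd): the
 * FLIC is instantiated once per VM and then driven through the device
 * attribute ioctls on the returned fd.
 *
 *        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *        int flic_fd = -1;
 *
 *        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *                flic_fd = cd.fd;
 */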
1842
1843 static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1844 {
1845         unsigned long bit;
1846
1847         bit = bit_nr + (addr % PAGE_SIZE) * 8;
1848
1849         return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1850 }
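/*
 * Example: with swap = 0, addr pointing at byte 2 of a page and
 * bit_nr = 5, the result is 5 + 2 * 8 = 21, i.e. bits are counted
 * from the start of the page.  With swap = 1 the number is XORed
 * with BITS_PER_LONG - 1, converting between MSB0 and LSB0 bit
 * numbering within each 64-bit word before set_bit() is applied.
 */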
1851
1852 static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1853                                           u64 addr)
1854 {
1855         struct s390_map_info *map;
1856
1857         if (!adapter)
1858                 return NULL;
1859
1860         list_for_each_entry(map, &adapter->maps, list) {
1861                 if (map->guest_addr == addr)
1862                         return map;
1863         }
1864         return NULL;
1865 }
1866
1867 static int adapter_indicators_set(struct kvm *kvm,
1868                                   struct s390_io_adapter *adapter,
1869                                   struct kvm_s390_adapter_int *adapter_int)
1870 {
1871         unsigned long bit;
1872         int summary_set, idx;
1873         struct s390_map_info *info;
1874         void *map;
1875
1876         info = get_map_info(adapter, adapter_int->ind_addr);
1877         if (!info)
1878                 return -1;
1879         map = page_address(info->page);
1880         bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
1881         set_bit(bit, map);
1882         idx = srcu_read_lock(&kvm->srcu);
1883         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1884         set_page_dirty_lock(info->page);
1885         info = get_map_info(adapter, adapter_int->summary_addr);
1886         if (!info) {
1887                 srcu_read_unlock(&kvm->srcu, idx);
1888                 return -1;
1889         }
1890         map = page_address(info->page);
1891         bit = get_ind_bit(info->addr, adapter_int->summary_offset,
1892                           adapter->swap);
1893         summary_set = test_and_set_bit(bit, map);
1894         mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1895         set_page_dirty_lock(info->page);
1896         srcu_read_unlock(&kvm->srcu, idx);
1897         return summary_set ? 0 : 1;
1898 }
1899
1900 /* Return value of set_adapter_int():
1901  * < 0 - not injected due to error
1902  * = 0 - coalesced, summary indicator already active
1903  * > 0 - injected interrupt
1904  */
1905 static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1906                            struct kvm *kvm, int irq_source_id, int level,
1907                            bool line_status)
1908 {
1909         int ret;
1910         struct s390_io_adapter *adapter;
1911
1912         /* We're only interested in the 0->1 transition. */
1913         if (!level)
1914                 return 0;
1915         adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1916         if (!adapter)
1917                 return -1;
1918         down_read(&adapter->maps_lock);
1919         ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1920         up_read(&adapter->maps_lock);
1921         if ((ret > 0) && !adapter->masked) {
1922                 struct kvm_s390_interrupt s390int = {
1923                         .type = KVM_S390_INT_IO(1, 0, 0, 0),
1924                         .parm = 0,
1925                         .parm64 = (adapter->isc << 27) | 0x80000000,
1926                 };
1927                 ret = kvm_s390_inject_vm(kvm, &s390int);
1928                 if (ret == 0)
1929                         ret = 1;
1930         }
1931         return ret;
1932 }
1933
1934 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1935                           const struct kvm_irq_routing_entry *ue)
1936 {
1937         int ret;
1938
1939         switch (ue->type) {
1940         case KVM_IRQ_ROUTING_S390_ADAPTER:
1941                 e->set = set_adapter_int;
1942                 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1943                 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1944                 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1945                 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1946                 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1947                 ret = 0;
1948                 break;
1949         default:
1950                 ret = -EINVAL;
1951         }
1952
1953         return ret;
1954 }
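/*
 * Usage sketch (illustrative): routing a GSI to an adapter interrupt
 * with KVM_SET_GSI_ROUTING; all plain variables are assumed to be set
 * up by the caller.
 *
 *        struct {
 *                struct kvm_irq_routing info;
 *                struct kvm_irq_routing_entry entry;
 *        } route = {
 *                .info.nr = 1,
 *                .entry = {
 *                        .gsi = gsi,
 *                        .type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *                        .u.adapter = {
 *                                .ind_addr = ind_addr,
 *                                .summary_addr = summary_addr,
 *                                .ind_offset = ind_bit,
 *                                .summary_offset = summary_bit,
 *                                .adapter_id = id,
 *                        },
 *                },
 *        };
 *
 *        ioctl(vm_fd, KVM_SET_GSI_ROUTING, &route);
 */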
1955
1956 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1957                 int irq_source_id, int level, bool line_status)
1958 {
1959         return -EINVAL;
1960 }