arch/s390/kvm/sigp.c
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

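/*
 * SIGP SENSE: report the status of the destination CPU. Condition code 0
 * means the CPU is operating normally; if it is stopped or has an external
 * call pending, the corresponding status bits are stored in the low word
 * of *reg and condition code 1 (status stored) is returned.
 */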
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
			u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int cpuflags;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	cpuflags = atomic_read(li->cpuflags);
	if (!(cpuflags & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	else {
		*reg &= 0xffffffff00000000UL;
		if (cpuflags & CPUSTAT_ECALL_PEND)
			*reg |= SIGP_STATUS_EXT_CALL_PENDING;
		if (cpuflags & CPUSTAT_STOPPED)
			*reg |= SIGP_STATUS_STOPPED;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
	return rc;
}

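/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt, tagged
 * with the sender's CPU address, on the destination vcpu and wake it up.
 */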
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EMERGENCY;
	inti->emerg.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

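/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: inject the emergency signal only if
 * the destination CPU is not stopped, is disabled for I/O or external
 * interrupts, waits with a nonzero PSW address, or is running with the
 * given ASN as its primary or secondary ASN; otherwise report
 * SIGP_STATUS_INCORRECT_STATE with condition code "status stored".
 */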
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
					u16 asn, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
	u16 p_asn, s_asn;
	psw_t *psw;
	u32 flags;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
	psw = &dst_vcpu->arch.sie_block->gpsw;
	p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
	s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

	/* Deliver the emergency signal? */
	if (!(flags & CPUSTAT_STOPPED)
	    || (psw->mask & psw_int_mask) != psw_int_mask
	    || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
	    || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
		return __sigp_emergency(vcpu, cpu_addr);
	} else {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}
}

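/*
 * SIGP EXTERNAL CALL: queue an external call interrupt carrying the
 * sender's CPU address on the destination vcpu and wake it up.
 */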
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_interrupt_info *inti;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = KVM_S390_INT_EXTERNAL_CALL;
	inti->extcall.code = vcpu->vcpu_id;

	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	spin_unlock_bh(&li->lock);
	VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);

	return SIGP_CC_ORDER_CODE_ACCEPTED;
}

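/*
 * Queue a stop interrupt for the destination CPU and record the requested
 * action bits. If the CPU is already stopped nothing is queued; in that
 * case -ESHUTDOWN tells the caller that a store-on-stop request still
 * needs the status to be saved.
 */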
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
	struct kvm_s390_interrupt_info *inti;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
	if (!inti)
		return -ENOMEM;
	inti->type = KVM_S390_SIGP_STOP;

	spin_lock_bh(&li->lock);
	if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		kfree(inti);
		if ((action & ACTION_STORE_ON_STOP) != 0)
			rc = -ESHUTDOWN;
		goto out;
	}
	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
	li->action_bits |= action;
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
out:
	spin_unlock_bh(&li->lock);

	return rc;
}

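/*
 * SIGP STOP / SIGP STOP AND STORE STATUS: ask the destination CPU to stop.
 * If a stop-and-store hits an already stopped CPU, the status is saved
 * here instead, after all spinlocks have been dropped.
 */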
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	rc = __inject_sigp_stop(li, action);

	VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

	if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
		/* If the CPU has already been stopped, we still have
		 * to save the status when doing stop-and-store. This
		 * has to be done after unlocking all spinlocks. */
		rc = kvm_s390_store_status_unloaded(dst_vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
	}

	return rc;
}

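/*
 * SIGP SET ARCHITECTURE: mode 0 is answered as not operational; modes 1
 * and 2 are accepted and invalidate the pfault token and async page fault
 * queue of every vcpu. All other modes are left to user space.
 */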
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
	int rc;
	unsigned int i;
	struct kvm_vcpu *v;

	switch (parameter & 0xff) {
	case 0:
		rc = SIGP_CC_NOT_OPERATIONAL;
		break;
	case 1:
	case 2:
		kvm_for_each_vcpu(i, v, vcpu->kvm) {
			v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
			kvm_clear_async_pf_completion_queue(v);
		}

		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
		break;
	default:
		rc = -EOPNOTSUPP;
	}
	return rc;
}

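/*
 * SIGP SET PREFIX: queue a set-prefix interrupt for the destination CPU.
 * The target must be in the stopped state and the new prefix area must be
 * backed by real memory, otherwise status is stored in *reg.
 */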
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
			     u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	if (cpu_addr < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	address &= 0x7fffe000u;
	if (kvm_is_error_gpa(vcpu->kvm, address)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		return SIGP_CC_STATUS_STORED;
	}

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return SIGP_CC_BUSY;

	spin_lock_bh(&li->lock);
	/* cpu must be in stopped state */
	if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		rc = SIGP_CC_STATUS_STORED;
		kfree(inti);
		goto out_li;
	}

	inti->type = KVM_S390_SIGP_SET_PREFIX;
	inti->prefix.address = address;

	list_add_tail(&inti->list, &li->list);
	atomic_set(&li->active, 1);
	if (waitqueue_active(li->wq))
		wake_up_interruptible(li->wq);
	rc = SIGP_CC_ORDER_CODE_ACCEPTED;

	VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
	spin_unlock_bh(&li->lock);
	return rc;
}

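/*
 * SIGP STORE STATUS AT ADDRESS: store the architected status of the
 * stopped destination CPU at the given 512-byte aligned absolute address.
 * An unstopped CPU or an unbacked address is reported in *reg.
 */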
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
					u32 addr, u64 *reg)
{
	struct kvm_vcpu *dst_vcpu = NULL;
	int flags;
	int rc;

	if (cpu_id < KVM_MAX_VCPUS)
		dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;

	spin_lock_bh(&dst_vcpu->arch.local_int.lock);
	flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
	spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
	if (!(flags & CPUSTAT_STOPPED)) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INCORRECT_STATE;
		return SIGP_CC_STATUS_STORED;
	}

	addr &= 0x7ffffe00;
	rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
	if (rc == -EFAULT) {
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_INVALID_PARAMETER;
		rc = SIGP_CC_STATUS_STORED;
	}
	return rc;
}

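/*
 * SIGP SENSE RUNNING: condition code 0 if the destination CPU is currently
 * backed by a host CPU, otherwise SIGP_STATUS_NOT_RUNNING is stored in
 * *reg and condition code 1 is returned.
 */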
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
				u64 *reg)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu = NULL;
	int rc;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
		/* running */
		rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	} else {
		/* not running */
		*reg &= 0xffffffff00000000UL;
		*reg |= SIGP_STATUS_NOT_RUNNING;
		rc = SIGP_CC_STATUS_STORED;
	}

	VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
		   rc);

	return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
	struct kvm_s390_local_interrupt *li;
	int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
	struct kvm_vcpu *dst_vcpu = NULL;

	if (cpu_addr >= KVM_MAX_VCPUS)
		return SIGP_CC_NOT_OPERATIONAL;

	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
	if (!dst_vcpu)
		return SIGP_CC_NOT_OPERATIONAL;
	li = &dst_vcpu->arch.local_int;
	spin_lock_bh(&li->lock);
	if (li->action_bits & ACTION_STOP_ON_STOP)
		rc = SIGP_CC_BUSY;
	spin_unlock_bh(&li->lock);

	return rc;
}

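/*
 * Decode and dispatch an intercepted SIGP instruction: the order code is
 * taken from the base/displacement field, the destination CPU address from
 * the register named by r3, and the parameter from the odd register of the
 * r1 pair. Orders handled here set the condition code; orders that must be
 * completed in user space (START, RESTART, unknown order codes) return
 * -EOPNOTSUPP.
 */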
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
	int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int r3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 parameter;
	u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
	u8 order_code;
	int rc;

	/* sigp in userspace can exit */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	order_code = kvm_s390_get_base_disp_rs(vcpu);

	if (r1 % 2)
		parameter = vcpu->run->s.regs.gprs[r1];
	else
		parameter = vcpu->run->s.regs.gprs[r1 + 1];

	trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
	switch (order_code) {
	case SIGP_SENSE:
		vcpu->stat.instruction_sigp_sense++;
		rc = __sigp_sense(vcpu, cpu_addr,
				  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_EXTERNAL_CALL:
		vcpu->stat.instruction_sigp_external_call++;
		rc = __sigp_external_call(vcpu, cpu_addr);
		break;
	case SIGP_EMERGENCY_SIGNAL:
		vcpu->stat.instruction_sigp_emergency++;
		rc = __sigp_emergency(vcpu, cpu_addr);
		break;
	case SIGP_STOP:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
		break;
	case SIGP_STOP_AND_STORE_STATUS:
		vcpu->stat.instruction_sigp_stop++;
		rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
						 ACTION_STOP_ON_STOP);
		break;
	case SIGP_STORE_STATUS_AT_ADDRESS:
		rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
						 &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SET_ARCHITECTURE:
		vcpu->stat.instruction_sigp_arch++;
		rc = __sigp_set_arch(vcpu, parameter);
		break;
	case SIGP_SET_PREFIX:
		vcpu->stat.instruction_sigp_prefix++;
		rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
				       &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_COND_EMERGENCY_SIGNAL:
		rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
						  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_SENSE_RUNNING:
		vcpu->stat.instruction_sigp_sense_running++;
		rc = __sigp_sense_running(vcpu, cpu_addr,
					  &vcpu->run->s.regs.gprs[r1]);
		break;
	case SIGP_START:
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
			rc = -EOPNOTSUPP;    /* Handle START in user space */
		break;
	case SIGP_RESTART:
		vcpu->stat.instruction_sigp_restart++;
		rc = sigp_check_callable(vcpu, cpu_addr);
		if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
			VCPU_EVENT(vcpu, 4,
				   "sigp restart %x to handle userspace",
				   cpu_addr);
			/* user space must know about restart */
			rc = -EOPNOTSUPP;
		}
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (rc < 0)
		return rc;

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}