arch/riscv/kvm/vcpu_sbi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>

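/*
 * When SBI v0.1 or SBI PMU support is compiled out, provide placeholder
 * entries with an invalid extension ID range and no handler so that the
 * sbi_ext[] table below stays fully populated.
 */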
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
        .extid_start = -1UL,
        .extid_end = -1UL,
        .handler = NULL,
};
#endif

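/* Ties a user-visible SBI extension ID to its in-kernel implementation. */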
struct kvm_riscv_sbi_extension_entry {
        enum KVM_RISCV_SBI_EXT_ID ext_idx;
        const struct kvm_vcpu_sbi_extension *ext_ptr;
};

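/* Table of all SBI extensions known to KVM, searched linearly on lookup. */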
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
        {
                .ext_idx = KVM_RISCV_SBI_EXT_V01,
                .ext_ptr = &vcpu_sbi_ext_v01,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
                .ext_ptr = &vcpu_sbi_ext_base,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_TIME,
                .ext_ptr = &vcpu_sbi_ext_time,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_IPI,
                .ext_ptr = &vcpu_sbi_ext_ipi,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
                .ext_ptr = &vcpu_sbi_ext_rfence,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_SRST,
                .ext_ptr = &vcpu_sbi_ext_srst,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_HSM,
                .ext_ptr = &vcpu_sbi_ext_hsm,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_PMU,
                .ext_ptr = &vcpu_sbi_ext_pmu,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
                .ext_ptr = &vcpu_sbi_ext_experimental,
        },
        {
                .ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
                .ext_ptr = &vcpu_sbi_ext_vendor,
        },
};

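/*
 * Forward the current SBI call to userspace by filling in the
 * KVM_EXIT_RISCV_SBI exit information from the guest's argument registers.
 */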
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        vcpu->arch.sbi_context.return_handled = 0;
        vcpu->stat.ecall_exit_stat++;
        run->exit_reason = KVM_EXIT_RISCV_SBI;
        run->riscv_sbi.extension_id = cp->a7;
        run->riscv_sbi.function_id = cp->a6;
        run->riscv_sbi.args[0] = cp->a0;
        run->riscv_sbi.args[1] = cp->a1;
        run->riscv_sbi.args[2] = cp->a2;
        run->riscv_sbi.args[3] = cp->a3;
        run->riscv_sbi.args[4] = cp->a4;
        run->riscv_sbi.args[5] = cp->a5;
        run->riscv_sbi.ret[0] = cp->a0;
        run->riscv_sbi.ret[1] = cp->a1;
}

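/*
 * Power off every VCPU of the VM and report a system-level event
 * (e.g. shutdown or reset) to userspace via KVM_EXIT_SYSTEM_EVENT.
 */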
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run,
                                     u32 type, u64 reason)
{
        unsigned long i;
        struct kvm_vcpu *tmp;

        kvm_for_each_vcpu(i, tmp, vcpu->kvm)
                tmp->arch.power_off = true;
        kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

        memset(&run->system_event, 0, sizeof(run->system_event));
        run->system_event.type = type;
        run->system_event.ndata = 1;
        run->system_event.data[0] = reason;
        run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

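/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values provided by userspace back into the guest and step over the ecall.
 */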
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        /* Handle SBI return only once */
        if (vcpu->arch.sbi_context.return_handled)
                return 0;
        vcpu->arch.sbi_context.return_handled = 1;

        /* Update return values */
        cp->a0 = run->riscv_sbi.ret[0];
        cp->a1 = run->riscv_sbi.ret[1];

        /* Move to next instruction */
        vcpu->arch.guest_context.sepc += 4;

        return 0;
}

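/* Enable or disable a single SBI extension on behalf of the ONE_REG API. */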
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long reg_val)
{
        unsigned long i;
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

        if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
            (reg_val != 1 && reg_val != 0))
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == reg_num) {
                        sext = &sbi_ext[i];
                        break;
                }
        }
        if (!sext)
                return -ENOENT;

        /*
         * We can't set the extension status to available here, since it may
         * have a probe() function which needs to confirm availability first,
         * but it may be too early to call that here. We can set the status to
         * unavailable, though.
         */
        if (!reg_val)
                scontext->ext_status[sext->ext_idx] =
                        KVM_RISCV_SBI_EXT_UNAVAILABLE;

        return 0;
}

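/* Report whether a single SBI extension is enabled (or may still be). */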
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
                                         unsigned long reg_num,
                                         unsigned long *reg_val)
{
        unsigned long i;
        const struct kvm_riscv_sbi_extension_entry *sext = NULL;
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

        if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                if (sbi_ext[i].ext_idx == reg_num) {
                        sext = &sbi_ext[i];
                        break;
                }
        }
        if (!sext)
                return -ENOENT;

        /*
         * If the extension status is still uninitialized, then we should probe
         * to determine if it's available, but it may be too early to do that
         * here. The best we can do is report that the extension has not been
         * disabled, i.e. we return 1 when the extension is available and also
         * when it only may be available.
         */
        *reg_val = scontext->ext_status[sext->ext_idx] !=
                                KVM_RISCV_SBI_EXT_UNAVAILABLE;

        return 0;
}

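/* Apply an enable/disable bitmask covering up to BITS_PER_LONG extensions. */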
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long reg_val, bool enable)
{
        unsigned long i, ext_id;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -EINVAL;

        for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
        }

        return 0;
}

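/* Build a bitmask of the extensions that have not been disabled. */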
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
                                        unsigned long reg_num,
                                        unsigned long *reg_val)
{
        unsigned long i, ext_id, ext_val;

        if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
                return -EINVAL;

        for (i = 0; i < BITS_PER_LONG; i++) {
                ext_id = i + reg_num * BITS_PER_LONG;
                if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
                        break;

                ext_val = 0;
                riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
                if (ext_val)
                        *reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
        }

        return 0;
}

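/*
 * ONE_REG set handler for the SBI extension registers. Changes are only
 * allowed before the VCPU has run for the first time.
 */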
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        if (vcpu->arch.ran_atleast_once)
                return -EBUSY;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
        case KVM_REG_RISCV_SBI_MULTI_EN:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
        default:
                return -EINVAL;
        }

        return 0;
}

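/* ONE_REG get handler for the SBI extension registers. */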
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
                                   const struct kvm_one_reg *reg)
{
        int rc;
        unsigned long __user *uaddr =
                        (unsigned long __user *)(unsigned long)reg->addr;
        unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                            KVM_REG_SIZE_MASK |
                                            KVM_REG_RISCV_SBI_EXT);
        unsigned long reg_val, reg_subtype;

        if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                return -EINVAL;

        reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
        reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

        reg_val = 0;
        switch (reg_subtype) {
        case KVM_REG_RISCV_SBI_SINGLE:
                rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
                break;
        case KVM_REG_RISCV_SBI_MULTI_EN:
        case KVM_REG_RISCV_SBI_MULTI_DIS:
                rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
                if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
                        reg_val = ~reg_val;
                break;
        default:
                rc = -EINVAL;
        }
        if (rc)
                return rc;

        if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
                return -EFAULT;

        return 0;
}

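/*
 * Find the SBI extension that owns @extid. Availability is probed on first
 * use and the result is cached in the per-VCPU extension status array.
 */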
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
                                struct kvm_vcpu *vcpu, unsigned long extid)
{
        struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
        const struct kvm_riscv_sbi_extension_entry *entry;
        const struct kvm_vcpu_sbi_extension *ext;
        int i;

        for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
                entry = &sbi_ext[i];
                ext = entry->ext_ptr;

                if (ext->extid_start <= extid && ext->extid_end >= extid) {
                        if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
                            scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_AVAILABLE)
                                return ext;
                        if (scontext->ext_status[entry->ext_idx] ==
                                                KVM_RISCV_SBI_EXT_UNAVAILABLE)
                                return NULL;
                        if (ext->probe && !ext->probe(vcpu)) {
                                scontext->ext_status[entry->ext_idx] =
                                        KVM_RISCV_SBI_EXT_UNAVAILABLE;
                                return NULL;
                        }

                        scontext->ext_status[entry->ext_idx] =
                                KVM_RISCV_SBI_EXT_AVAILABLE;
                        return ext;
                }
        }

        return NULL;
}

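/*
 * Top-level SBI call handler, invoked when the guest executes an ecall:
 * dispatch to the matching extension, then handle traps, userspace exits
 * and error propagation, advancing sepc when the call completes in-kernel.
 */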
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = {0};
        struct kvm_vcpu_sbi_return sbi_ret = {
                .out_val = 0,
                .err_val = 0,
                .utrap = &utrap,
        };
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &sbi_ret);
        } else {
                /* Return error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /*
         * When the SBI extension returns a Linux error code, it exits the ioctl
         * loop and forwards the error to userspace.
         */
        if (ret < 0) {
                next_sepc = false;
                goto ecall_done;
        }

        /* Handle special error cases, i.e. trap, exit, or userspace forward */
        if (sbi_ret.utrap->scause) {
                /* No need to increment sepc or exit ioctl loop */
                ret = 1;
                sbi_ret.utrap->sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (sbi_ret.uexit) {
                next_sepc = false;
                ret = 0;
        } else {
                cp->a0 = sbi_ret.err_val;
                ret = 1;
        }
ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        /* a1 should only be updated when we continue the ioctl loop */
        if (!ext_is_v01 && ret == 1)
                cp->a1 = sbi_ret.out_val;

        return ret;
}