// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
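
/*
 * When SBI v0.1 or SBI PMU support is compiled out, these stubs keep the
 * sbi_ext[] table below well-formed: an extension ID range of -1UL can
 * never match a guest ecall, and the NULL handler marks the extension as
 * unimplemented.
 */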
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};
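
/*
 * Registry of all SBI extensions known to this host, mapping the
 * user-visible KVM_RISCV_SBI_EXT_* ID (used by the ONE_REG interface to
 * enable/disable an extension) to its implementation. The base extension
 * is registered with KVM_RISCV_SBI_EXT_MAX since it must never be
 * disabled.
 */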
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};
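
/*
 * Forward the pending SBI call to userspace: stash the a0-a7 register
 * state in the kvm_run structure and exit the KVM_RUN ioctl with
 * KVM_EXIT_RISCV_SBI. ret[] is pre-seeded with a0/a1 so that a userspace
 * handler which never touches them leaves the guest registers unchanged.
 */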
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = cp->a0;
	run->riscv_sbi.ret[1] = cp->a1;
}
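
/*
 * Handle an SBI system reset (e.g. SRST shutdown/reboot): power off every
 * vCPU, park them with KVM_REQ_SLEEP, and exit to userspace with a
 * KVM_EXIT_SYSTEM_EVENT carrying the reset type and reason.
 */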
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
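
/*
 * Complete an SBI call that was forwarded to userspace: copy the ret[]
 * values userspace filled in back into a0/a1 and skip the trapping ecall
 * instruction (always 4 bytes, since ecall has no compressed form).
 */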
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}
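
/*
 * Enable or disable a single SBI extension on behalf of userspace;
 * reg_val is 1 to enable or 0 to disable the extension identified by
 * reg_num.
 */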
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
	    (reg_val != 1 && reg_val != 0))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * We can't set the extension status to available here, since it may
	 * have a probe() function which needs to confirm availability first,
	 * but it may be too early to call that here. We can set the status to
	 * unavailable, though.
	 */
	if (!reg_val)
		scontext->ext_status[sext->ext_idx] =
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}
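
/*
 * Report whether a single SBI extension is enabled; writes 1 or 0 to
 * *reg_val.
 */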
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	unsigned long i;
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;

	if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == reg_num) {
			sext = &sbi_ext[i];
			break;
		}
	}
	if (!sext)
		return -ENOENT;

	/*
	 * If the extension status is still uninitialized, then we should probe
	 * to determine if it's available, but it may be too early to do that
	 * here. The best we can do is report that the extension has not been
	 * disabled, i.e. we return 1 when the extension is available and also
	 * when it only may be available.
	 */
	*reg_val = scontext->ext_status[sext->ext_idx] !=
			KVM_RISCV_SBI_EXT_UNAVAILABLE;

	return 0;
}
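
/*
 * The MULTI registers view the extension IDs as a bitmap split across
 * unsigned-long-sized registers: bit i of multi-register reg_num covers
 * extension ID (reg_num * BITS_PER_LONG) + i. This lets userspace toggle
 * up to BITS_PER_LONG extensions with a single ONE_REG access.
 */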
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -EINVAL;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}
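
/*
 * KVM_SET_ONE_REG handler for the SBI extension registers. As an
 * illustration (not part of this file), userspace on a 64-bit host could
 * disable the PMU extension before the first KVM_RUN roughly like this:
 *
 *	uint64_t val = 0;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
 *		      KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE |
 *		      KVM_RISCV_SBI_EXT_PMU,
 *		.addr = (unsigned long)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */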
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	/* Extensions can only be reconfigured before the vCPU has run */
	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -EINVAL;
	}
}
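
/*
 * KVM_GET_ONE_REG handler for the SBI extension registers. MULTI_EN reads
 * return the enabled-extension bitmap; MULTI_DIS reads return its
 * complement.
 */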
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
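
/*
 * Look up the extension implementing SBI extension ID extid. Availability
 * is resolved lazily: the first lookup of an uninitialized extension calls
 * its probe() hook (if any) and caches the result in ext_status, so later
 * ecalls skip the probe. Extensions registered with an index of
 * KVM_RISCV_SBI_EXT_MAX or above (i.e. the base extension) bypass the
 * status check entirely and are always available.
 */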
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_AVAILABLE)
				return ext;
			if (scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_UNAVAILABLE)
				return NULL;
			if (ext->probe && !ext->probe(vcpu)) {
				scontext->ext_status[entry->ext_idx] =
					KVM_RISCV_SBI_EXT_UNAVAILABLE;
				return NULL;
			}

			scontext->ext_status[entry->ext_idx] =
				KVM_RISCV_SBI_EXT_AVAILABLE;
			return ext;
		}
	}

	return NULL;
}
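
/*
 * Top-level SBI ecall dispatcher, called when the guest executes an ecall
 * from VS-mode. The return value follows the usual KVM exit-handler
 * convention: < 0 exits the ioctl loop with an error, 0 exits to userspace
 * for handling (e.g. a forwarded SBI call), and 1 resumes the guest.
 */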
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, it exits the
	 * ioctl loop and forwards the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e. trap, exit or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}

ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}