2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * KVM/MIPS: Instruction/Exception emulation
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/ktime.h>
15 #include <linux/kvm_host.h>
16 #include <linux/module.h>
17 #include <linux/vmalloc.h>
19 #include <linux/bootmem.h>
20 #include <linux/random.h>
22 #include <asm/cacheflush.h>
23 #include <asm/cpu-info.h>
24 #include <asm/mmu_context.h>
25 #include <asm/tlbflush.h>
29 #include <asm/r4kcache.h>
30 #define CONFIG_MIPS_MT
32 #include "kvm_mips_opcode.h"
33 #include "kvm_mips_int.h"
34 #include "kvm_mips_comm.h"
39 * Compute the return address and emulate branch simulation, if required.
40 * This function should only be called when the guest is in a branch delay slot.
42 unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
45 unsigned int dspcontrol;
46 union mips_instruction insn;
47 struct kvm_vcpu_arch *arch = &vcpu->arch;
49 long nextpc = KVM_INVALID_INST;
54 /* Read the instruction */
55 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57 if (insn.word == KVM_INVALID_INST)
58 return KVM_INVALID_INST;
60 switch (insn.i_format.opcode) {
61 /* jr and jalr are in r_format format. */
63 switch (insn.r_format.func) {
65 arch->gprs[insn.r_format.rd] = epc + 8;
68 nextpc = arch->gprs[insn.r_format.rs];
74 * This group contains:
75 * bltz_op, bgez_op, bltzl_op, bgezl_op,
76 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
79 switch (insn.i_format.rt) {
82 if ((long)arch->gprs[insn.i_format.rs] < 0)
83 epc = epc + 4 + (insn.i_format.simmediate << 2);
91 if ((long)arch->gprs[insn.i_format.rs] >= 0)
92 epc = epc + 4 + (insn.i_format.simmediate << 2);
100 arch->gprs[31] = epc + 8;
101 if ((long)arch->gprs[insn.i_format.rs] < 0)
102 epc = epc + 4 + (insn.i_format.simmediate << 2);
110 arch->gprs[31] = epc + 8;
111 if ((long)arch->gprs[insn.i_format.rs] >= 0)
112 epc = epc + 4 + (insn.i_format.simmediate << 2);
121 dspcontrol = rddsp(0x01);
123 if (dspcontrol >= 32)
124 epc = epc + 4 + (insn.i_format.simmediate << 2);
132 /* These are unconditional and in j_format. */
134 arch->gprs[31] = instpc + 8;
139 epc |= (insn.j_format.target << 2);
143 /* These are conditional and in i_format. */
146 if (arch->gprs[insn.i_format.rs] ==
147 arch->gprs[insn.i_format.rt])
148 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 if (arch->gprs[insn.i_format.rs] !=
157 arch->gprs[insn.i_format.rt])
158 epc = epc + 4 + (insn.i_format.simmediate << 2);
164 case blez_op: /* not really i_format */
166 /* rt field assumed to be zero */
167 if ((long)arch->gprs[insn.i_format.rs] <= 0)
168 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 /* rt field assumed to be zero */
177 if ((long)arch->gprs[insn.i_format.rs] > 0)
178 epc = epc + 4 + (insn.i_format.simmediate << 2);
184 /* And now the FPA/cp1 branch instructions. */
186 kvm_err("%s: unsupported cop1_op\n", __func__);
193 kvm_err("%s: unaligned epc\n", __func__);
197 kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
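/*
 * Worked example of the arithmetic above: for a taken conditional branch
 * the target is epc + 4 + (simmediate << 2), so a bne at 0x80001000 with a
 * 16-bit immediate of 0x0010 resumes at 0x80001000 + 4 + 0x40 = 0x80001044.
 * The linking variants store epc + 8 (the address just past the delay
 * slot) in GPR 31 or r_format.rd before redirecting.
 */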
201 enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
203 unsigned long branch_pc;
204 enum emulation_result er = EMULATE_DONE;
206 if (cause & CAUSEF_BD) {
207 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
208 if (branch_pc == KVM_INVALID_INST) {
211 vcpu->arch.pc = branch_pc;
212 kvm_debug("BD update_pc(): New PC: %#lx\n",
218 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
224 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
225 * @vcpu: Virtual CPU.
227 * Returns: 1 if the CP0_Count timer is disabled by either the guest
228 * CP0_Cause.DC bit or the count_ctl.DC bit.
229 * 0 otherwise (in which case CP0_Count timer is running).
231 static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
233 struct mips_coproc *cop0 = vcpu->arch.cop0;
235 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
236 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
240 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
242 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
244 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
246 static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
251 now_ns = ktime_to_ns(now);
252 delta = now_ns + vcpu->arch.count_dyn_bias;
254 if (delta >= vcpu->arch.count_period) {
255 /* If delta is out of safe range the bias needs adjusting */
256 periods = div64_s64(now_ns, vcpu->arch.count_period);
257 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
258 /* Recalculate delta with new bias */
259 delta = now_ns + vcpu->arch.count_dyn_bias;
263 * We've ensured that:
264 * delta < count_period
266 * Therefore the intermediate delta*count_hz will never overflow since
267 * at the boundary condition:
268 * delta = count_period
269 * delta = NSEC_PER_SEC * 2^32 / count_hz
270 * delta * count_hz = NSEC_PER_SEC * 2^32
272 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
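/*
 * A worked instance of the bound above, assuming the default 100 MHz
 * frequency: count_period = 2^32 * NSEC_PER_SEC / count_hz, so keeping
 * delta < count_period keeps delta * count_hz < 2^32 * NSEC_PER_SEC
 * (roughly 2^62), comfortably inside 64 bits. For delta = 1 ms the result
 * is 10^6 * 10^8 / 10^9 = 100000 Count ticks.
 */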
276 * kvm_mips_count_time() - Get effective current time.
277 * @vcpu: Virtual CPU.
279 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
280 * except when the master disable bit is set in count_ctl, in which case it is
281 * count_resume, i.e. the time that the count was disabled.
283 * Returns: Effective monotonic ktime for CP0_Count.
285 static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
287 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
288 return vcpu->arch.count_resume;
294 * kvm_mips_read_count_running() - Read the current count value as if running.
295 * @vcpu: Virtual CPU.
296 * @now: Kernel time to read CP0_Count at.
298 * Returns the current guest CP0_Count register at time @now, handling the
299 * case where a timer interrupt is pending but hasn't been handled yet.
301 * Returns: The current value of the guest CP0_Count register.
303 static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
308 /* Is the hrtimer pending? */
309 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
310 if (ktime_compare(now, expires) >= 0) {
312 * Cancel it while we handle it so there's no chance of
313 * interference with the timeout handler.
315 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
317 /* Nothing should be waiting on the timeout */
318 kvm_mips_callbacks->queue_timer_int(vcpu);
321 * Restart the timer if it was running based on the expiry time
322 * we read, so that we don't push it back 2 periods.
325 expires = ktime_add_ns(expires,
326 vcpu->arch.count_period);
327 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
332 /* Return the biased and scaled guest CP0_Count */
333 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
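/*
 * The two biases play different roles: count_dyn_bias keeps delta within
 * one count_period so the scaling in kvm_mips_ktime_to_count() cannot
 * overflow, while count_bias offsets the scaled host clock to the guest's
 * programmed value. E.g. if kvm_mips_ktime_to_count(vcpu, now) returns
 * 5000 and count_bias is -1000, the guest reads CP0_Count == 4000.
 */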
337 * kvm_mips_read_count() - Read the current count value.
338 * @vcpu: Virtual CPU.
340 * Read the current guest CP0_Count value, taking into account whether the timer is disabled.
343 * Returns: The current guest CP0_Count value.
345 uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
347 struct mips_coproc *cop0 = vcpu->arch.cop0;
349 /* If count disabled just read static copy of count */
350 if (kvm_mips_count_disabled(vcpu))
351 return kvm_read_c0_guest_count(cop0);
353 return kvm_mips_read_count_running(vcpu, ktime_get());
357 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
358 * @vcpu: Virtual CPU.
359 * @count: Output pointer for CP0_Count value at point of freeze.
361 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
362 * at the point it was frozen. It is guaranteed that any pending interrupts at
363 * the point it was frozen are handled, and none after that point.
365 * This is useful where the time/CP0_Count is needed in the calculation of the
368 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
370 * Returns: The ktime at the point of freeze.
372 static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
377 /* stop hrtimer before finding time */
378 hrtimer_cancel(&vcpu->arch.comparecount_timer);
381 /* find count at this point and handle pending hrtimer */
382 *count = kvm_mips_read_count_running(vcpu, now);
388 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
389 * @vcpu: Virtual CPU.
390 * @now: ktime at point of resume.
391 * @count: CP0_Count at point of resume.
393 * Resumes the timer and updates the timer expiry based on @now and @count.
394 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
395 * parameters need to be changed.
397 * It is guaranteed that a timer interrupt immediately after resume will be
398 * handled, but not if CP0_Compare is exactly at @count. That case is already
399 * handled by kvm_mips_freeze_hrtimer().
401 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
403 static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
404 ktime_t now, uint32_t count)
406 struct mips_coproc *cop0 = vcpu->arch.cop0;
411 /* Calculate timeout (wrap 0 to 2^32) */
412 compare = kvm_read_c0_guest_compare(cop0);
413 delta = (u64)(uint32_t)(compare - count - 1) + 1;
414 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
415 expire = ktime_add_ns(now, delta);
417 /* Update hrtimer to use new timeout */
418 hrtimer_cancel(&vcpu->arch.comparecount_timer);
419 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
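/*
 * The (u64)(uint32_t)(compare - count - 1) + 1 expression computes ticks
 * until CP0_Count reaches CP0_Compare modulo 2^32, mapped into 1..2^32:
 * compare = 0x10 with count = 0x0c gives 4 ticks, while compare == count
 * gives a full 2^32-tick period rather than zero, since an interrupt at
 * exactly @count has already been handled by kvm_mips_freeze_hrtimer().
 */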
423 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
424 * @vcpu: Virtual CPU.
426 * Recalculates and updates the expiry time of the hrtimer. This can be used
427 * after timer parameters have been altered in ways that do not depend on the
428 * time that the change occurred (in other cases kvm_mips_freeze_hrtimer() and
429 * kvm_mips_resume_hrtimer() are used directly).
431 * It is guaranteed that no timer interrupts will be lost in the process.
433 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
435 static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
441 * freeze_hrtimer takes care of timer interrupts <= count, and
442 * resume_hrtimer takes care of timer interrupts > count.
444 now = kvm_mips_freeze_hrtimer(vcpu, &count);
445 kvm_mips_resume_hrtimer(vcpu, now, count);
449 * kvm_mips_write_count() - Modify the count and update timer.
450 * @vcpu: Virtual CPU.
451 * @count: Guest CP0_Count value to set.
453 * Sets the CP0_Count value and updates the timer accordingly.
455 void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
457 struct mips_coproc *cop0 = vcpu->arch.cop0;
461 now = kvm_mips_count_time(vcpu);
462 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
464 if (kvm_mips_count_disabled(vcpu))
465 /* The timer's disabled, adjust the static count */
466 kvm_write_c0_guest_count(cop0, count);
469 kvm_mips_resume_hrtimer(vcpu, now, count);
473 * kvm_mips_init_count() - Initialise timer.
474 * @vcpu: Virtual CPU.
476 * Initialise the timer to a sensible frequency, namely 100 MHz, zero it, and set
477 * it going if it's enabled.
479 void kvm_mips_init_count(struct kvm_vcpu *vcpu)
482 vcpu->arch.count_hz = 100*1000*1000;
483 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
484 vcpu->arch.count_hz);
485 vcpu->arch.count_dyn_bias = 0;
488 kvm_mips_write_count(vcpu, 0);
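/*
 * With the default 100 MHz frequency, count_period works out to
 * 2^32 * 10 ns, i.e. guest CP0_Count wraps roughly every 42.9 seconds,
 * which is also the longest timeout kvm_mips_resume_hrtimer() can program.
 */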
492 * kvm_mips_set_count_hz() - Update the frequency of the timer.
493 * @vcpu: Virtual CPU.
494 * @count_hz: Frequency of CP0_Count timer in Hz.
496 * Change the frequency of the CP0_Count timer. This is done atomically so that
497 * CP0_Count is continuous and no timer interrupt is lost.
499 * Returns: -EINVAL if @count_hz is out of range.
502 int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
504 struct mips_coproc *cop0 = vcpu->arch.cop0;
509 /* ensure the frequency is in a sensible range... */
510 if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
512 /* ... and has actually changed */
513 if (vcpu->arch.count_hz == count_hz)
516 /* Safely freeze timer so we can keep it continuous */
517 dc = kvm_mips_count_disabled(vcpu);
519 now = kvm_mips_count_time(vcpu);
520 count = kvm_read_c0_guest_count(cop0);
522 now = kvm_mips_freeze_hrtimer(vcpu, &count);
525 /* Update the frequency */
526 vcpu->arch.count_hz = count_hz;
527 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
528 vcpu->arch.count_dyn_bias = 0;
530 /* Calculate adjusted bias so dynamic count is unchanged */
531 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
533 /* Update and resume hrtimer */
535 kvm_mips_resume_hrtimer(vcpu, now, count);
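/*
 * A minimal userspace sketch of exercising this path, assuming the
 * KVM_REG_MIPS_COUNT_HZ one-reg ID from the KVM UAPI and a vcpu fd from
 * KVM_CREATE_VCPU (illustrative only, not part of this file):
 */
#if 0
	uint64_t freq = 200000000;		/* retune CP0_Count to 200 MHz */
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_COUNT_HZ,
		.addr = (uintptr_t)&freq,
	};
	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
		perror("KVM_SET_ONE_REG(COUNT_HZ)");
#endif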
540 * kvm_mips_write_compare() - Modify compare and update timer.
541 * @vcpu: Virtual CPU.
542 * @compare: New CP0_Compare value.
544 * Update CP0_Compare to a new value and update the timeout.
546 void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
548 struct mips_coproc *cop0 = vcpu->arch.cop0;
550 /* if unchanged, must just be an ack */
551 if (kvm_read_c0_guest_compare(cop0) == compare)
555 kvm_write_c0_guest_compare(cop0, compare);
557 /* Update timeout if count enabled */
558 if (!kvm_mips_count_disabled(vcpu))
559 kvm_mips_update_hrtimer(vcpu);
563 * kvm_mips_count_disable() - Disable count.
564 * @vcpu: Virtual CPU.
566 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
567 * time will be handled but not after.
569 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
570 * count_ctl.DC has been set (count disabled).
572 * Returns: The time that the timer was stopped.
574 static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
576 struct mips_coproc *cop0 = vcpu->arch.cop0;
581 hrtimer_cancel(&vcpu->arch.comparecount_timer);
583 /* Set the static count from the dynamic count, handling pending TI */
585 count = kvm_mips_read_count_running(vcpu, now);
586 kvm_write_c0_guest_count(cop0, count);
592 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
593 * @vcpu: Virtual CPU.
595 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
596 * before the final stop time will be handled if the timer isn't disabled by
597 * count_ctl.DC, but not after.
599 * Assumes CP0_Cause.DC is clear (count enabled).
601 void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
603 struct mips_coproc *cop0 = vcpu->arch.cop0;
605 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
606 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
607 kvm_mips_count_disable(vcpu);
611 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
612 * @vcpu: Virtual CPU.
614 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
615 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
616 * potentially before even returning, so the caller should be careful with
617 * ordering of CP0_Cause modifications so as not to lose it.
619 * Assumes CP0_Cause.DC is set (count disabled).
621 void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
623 struct mips_coproc *cop0 = vcpu->arch.cop0;
626 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
629 * Set the dynamic count to match the static count.
630 * This starts the hrtimer if count_ctl.DC allows it.
631 * Otherwise it conveniently updates the biases.
633 count = kvm_read_c0_guest_count(cop0);
634 kvm_mips_write_count(vcpu, count);
638 * kvm_mips_set_count_ctl() - Update the count control KVM register.
639 * @vcpu: Virtual CPU.
640 * @count_ctl: Count control register new value.
642 * Set the count control KVM register. The timer is updated accordingly.
644 * Returns: -EINVAL if reserved bits are set.
647 int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
649 struct mips_coproc *cop0 = vcpu->arch.cop0;
650 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
653 uint32_t count, compare;
655 /* Only allow defined bits to be changed */
656 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
659 /* Apply new value */
660 vcpu->arch.count_ctl = count_ctl;
662 /* Master CP0_Count disable */
663 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
664 /* Is CP0_Cause.DC already disabling CP0_Count? */
665 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
666 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
667 /* Just record the current time */
668 vcpu->arch.count_resume = ktime_get();
669 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
670 /* disable timer and record current time */
671 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
674 * Calculate timeout relative to static count at resume
675 * time (wrap 0 to 2^32).
677 count = kvm_read_c0_guest_count(cop0);
678 compare = kvm_read_c0_guest_compare(cop0);
679 delta = (u64)(uint32_t)(compare - count - 1) + 1;
680 delta = div_u64(delta * NSEC_PER_SEC,
681 vcpu->arch.count_hz);
682 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
684 /* Handle pending interrupt */
686 if (ktime_compare(now, expire) >= 0)
687 /* Nothing should be waiting on the timeout */
688 kvm_mips_callbacks->queue_timer_int(vcpu);
690 /* Resume hrtimer without changing bias */
691 count = kvm_mips_read_count_running(vcpu, now);
692 kvm_mips_resume_hrtimer(vcpu, now, count);
700 * kvm_mips_set_count_resume() - Update the count resume KVM register.
701 * @vcpu: Virtual CPU.
702 * @count_resume: Count resume register new value.
704 * Set the count resume KVM register.
706 * Returns: -EINVAL if out of valid range (0..now).
709 int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
712 * It doesn't make sense for the resume time to be in the future, as it
713 * would be possible for the next interrupt to be more than a full
714 * period in the future.
716 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
719 vcpu->arch.count_resume = ns_to_ktime(count_resume);
724 * kvm_mips_count_timeout() - Push timer forward on timeout.
725 * @vcpu: Virtual CPU.
727 * Handle an hrtimer event by pushing the hrtimer forward a period.
729 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
731 enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
733 /* Add the Count period to the current expiry time */
734 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
735 vcpu->arch.count_period);
736 return HRTIMER_RESTART;
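/*
 * A sketch of the hrtimer callback that ends up here; the real hookup
 * lives outside this file, so the name below is illustrative. The timer
 * is embedded in struct kvm_vcpu_arch, so container_of() recovers the
 * vcpu before the timer interrupt is queued and the timer re-armed.
 */
#if 0
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_callbacks->queue_timer_int(vcpu);	/* raise guest timer IRQ */
	return kvm_mips_count_timeout(vcpu);		/* push expiry one period on */
}
#endif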
739 enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
741 struct mips_coproc *cop0 = vcpu->arch.cop0;
742 enum emulation_result er = EMULATE_DONE;
744 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
745 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
746 kvm_read_c0_guest_epc(cop0));
747 kvm_clear_c0_guest_status(cop0, ST0_EXL);
748 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
750 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
751 kvm_clear_c0_guest_status(cop0, ST0_ERL);
752 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
754 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
762 enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
764 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
765 vcpu->arch.pending_exceptions);
767 ++vcpu->stat.wait_exits;
768 trace_kvm_exit(vcpu, WAIT_EXITS);
769 if (!vcpu->arch.pending_exceptions) {
771 kvm_vcpu_block(vcpu);
774 * Once we are runnable, then definitely go off to user space to
775 * check if any I/O interrupts are pending.
777 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
778 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
779 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
787 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
788 * we can catch this, if things ever change
790 enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
792 struct mips_coproc *cop0 = vcpu->arch.cop0;
793 uint32_t pc = vcpu->arch.pc;
795 kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
799 /* Write Guest TLB Entry @ Index */
800 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
802 struct mips_coproc *cop0 = vcpu->arch.cop0;
803 int index = kvm_read_c0_guest_index(cop0);
804 struct kvm_mips_tlb *tlb = NULL;
805 uint32_t pc = vcpu->arch.pc;
807 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
808 kvm_debug("%s: illegal index: %d\n", __func__, index);
809 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
810 pc, index, kvm_read_c0_guest_entryhi(cop0),
811 kvm_read_c0_guest_entrylo0(cop0),
812 kvm_read_c0_guest_entrylo1(cop0),
813 kvm_read_c0_guest_pagemask(cop0));
814 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
817 tlb = &vcpu->arch.guest_tlb[index];
819 * Probe the shadow host TLB for the entry being overwritten; if one
820 * matches, invalidate it.
822 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
824 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
825 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
826 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
827 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
829 kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
830 pc, index, kvm_read_c0_guest_entryhi(cop0),
831 kvm_read_c0_guest_entrylo0(cop0),
832 kvm_read_c0_guest_entrylo1(cop0),
833 kvm_read_c0_guest_pagemask(cop0));
838 /* Write Guest TLB Entry @ Random Index */
839 enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
841 struct mips_coproc *cop0 = vcpu->arch.cop0;
842 struct kvm_mips_tlb *tlb = NULL;
843 uint32_t pc = vcpu->arch.pc;
846 get_random_bytes(&index, sizeof(index));
847 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
849 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
850 kvm_err("%s: illegal index: %d\n", __func__, index);
854 tlb = &vcpu->arch.guest_tlb[index];
857 * Probe the shadow host TLB for the entry being overwritten; if one
858 * matches, invalidate it.
860 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
862 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
863 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
864 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
865 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
867 kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
868 pc, index, kvm_read_c0_guest_entryhi(cop0),
869 kvm_read_c0_guest_entrylo0(cop0),
870 kvm_read_c0_guest_entrylo1(cop0));
875 enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
877 struct mips_coproc *cop0 = vcpu->arch.cop0;
878 long entryhi = kvm_read_c0_guest_entryhi(cop0);
879 uint32_t pc = vcpu->arch.pc;
882 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
884 kvm_write_c0_guest_index(cop0, index);
886 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
892 enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
893 uint32_t cause, struct kvm_run *run,
894 struct kvm_vcpu *vcpu)
896 struct mips_coproc *cop0 = vcpu->arch.cop0;
897 enum emulation_result er = EMULATE_DONE;
898 int32_t rt, rd, copz, sel, co_bit, op;
899 uint32_t pc = vcpu->arch.pc;
900 unsigned long curr_pc;
903 * Update PC and hold onto current PC in case there is
904 * an error and we want to roll back the PC
906 curr_pc = vcpu->arch.pc;
907 er = update_pc(vcpu, cause);
908 if (er == EMULATE_FAIL)
911 copz = (inst >> 21) & 0x1f;
912 rt = (inst >> 16) & 0x1f;
913 rd = (inst >> 11) & 0x1f;
915 co_bit = (inst >> 25) & 1;
921 case tlbr_op: /* Read indexed TLB entry */
922 er = kvm_mips_emul_tlbr(vcpu);
924 case tlbwi_op: /* Write indexed */
925 er = kvm_mips_emul_tlbwi(vcpu);
927 case tlbwr_op: /* Write random */
928 er = kvm_mips_emul_tlbwr(vcpu);
930 case tlbp_op: /* TLB Probe */
931 er = kvm_mips_emul_tlbp(vcpu);
934 kvm_err("!!!COP0_RFE!!!\n");
937 er = kvm_mips_emul_eret(vcpu);
941 er = kvm_mips_emul_wait(vcpu);
947 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
948 cop0->stat[rd][sel]++;
951 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
952 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
953 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
954 vcpu->arch.gprs[rt] = 0x0;
955 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
956 kvm_mips_trans_mfc0(inst, opc, vcpu);
959 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
961 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
962 kvm_mips_trans_mfc0(inst, opc, vcpu);
967 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
968 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
973 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
977 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
978 cop0->stat[rd][sel]++;
980 if ((rd == MIPS_CP0_TLB_INDEX)
981 && (vcpu->arch.gprs[rt] >=
982 KVM_MIPS_GUEST_TLB_SIZE)) {
983 kvm_err("Invalid TLB Index: %ld",
984 vcpu->arch.gprs[rt]);
988 #define C0_EBASE_CORE_MASK 0xff
989 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
990 /* Preserve CORE number */
991 kvm_change_c0_guest_ebase(cop0,
992 ~(C0_EBASE_CORE_MASK),
993 vcpu->arch.gprs[rt]);
994 kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
995 kvm_read_c0_guest_ebase(cop0));
996 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
998 vcpu->arch.gprs[rt] & ASID_MASK;
999 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
1000 ((kvm_read_c0_guest_entryhi(cop0) &
1001 ASID_MASK) != nasid)) {
1002 kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
1003 kvm_read_c0_guest_entryhi(cop0)
1008 /* Blow away the shadow host TLBs */
1009 kvm_mips_flush_host_tlb(1);
1011 kvm_write_c0_guest_entryhi(cop0,
1012 vcpu->arch.gprs[rt]);
1014 /* Are we writing to COUNT */
1015 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
1016 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1018 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
1019 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
1020 pc, kvm_read_c0_guest_compare(cop0),
1021 vcpu->arch.gprs[rt]);
1023 /* If we are writing to COMPARE */
1024 /* Clear pending timer interrupt, if any */
1025 kvm_mips_callbacks->dequeue_timer_int(vcpu);
1026 kvm_mips_write_compare(vcpu,
1027 vcpu->arch.gprs[rt]);
1028 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1029 kvm_write_c0_guest_status(cop0,
1030 vcpu->arch.gprs[rt]);
1032 * Make sure that CU1 and NMI bits are never set.
1035 kvm_clear_c0_guest_status(cop0,
1036 (ST0_CU1 | ST0_NMI));
1038 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1039 kvm_mips_trans_mtc0(inst, opc, vcpu);
1041 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1042 uint32_t old_cause, new_cause;
1044 old_cause = kvm_read_c0_guest_cause(cop0);
1045 new_cause = vcpu->arch.gprs[rt];
1046 /* Update R/W bits */
1047 kvm_change_c0_guest_cause(cop0, 0x08800300,
1049 /* DC bit enabling/disabling timer? */
1050 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1051 if (new_cause & CAUSEF_DC)
1052 kvm_mips_count_disable_cause(vcpu);
1054 kvm_mips_count_enable_cause(vcpu);
1057 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1058 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1059 kvm_mips_trans_mtc0(inst, opc, vcpu);
1063 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
1064 rd, sel, cop0->reg[rd][sel]);
1068 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1069 vcpu->arch.pc, rt, rd, sel);
1074 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1075 cop0->stat[MIPS_CP0_STATUS][0]++;
1078 vcpu->arch.gprs[rt] =
1079 kvm_read_c0_guest_status(cop0);
1083 kvm_debug("[%#lx] mfmcz_op: EI\n",
1085 kvm_set_c0_guest_status(cop0, ST0_IE);
1087 kvm_debug("[%#lx] mfmcz_op: DI\n",
1089 kvm_clear_c0_guest_status(cop0, ST0_IE);
1097 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1099 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1101 * We don't support any shadow register sets, so
1102 * SRSCtl[PSS] == SRSCtl[CSS] == 0.
1108 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1109 vcpu->arch.gprs[rt]);
1110 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1114 kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1115 vcpu->arch.pc, copz);
1122 /* Roll back the PC only if emulation was unsuccessful */
1123 if (er == EMULATE_FAIL)
1124 vcpu->arch.pc = curr_pc;
1128 * This is for special instructions whose emulation
1129 * updates the PC, so do not overwrite the PC under any circumstances.
1136 enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1137 struct kvm_run *run,
1138 struct kvm_vcpu *vcpu)
1140 enum emulation_result er = EMULATE_DO_MMIO;
1141 int32_t op, base, rt, offset;
1143 void *data = run->mmio.data;
1144 unsigned long curr_pc;
1147 * Update PC and hold onto current PC in case there is
1148 * an error and we want to roll back the PC
1150 curr_pc = vcpu->arch.pc;
1151 er = update_pc(vcpu, cause);
1152 if (er == EMULATE_FAIL)
1155 rt = (inst >> 16) & 0x1f;
1156 base = (inst >> 21) & 0x1f;
1157 offset = inst & 0xffff;
1158 op = (inst >> 26) & 0x3f;
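/*
 * Decode example: "sw $t0, 8($a0)" assembles to 0xac880008, which the
 * shifts above split into op = 0x2b (sw_op), base = 4 ($a0), rt = 8 ($t0)
 * and offset = 8. Note the effective address itself is taken from
 * host_cp0_badvaddr below rather than recomputed from base + offset.
 */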
1163 if (bytes > sizeof(run->mmio.data)) {
1164 kvm_err("%s: bad MMIO length: %d\n", __func__,
1167 run->mmio.phys_addr =
1168 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1170 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1174 run->mmio.len = bytes;
1175 run->mmio.is_write = 1;
1176 vcpu->mmio_needed = 1;
1177 vcpu->mmio_is_write = 1;
1178 *(u8 *) data = vcpu->arch.gprs[rt];
1179 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1180 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1187 if (bytes > sizeof(run->mmio.data)) {
1188 kvm_err("%s: bad MMIO length: %d\n", __func__,
1191 run->mmio.phys_addr =
1192 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1194 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1199 run->mmio.len = bytes;
1200 run->mmio.is_write = 1;
1201 vcpu->mmio_needed = 1;
1202 vcpu->mmio_is_write = 1;
1203 *(uint32_t *) data = vcpu->arch.gprs[rt];
1205 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1206 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1207 vcpu->arch.gprs[rt], *(uint32_t *) data);
1212 if (bytes > sizeof(run->mmio.data)) {
1213 kvm_err("%s: bad MMIO length: %d\n", __func__,
1216 run->mmio.phys_addr =
1217 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1219 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1224 run->mmio.len = bytes;
1225 run->mmio.is_write = 1;
1226 vcpu->mmio_needed = 1;
1227 vcpu->mmio_is_write = 1;
1228 *(uint16_t *) data = vcpu->arch.gprs[rt];
1230 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1231 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1232 vcpu->arch.gprs[rt], *(uint32_t *) data);
1236 kvm_err("Store not yet supported");
1242 /* Roll back the PC if emulation was unsuccessful */
1242 if (er == EMULATE_FAIL)
1243 vcpu->arch.pc = curr_pc;
1248 enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1249 struct kvm_run *run,
1250 struct kvm_vcpu *vcpu)
1252 enum emulation_result er = EMULATE_DO_MMIO;
1253 int32_t op, base, rt, offset;
1256 rt = (inst >> 16) & 0x1f;
1257 base = (inst >> 21) & 0x1f;
1258 offset = inst & 0xffff;
1259 op = (inst >> 26) & 0x3f;
1261 vcpu->arch.pending_load_cause = cause;
1262 vcpu->arch.io_gpr = rt;
1267 if (bytes > sizeof(run->mmio.data)) {
1268 kvm_err("%s: bad MMIO length: %d\n", __func__,
1273 run->mmio.phys_addr =
1274 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1276 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1281 run->mmio.len = bytes;
1282 run->mmio.is_write = 0;
1283 vcpu->mmio_needed = 1;
1284 vcpu->mmio_is_write = 0;
1290 if (bytes > sizeof(run->mmio.data)) {
1291 kvm_err("%s: bad MMIO length: %d\n", __func__,
1296 run->mmio.phys_addr =
1297 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1299 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1304 run->mmio.len = bytes;
1305 run->mmio.is_write = 0;
1306 vcpu->mmio_needed = 1;
1307 vcpu->mmio_is_write = 0;
1310 vcpu->mmio_needed = 2;
1312 vcpu->mmio_needed = 1;
1319 if (bytes > sizeof(run->mmio.data)) {
1320 kvm_err("%s: bad MMIO length: %d\n", __func__,
1325 run->mmio.phys_addr =
1326 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1328 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1333 run->mmio.len = bytes;
1334 run->mmio.is_write = 0;
1335 vcpu->mmio_is_write = 0;
1338 vcpu->mmio_needed = 2;
1340 vcpu->mmio_needed = 1;
1345 kvm_err("Load not yet supported");
1353 int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1355 unsigned long offset = (va & ~PAGE_MASK);
1356 struct kvm *kvm = vcpu->kvm;
1361 gfn = va >> PAGE_SHIFT;
1363 if (gfn >= kvm->arch.guest_pmap_npages) {
1364 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
1365 kvm_mips_dump_host_tlbs();
1366 kvm_arch_vcpu_dump_regs(vcpu);
1369 pfn = kvm->arch.guest_pmap[gfn];
1370 pa = (pfn << PAGE_SHIFT) | offset;
1372 kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
1375 local_flush_icache_range(CKSEG0ADDR(pa), 32);
1379 #define MIPS_CACHE_OP_INDEX_INV 0x0
1380 #define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
1381 #define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
1382 #define MIPS_CACHE_OP_IMP 0x3
1383 #define MIPS_CACHE_OP_HIT_INV 0x4
1384 #define MIPS_CACHE_OP_FILL_WB_INV 0x5
1385 #define MIPS_CACHE_OP_HIT_HB 0x6
1386 #define MIPS_CACHE_OP_FETCH_LOCK 0x7
1388 #define MIPS_CACHE_ICACHE 0x0
1389 #define MIPS_CACHE_DCACHE 0x1
1390 #define MIPS_CACHE_SEC 0x3
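/*
 * The 5-bit rt field of a CACHE instruction encodes both values: bits 4:2
 * are the operation and bits 1:0 the target cache. E.g. 0b10101 (0x15)
 * decodes as op = 0x5 (MIPS_CACHE_OP_FILL_WB_INV) on cache = 0x1
 * (MIPS_CACHE_DCACHE), matching the op/cache extraction below.
 */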
1392 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1394 struct kvm_run *run,
1395 struct kvm_vcpu *vcpu)
1397 struct mips_coproc *cop0 = vcpu->arch.cop0;
1398 enum emulation_result er = EMULATE_DONE;
1399 int32_t offset, cache, op_inst, op, base;
1400 struct kvm_vcpu_arch *arch = &vcpu->arch;
1402 unsigned long curr_pc;
1405 * Update PC and hold onto current PC in case there is
1406 * an error and we want to roll back the PC
1408 curr_pc = vcpu->arch.pc;
1409 er = update_pc(vcpu, cause);
1410 if (er == EMULATE_FAIL)
1413 base = (inst >> 21) & 0x1f;
1414 op_inst = (inst >> 16) & 0x1f;
1415 offset = inst & 0xffff;
1416 cache = (inst >> 16) & 0x3;
1417 op = (inst >> 18) & 0x7;
1419 va = arch->gprs[base] + offset;
1421 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1422 cache, op, base, arch->gprs[base], offset);
1425 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1426 * invalidate the caches entirely by stepping through all the ways/indexes.
1429 if (op == MIPS_CACHE_OP_INDEX_INV) {
1430 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1431 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1432 arch->gprs[base], offset);
1434 if (cache == MIPS_CACHE_DCACHE)
1436 else if (cache == MIPS_CACHE_ICACHE)
1439 kvm_err("%s: unsupported CACHE INDEX operation\n",
1441 return EMULATE_FAIL;
1444 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1445 kvm_mips_trans_cache_index(inst, opc, vcpu);
1451 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1452 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
1453 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1454 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1455 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1458 /* If an entry already exists then skip */
1459 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
1463 * If the address is not in the guest TLB, give the guest a fault;
1464 * the resulting handler will do the right thing.
1466 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1467 (kvm_read_c0_guest_entryhi
1468 (cop0) & ASID_MASK));
1471 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1472 vcpu->arch.host_cp0_badvaddr = va;
1473 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1476 goto dont_update_pc;
1478 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1480 * Check if the entry is valid, if not then setup a TLB
1481 * invalid exception to the guest
1483 if (!TLB_IS_VALID(*tlb, va)) {
1484 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1487 goto dont_update_pc;
1490 * We fault an entry from the guest TLB into the shadow host TLB.
1493 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1499 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1500 cache, op, base, arch->gprs[base], offset);
1503 goto dont_update_pc;
1508 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1509 if (cache == MIPS_CACHE_DCACHE
1510 && (op == MIPS_CACHE_OP_FILL_WB_INV
1511 || op == MIPS_CACHE_OP_HIT_INV)) {
1512 flush_dcache_line(va);
1514 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1516 * Replace the CACHE instruction with a SYNCI; not the same, but it avoids a trap.
1519 kvm_mips_trans_cache_va(inst, opc, vcpu);
1521 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1522 flush_dcache_line(va);
1523 flush_icache_line(va);
1525 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
1526 /* Replace the CACHE instruction, with a SYNCI */
1527 kvm_mips_trans_cache_va(inst, opc, vcpu);
1530 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1531 cache, op, base, arch->gprs[base], offset);
1534 goto dont_update_pc;
1541 vcpu->arch.pc = curr_pc;
1546 enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1547 struct kvm_run *run,
1548 struct kvm_vcpu *vcpu)
1550 enum emulation_result er = EMULATE_DONE;
1553 /* Fetch the instruction. */
1554 if (cause & CAUSEF_BD)
1557 inst = kvm_get_inst(opc, vcpu);
1559 switch (((union mips_instruction)inst).r_format.opcode) {
1561 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1566 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1573 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1577 ++vcpu->stat.cache_exits;
1578 trace_kvm_exit(vcpu, CACHE_EXITS);
1579 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1583 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1585 kvm_arch_vcpu_dump_regs(vcpu);
1593 enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1595 struct kvm_run *run,
1596 struct kvm_vcpu *vcpu)
1598 struct mips_coproc *cop0 = vcpu->arch.cop0;
1599 struct kvm_vcpu_arch *arch = &vcpu->arch;
1600 enum emulation_result er = EMULATE_DONE;
1602 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1604 kvm_write_c0_guest_epc(cop0, arch->pc);
1605 kvm_set_c0_guest_status(cop0, ST0_EXL);
1607 if (cause & CAUSEF_BD)
1608 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1610 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1612 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1614 kvm_change_c0_guest_cause(cop0, (0xff),
1615 (T_SYSCALL << CAUSEB_EXCCODE));
1617 /* Set PC to the exception entry point */
1618 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1621 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
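/*
 * The delivery helpers below all follow the SYSCALL pattern above: save
 * the faulting PC in guest CP0_EPC, set Status.EXL, mirror the host's
 * Cause.BD bit, write the ExcCode field, and vector the guest. The only
 * variation is the entry point: a TLB refill taken with EXL == 0 uses the
 * refill vector at KVM_GUEST_KSEG0 + 0x0, everything else the general
 * vector at KVM_GUEST_KSEG0 + 0x180.
 */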
1628 enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1630 struct kvm_run *run,
1631 struct kvm_vcpu *vcpu)
1633 struct mips_coproc *cop0 = vcpu->arch.cop0;
1634 struct kvm_vcpu_arch *arch = &vcpu->arch;
1635 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1636 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1638 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1640 kvm_write_c0_guest_epc(cop0, arch->pc);
1641 kvm_set_c0_guest_status(cop0, ST0_EXL);
1643 if (cause & CAUSEF_BD)
1644 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1646 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1648 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1651 /* set pc to the exception entry point */
1652 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1655 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1658 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1661 kvm_change_c0_guest_cause(cop0, (0xff),
1662 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1664 /* setup badvaddr, context and entryhi registers for the guest */
1665 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1666 /* XXXKYMA: is the context register used by linux??? */
1667 kvm_write_c0_guest_entryhi(cop0, entryhi);
1668 /* Blow away the shadow host TLBs */
1669 kvm_mips_flush_host_tlb(1);
1671 return EMULATE_DONE;
1674 enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1676 struct kvm_run *run,
1677 struct kvm_vcpu *vcpu)
1679 struct mips_coproc *cop0 = vcpu->arch.cop0;
1680 struct kvm_vcpu_arch *arch = &vcpu->arch;
1681 unsigned long entryhi =
1682 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1683 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1685 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1687 kvm_write_c0_guest_epc(cop0, arch->pc);
1688 kvm_set_c0_guest_status(cop0, ST0_EXL);
1690 if (cause & CAUSEF_BD)
1691 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1693 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1695 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1698 /* set pc to the exception entry point */
1699 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1702 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1704 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1707 kvm_change_c0_guest_cause(cop0, (0xff),
1708 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1710 /* setup badvaddr, context and entryhi registers for the guest */
1711 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1712 /* XXXKYMA: is the context register used by linux??? */
1713 kvm_write_c0_guest_entryhi(cop0, entryhi);
1714 /* Blow away the shadow host TLBs */
1715 kvm_mips_flush_host_tlb(1);
1717 return EMULATE_DONE;
1720 enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1722 struct kvm_run *run,
1723 struct kvm_vcpu *vcpu)
1725 struct mips_coproc *cop0 = vcpu->arch.cop0;
1726 struct kvm_vcpu_arch *arch = &vcpu->arch;
1727 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1728 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1730 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1732 kvm_write_c0_guest_epc(cop0, arch->pc);
1733 kvm_set_c0_guest_status(cop0, ST0_EXL);
1735 if (cause & CAUSEF_BD)
1736 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1738 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1740 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1743 /* Set PC to the exception entry point */
1744 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1746 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1748 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1751 kvm_change_c0_guest_cause(cop0, (0xff),
1752 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1754 /* setup badvaddr, context and entryhi registers for the guest */
1755 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1756 /* XXXKYMA: is the context register used by linux??? */
1757 kvm_write_c0_guest_entryhi(cop0, entryhi);
1758 /* Blow away the shadow host TLBs */
1759 kvm_mips_flush_host_tlb(1);
1761 return EMULATE_DONE;
1764 enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1766 struct kvm_run *run,
1767 struct kvm_vcpu *vcpu)
1769 struct mips_coproc *cop0 = vcpu->arch.cop0;
1770 struct kvm_vcpu_arch *arch = &vcpu->arch;
1771 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1772 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1774 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1776 kvm_write_c0_guest_epc(cop0, arch->pc);
1777 kvm_set_c0_guest_status(cop0, ST0_EXL);
1779 if (cause & CAUSEF_BD)
1780 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1782 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1784 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1787 /* Set PC to the exception entry point */
1788 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1790 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1792 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1795 kvm_change_c0_guest_cause(cop0, (0xff),
1796 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1798 /* setup badvaddr, context and entryhi registers for the guest */
1799 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1800 /* XXXKYMA: is the context register used by linux??? */
1801 kvm_write_c0_guest_entryhi(cop0, entryhi);
1802 /* Blow away the shadow host TLBs */
1803 kvm_mips_flush_host_tlb(1);
1805 return EMULATE_DONE;
1808 /* TLBMOD: store into address matching TLB with Dirty bit off */
1809 enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1810 struct kvm_run *run,
1811 struct kvm_vcpu *vcpu)
1813 enum emulation_result er = EMULATE_DONE;
1815 struct mips_coproc *cop0 = vcpu->arch.cop0;
1816 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1817 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1820 /* If address not in the guest TLB, then we are in trouble */
1821 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1823 /* XXXKYMA Invalidate and retry */
1824 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1825 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1827 kvm_mips_dump_guest_tlbs(vcpu);
1828 kvm_mips_dump_host_tlbs();
1829 return EMULATE_FAIL;
1833 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1837 enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1839 struct kvm_run *run,
1840 struct kvm_vcpu *vcpu)
1842 struct mips_coproc *cop0 = vcpu->arch.cop0;
1843 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1844 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1845 struct kvm_vcpu_arch *arch = &vcpu->arch;
1847 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1849 kvm_write_c0_guest_epc(cop0, arch->pc);
1850 kvm_set_c0_guest_status(cop0, ST0_EXL);
1852 if (cause & CAUSEF_BD)
1853 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1855 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1857 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1860 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1862 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1864 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1867 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1869 /* setup badvaddr, context and entryhi registers for the guest */
1870 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1871 /* XXXKYMA: is the context register used by linux??? */
1872 kvm_write_c0_guest_entryhi(cop0, entryhi);
1873 /* Blow away the shadow host TLBs */
1874 kvm_mips_flush_host_tlb(1);
1876 return EMULATE_DONE;
1879 enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
1881 struct kvm_run *run,
1882 struct kvm_vcpu *vcpu)
1884 struct mips_coproc *cop0 = vcpu->arch.cop0;
1885 struct kvm_vcpu_arch *arch = &vcpu->arch;
1887 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1889 kvm_write_c0_guest_epc(cop0, arch->pc);
1890 kvm_set_c0_guest_status(cop0, ST0_EXL);
1892 if (cause & CAUSEF_BD)
1893 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1895 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1899 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1901 kvm_change_c0_guest_cause(cop0, (0xff),
1902 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1903 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1905 return EMULATE_DONE;
1908 enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
1910 struct kvm_run *run,
1911 struct kvm_vcpu *vcpu)
1913 struct mips_coproc *cop0 = vcpu->arch.cop0;
1914 struct kvm_vcpu_arch *arch = &vcpu->arch;
1915 enum emulation_result er = EMULATE_DONE;
1917 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1919 kvm_write_c0_guest_epc(cop0, arch->pc);
1920 kvm_set_c0_guest_status(cop0, ST0_EXL);
1922 if (cause & CAUSEF_BD)
1923 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1925 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1927 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1929 kvm_change_c0_guest_cause(cop0, (0xff),
1930 (T_RES_INST << CAUSEB_EXCCODE));
1932 /* Set PC to the exception entry point */
1933 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1936 kvm_err("Trying to deliver RI when EXL is already set\n");
1943 enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
1945 struct kvm_run *run,
1946 struct kvm_vcpu *vcpu)
1948 struct mips_coproc *cop0 = vcpu->arch.cop0;
1949 struct kvm_vcpu_arch *arch = &vcpu->arch;
1950 enum emulation_result er = EMULATE_DONE;
1952 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1954 kvm_write_c0_guest_epc(cop0, arch->pc);
1955 kvm_set_c0_guest_status(cop0, ST0_EXL);
1957 if (cause & CAUSEF_BD)
1958 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1960 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1962 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1964 kvm_change_c0_guest_cause(cop0, (0xff),
1965 (T_BREAK << CAUSEB_EXCCODE));
1967 /* Set PC to the exception entry point */
1968 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1971 kvm_err("Trying to deliver BP when EXL is already set\n");
1978 /* ll/sc, rdhwr, sync emulation */
1980 #define OPCODE 0xfc000000
1981 #define BASE 0x03e00000
1982 #define RT 0x001f0000
1983 #define OFFSET 0x0000ffff
1984 #define LL 0xc0000000
1985 #define SC 0xe0000000
1986 #define SPEC0 0x00000000
1987 #define SPEC3 0x7c000000
1988 #define RD 0x0000f800
1989 #define FUNC 0x0000003f
1990 #define SYNC 0x0000000f
1991 #define RDHWR 0x0000003b
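/*
 * Mask-check example: "rdhwr $3, $29" (read UserLocal) assembles to
 * 0x7c03e83b, so (inst & OPCODE) == SPEC3 and (inst & FUNC) == RDHWR both
 * match, (inst & RD) >> 11 = 29 selects the UserLocal case below, and
 * (inst & RT) >> 16 = 3 is the destination GPR.
 */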
1993 enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1994 struct kvm_run *run,
1995 struct kvm_vcpu *vcpu)
1997 struct mips_coproc *cop0 = vcpu->arch.cop0;
1998 struct kvm_vcpu_arch *arch = &vcpu->arch;
1999 enum emulation_result er = EMULATE_DONE;
2000 unsigned long curr_pc;
2004 * Update PC and hold onto current PC in case there is
2005 * an error and we want to roll back the PC
2007 curr_pc = vcpu->arch.pc;
2008 er = update_pc(vcpu, cause);
2009 if (er == EMULATE_FAIL)
2012 /* Fetch the instruction. */
2013 if (cause & CAUSEF_BD)
2016 inst = kvm_get_inst(opc, vcpu);
2018 if (inst == KVM_INVALID_INST) {
2019 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
2020 return EMULATE_FAIL;
2023 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
2024 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2025 int rd = (inst & RD) >> 11;
2026 int rt = (inst & RT) >> 16;
2027 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2028 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2029 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2034 case 0: /* CPU number */
2037 case 1: /* SYNCI length */
2038 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2039 current_cpu_data.icache.linesz);
2041 case 2: /* Read count register */
2042 arch->gprs[rt] = kvm_mips_read_count(vcpu);
2044 case 3: /* Count register resolution */
2045 switch (current_cpu_data.cputype) {
2055 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2059 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2063 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
2067 return EMULATE_DONE;
2071 * Roll back the PC (if in a branch delay slot the PC already points to the
2072 * branch target), and pass the RI exception to the guest OS.
2074 vcpu->arch.pc = curr_pc;
2075 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2078 enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2079 struct kvm_run *run)
2081 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2082 enum emulation_result er = EMULATE_DONE;
2083 unsigned long curr_pc;
2085 if (run->mmio.len > sizeof(*gpr)) {
2086 kvm_err("Bad MMIO length: %d", run->mmio.len);
2092 * Update PC and hold onto current PC in case there is
2093 * an error and we want to roll back the PC
2095 curr_pc = vcpu->arch.pc;
2096 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2097 if (er == EMULATE_FAIL)
2100 switch (run->mmio.len) {
2102 *gpr = *(int32_t *) run->mmio.data;
2106 if (vcpu->mmio_needed == 2)
2107 *gpr = *(int16_t *) run->mmio.data;
2109 *gpr = *(uint16_t *) run->mmio.data;
2113 if (vcpu->mmio_needed == 2)
2114 *gpr = *(int8_t *) run->mmio.data;
2116 *gpr = *(u8 *) run->mmio.data;
2120 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2121 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2122 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
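/*
 * mmio_needed doubles as a sign-extension flag: kvm_mips_emulate_load()
 * sets it to 2 for lb/lh, which route through the int8_t/int16_t casts
 * above, and to 1 for lbu/lhu, which zero-extend. A returned byte of 0x80
 * thus completes as sign-extended -128 in the GPR for lb but as 0x80 for
 * lbu.
 */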
2129 static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2131 struct kvm_run *run,
2132 struct kvm_vcpu *vcpu)
2134 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2135 struct mips_coproc *cop0 = vcpu->arch.cop0;
2136 struct kvm_vcpu_arch *arch = &vcpu->arch;
2137 enum emulation_result er = EMULATE_DONE;
2139 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2141 kvm_write_c0_guest_epc(cop0, arch->pc);
2142 kvm_set_c0_guest_status(cop0, ST0_EXL);
2144 if (cause & CAUSEF_BD)
2145 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2147 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2149 kvm_change_c0_guest_cause(cop0, (0xff),
2150 (exccode << CAUSEB_EXCCODE));
2152 /* Set PC to the exception entry point */
2153 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2154 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2156 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2157 exccode, kvm_read_c0_guest_epc(cop0),
2158 kvm_read_c0_guest_badvaddr(cop0));
2160 kvm_err("Trying to deliver EXC when EXL is already set\n");
2167 enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2169 struct kvm_run *run,
2170 struct kvm_vcpu *vcpu)
2172 enum emulation_result er = EMULATE_DONE;
2173 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2174 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2176 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2186 case T_COP_UNUSABLE:
2187 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2188 er = EMULATE_PRIV_FAIL;
2196 * If we are accessing guest kernel space, then send an
2197 * address error exception to the guest.
2199 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2200 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2203 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2204 er = EMULATE_PRIV_FAIL;
2210 * If we are accessing guest kernel space, then send an
2211 * address error exception to the guest.
2213 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2214 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2217 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2218 er = EMULATE_PRIV_FAIL;
2223 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2225 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2227 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2229 er = EMULATE_PRIV_FAIL;
2232 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2234 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2236 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2238 er = EMULATE_PRIV_FAIL;
2241 er = EMULATE_PRIV_FAIL;
2246 if (er == EMULATE_PRIV_FAIL)
2247 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2253 * User Address (UA) fault; this can happen if:
2254 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in which
2255 * case we pass on the fault to the guest kernel and let it handle it.
2256 * (2) TLB entry is present in the Guest TLB but not in the shadow, in which
2257 * case we inject the TLB from the Guest TLB into the shadow host TLB.
2259 enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2261 struct kvm_run *run,
2262 struct kvm_vcpu *vcpu)
2264 enum emulation_result er = EMULATE_DONE;
2265 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2266 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2269 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2270 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2273 * KVM would not have got the exception if this entry was valid in the
2274 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2275 * send the guest an exception. The guest exc handler should then inject
2276 * an entry into the guest TLB.
2278 index = kvm_mips_guest_tlb_lookup(vcpu,
2280 (kvm_read_c0_guest_entryhi
2281 (vcpu->arch.cop0) & ASID_MASK));
2283 if (exccode == T_TLB_LD_MISS) {
2284 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2285 } else if (exccode == T_TLB_ST_MISS) {
2286 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2288 kvm_err("%s: invalid exc code: %d\n", __func__,
2293 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2296 * Check if the entry is valid, if not then setup a TLB invalid
2297 * exception to the guest
2299 if (!TLB_IS_VALID(*tlb, va)) {
2300 if (exccode == T_TLB_LD_MISS) {
2301 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2303 } else if (exccode == T_TLB_ST_MISS) {
2304 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2307 kvm_err("%s: invalid exc code: %d\n", __func__,
2312 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2313 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2315 * OK, we have a Guest TLB entry; now inject it into the shadow host TLB.
2318 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,