}
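+/*
+ * Width of the virtual address space in bits: 57 with 5-level paging
+ * (LA57) enabled, 48 with classic 4-level paging.
+ */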
+uint32_t cpu_x86_virtual_addr_width(CPUX86State *env)
+{
+ if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
+ return 57; /* 57 bits virtual */
+ } else {
+ return 48; /* 48 bits virtual */
+ }
+}
+
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx)
break;
case 0x80000008:
/* virtual & phys address size in low 2 bytes. */
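+ /*
+ * Example (illustrative): with cpu->phys_bits == 40 and LA57 enabled,
+ * EAX = (57 << 8) | 40 = 0x3928; without long mode, only the physical
+ * width is reported in EAX[7:0].
+ */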
+ *eax = cpu->phys_bits;
if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
/* 64 bit processor */
- *eax = cpu->phys_bits; /* configurable physical bits */
- if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
- *eax |= 0x00003900; /* 57 bits virtual */
- } else {
- *eax |= 0x00003000; /* 48 bits virtual */
- }
- } else {
- *eax = cpu->phys_bits;
+ *eax |= (cpu_x86_virtual_addr_width(env) << 8);
}
*ebx = env->features[FEAT_8000_0008_EBX];
if (cs->nr_cores * cs->nr_threads > 1) {
((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
+/*
+ * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
+ * addresses in the segment registers that have been loaded.
+ */
+static inline void svm_canonicalization(CPUX86State *env,
+                                        target_ulong *seg_base)
+{
+ uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
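+ /*
+ * Sign-extend from the highest implemented virtual address bit: e.g.
+ * with a 48-bit address space (illustrative), shift_amt is 16 and
+ * 0x0000800000000000 becomes 0xffff800000000000.
+ */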
+ *seg_base = ((int64_t)*seg_base << shift_amt) >> shift_amt;
+}
+
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
SegmentCache *sc)
{
sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+ svm_canonicalization(env, &sc->base);
}
static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
offsetof(struct vmcb, control.tsc_offset));
- env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.gdtr.base));
- env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.gdtr.limit));
-
- env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.idtr.base));
- env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
- save.idtr.limit));
-
new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
if (new_cr0 & SVM_CR0_RESERVED_MASK) {
cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
R_SS);
svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
R_DS);
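+ /*
+ * Load IDTR and GDTR through svm_load_seg() so that their base
+ * addresses are canonicalized like the other segment bases.
+ */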
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.idtr),
+ &env->idt);
+ svm_load_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.gdtr),
+ &env->gdt);
env->eip = x86_ldq_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, save.rip));
env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
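+ /* KERNEL_GS_BASE is canonicalized (sign-extended) like the segment bases. */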
+ svm_canonicalization(env, &env->kernelgsbase);
#endif
env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
env->sysenter_cs = x86_ldq_phys(cs,
save.sysenter_esp));
env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
save.sysenter_eip));
}
void helper_vmsave(CPUX86State *env, int aflag)