// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
 *
 * Copyright (c) 2005 Keir Fraser
 *
 * Linux coding style, mod r/m decoder, segment base fixes, real-mode
 * privileged instructions:
 *
 * Copyright (C) 2006 Qumranet
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *   Avi Kivity <avi@qumranet.com>
 *   Yaniv Kamay <yaniv@qumranet.com>
 *
 * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>

/*
 * Operand types
 */
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AL/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */

#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)

/*
 * Opcode effective-address decode tables.
 * Note that we only emulate instructions that have at least one memory
 * operand (excluding implicit stack references). We assume that stack
 * references and instruction fetches will never occur in special memory
 * areas that require emulation. So, for example, 'mov <imm>,<reg>' need
 * not be handled.
 */

/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstMem16 (OpMem16 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
#define ModeDual (7<<15) /* Different instruction for 32/64 bit */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define AlignMask ((u64)7 << 41)
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define NearBranch ((u64)1 << 52) /* Near branches */
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operands */
#define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */

#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

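/*
 * Example: an entry such as DstReg | SrcMem | ModRM packs three facts
 * into one u64: OpReg in bits 1-5 (DstShift), OpMem in bits 6-10
 * (SrcShift) and the ModRM flag in bit 19.  The decoder recovers each
 * operand type with (ctxt->d >> shift) & OpMask.
 */
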
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)

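/*
 * Example: X16(x) expands to sixteen copies of x; the opcode tables use
 * it to fill all sixteen slots of, e.g., the Jcc and SETcc rows with a
 * single entry.
 */
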
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8

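/*
 * On a 64-bit kernel NR_FASTOP is ilog2(8) + 1 == 4: one stub each for
 * 1, 2, 4 and 8 byte operands, spaced FASTOP_SIZE (8) bytes apart.
 */
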
struct opcode {
	u64 flags;
	u8 intercept;
	u8 pad[7];
	union {
		int (*execute)(struct x86_emulate_ctxt *ctxt);
		const struct opcode *group;
		const struct group_dual *gdual;
		const struct gprefix *gprefix;
		const struct escape *esc;
		const struct instr_dual *idual;
		const struct mode_dual *mdual;
		void (*fastop)(struct fastop *fake);
	} u;
	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};

struct group_dual {
	struct opcode mod012[8];
	struct opcode mod3[8];
};

struct gprefix {
	struct opcode pfx_no;
	struct opcode pfx_66;
	struct opcode pfx_f2;
	struct opcode pfx_f3;
};

struct escape {
	struct opcode op[8];
	struct opcode high[64];
};

struct instr_dual {
	struct opcode mod012;
	struct opcode mod3;
};

struct mode_dual {
	struct opcode mode32;
	struct opcode mode64;
};

#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a

enum x86_transfer_type {
	X86_TRANSFER_NONE,
	X86_TRANSFER_CALL_JMP,
	X86_TRANSFER_RET,
	X86_TRANSFER_TASK_SWITCH,
};

static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	if (!(ctxt->regs_valid & (1 << nr))) {
		ctxt->regs_valid |= 1 << nr;
		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
	}
	return ctxt->_regs[nr];
}

static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	ctxt->regs_valid |= 1 << nr;
	ctxt->regs_dirty |= 1 << nr;
	return &ctxt->_regs[nr];
}

static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
	reg_read(ctxt, nr);
	return reg_write(ctxt, nr);
}

static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
	unsigned reg;

	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}

static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
	ctxt->regs_dirty = 0;
	ctxt->regs_valid = 0;
}

/*
 * These EFLAGS bits are restored from saved value during emulation, and
 * any changes are written back to the saved value after emulation.
 */
#define EFLAGS_MASK (X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
		     X86_EFLAGS_PF|X86_EFLAGS_CF)

/*
 * fastop functions have a special calling convention:
 *
 * dst:    rax        (in/out)
 * src:    rdx        (in/out)
 * src2:   rcx        (in)
 * flags:  rflags     (in/out)
 * ex:     rsi        (in:fastop pointer, out:zero if exception)
 *
 * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
 * different operand sizes can be reached by calculation, rather than a jump
 * table (which would be bigger than the code).
 */

typedef void (*fastop_t)(struct fastop *);

static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);

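/*
 * Dispatch sketch: because every stub is FASTOP_SIZE bytes, the caller
 * (fastop(), defined later) selects the variant for a 2^n-byte operand
 * by adding n * FASTOP_SIZE to the em_##op entry point, e.g. a 4-byte
 * add runs at em_add + 2 * FASTOP_SIZE.
 */
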
#define __FOP_FUNC(name) \
	".align " __stringify(FASTOP_SIZE) " \n\t" \
	".type " name ", @function \n\t" \
	name ":\n\t"

#define FOP_FUNC(name) \
	__FOP_FUNC(#name)

#define __FOP_RET(name) \
	"11: " ASM_RET \
	".size " name ", .-" name "\n\t"

#define FOP_RET(name) \
	__FOP_RET(#name)

#define FOP_START(op) \
	extern void em_##op(struct fastop *fake); \
	asm(".pushsection .text, \"ax\" \n\t" \
	    ".global em_" #op " \n\t" \
	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
	    "em_" #op ":\n\t"

#define FOP_END \
	    ".popsection")

#define __FOPNOP(name) \
	__FOP_FUNC(name) \
	__FOP_RET(name)

#define FOPNOP() \
	__FOPNOP(__stringify(__UNIQUE_ID(nop)))

#define FOP1E(op, dst) \
	__FOP_FUNC(#op "_" #dst) \
	"10: " #op " %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst)

#define FOP1EEX(op, dst) \
	FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)

#define FASTOP1(op) \
	FOP_START(op) \
	FOP1E(op##b, al) \
	FOP1E(op##w, ax) \
	FOP1E(op##l, eax) \
	ON64(FOP1E(op##q, rax)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
	FOP_START(name) \
	FOP1E(op, cl) \
	FOP1E(op, cx) \
	FOP1E(op, ecx) \
	ON64(FOP1E(op, rcx)) \
	FOP_END

/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
	FOP_START(name) \
	FOP1EEX(op, cl) \
	FOP1EEX(op, cx) \
	FOP1EEX(op, ecx) \
	ON64(FOP1EEX(op, rcx)) \
	FOP_END

#define FOP2E(op, dst, src) \
	__FOP_FUNC(#op "_" #dst "_" #src) \
	#op " %" #src ", %" #dst " \n\t" \
	__FOP_RET(#op "_" #dst "_" #src)

#define FASTOP2(op) \
	FOP_START(op) \
	FOP2E(op##b, al, dl) \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, word only */
#define FASTOP2W(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP2E(op##w, ax, dx) \
	FOP2E(op##l, eax, edx) \
	ON64(FOP2E(op##q, rax, rdx)) \
	FOP_END

/* 2 operand, src is CL */
#define FASTOP2CL(op) \
	FOP_START(op) \
	FOP2E(op##b, al, cl) \
	FOP2E(op##w, ax, cl) \
	FOP2E(op##l, eax, cl) \
	ON64(FOP2E(op##q, rax, cl)) \
	FOP_END

/* 2 operand, src and dest are reversed */
#define FASTOP2R(op, name) \
	FOP_START(name) \
	FOP2E(op##b, dl, al) \
	FOP2E(op##w, dx, ax) \
	FOP2E(op##l, edx, eax) \
	ON64(FOP2E(op##q, rdx, rax)) \
	FOP_END

#define FOP3E(op, dst, src, src2) \
	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
	__FOP_RET(#op "_" #dst "_" #src "_" #src2)

/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
	FOP_START(op) \
	FOPNOP() \
	FOP3E(op##w, ax, dx, cl) \
	FOP3E(op##l, eax, edx, cl) \
	ON64(FOP3E(op##q, rax, rdx, cl)) \
	FOP_END

/* Special case for SETcc - 1 instruction per cc */

/*
 * Depending on .config the SETcc functions look like:
 *
 * SETcc %al	[3 bytes]
 * RET		[1 byte]
 * INT3		[1 byte; CONFIG_SLS]
 *
 * Which gives possible sizes 4 or 5. When rounded up to the
 * next power-of-two alignment they become 4 or 8.
 */
#define SETCC_LENGTH	(4 + IS_ENABLED(CONFIG_SLS))
#define SETCC_ALIGN	(4 << IS_ENABLED(CONFIG_SLS))
static_assert(SETCC_LENGTH <= SETCC_ALIGN);

#define FOP_SETCC(op) \
	".align " __stringify(SETCC_ALIGN) " \n\t" \
	".type " #op ", @function \n\t" \
	#op ": \n\t" \
	#op " %al \n\t" \
	__FOP_RET(#op)

FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setna)
FOP_SETCC(seta)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;

FOP_START(salc)
FOP_FUNC(salc)
"pushf; sbb %al, %al; popf \n\t"
FOP_RET(salc)
FOP_END;

/*
 * XXX: inoutclob user must know where the argument is being expanded.
 *      Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
 */
#define asm_safe(insn, inoutclob...) \
({ \
	int _fault = 0; \
 \
	asm volatile("1:" insn "\n" \
		     "2:\n" \
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
		     : [_fault] "+r"(_fault) inoutclob ); \
 \
	_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})

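/*
 * Usage sketch: wrapping a faulting instruction with asm_safe() turns
 * the fault into a clean return code instead of a host crash, e.g. the
 * FXSAVE/FXRSTOR emulation does
 *
 *	rc = asm_safe("fxsave %[fx]", : [fx] "+m"(fx_state));
 */
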
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
				    enum x86_intercept intercept,
				    enum x86_intercept_stage stage)
{
	struct x86_instruction_info info = {
		.intercept  = intercept,
		.rep_prefix = ctxt->rep_prefix,
		.modrm_mod  = ctxt->modrm_mod,
		.modrm_reg  = ctxt->modrm_reg,
		.modrm_rm   = ctxt->modrm_rm,
		.src_val    = ctxt->src.val64,
		.dst_val    = ctxt->dst.val64,
		.src_bytes  = ctxt->src.bytes,
		.dst_bytes  = ctxt->dst.bytes,
		.ad_bytes   = ctxt->ad_bytes,
		.next_rip   = ctxt->eip,
	};

	return ctxt->ops->intercept(ctxt, &info, stage);
}

static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

static void assign_register(unsigned long *reg, u64 val, int bytes)
{
	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
	switch (bytes) {
	case 1:
		*(u8 *)reg = (u8)val;
		break;
	case 2:
		*(u16 *)reg = (u16)val;
		break;
	case 4:
		*reg = (u32)val;
		break;	/* 64b: zero-extend */
	case 8:
		*reg = val;
		break;
	}
}

static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
	return (1UL << (ctxt->ad_bytes << 3)) - 1;
}

static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
	u16 sel;
	struct desc_struct ss;

	if (ctxt->mode == X86EMUL_MODE_PROT64)
		return ~0UL;
	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
}

static int stack_size(struct x86_emulate_ctxt *ctxt)
{
	return (__fls(stack_mask(ctxt)) + 1) >> 3;
}

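/*
 * Worked example: ad_bytes == 2 yields ad_mask() == 0xffff, ad_bytes == 4
 * yields 0xffffffff.  address_mask() below never calls ad_mask() when
 * ad_bytes == sizeof(unsigned long), so the 1UL << 64 overflow cannot
 * happen.  stack_size() maps a stack mask of 0xffff to 2 and ~0UL to 8.
 */
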
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
	if (ctxt->ad_bytes == sizeof(unsigned long))
		return reg;
	else
		return reg & ad_mask(ctxt);
}

static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, int reg)
{
	return address_mask(ctxt, reg_read(ctxt, reg));
}

static void masked_increment(ulong *reg, ulong mask, int inc)
{
	assign_masked(reg, *reg + inc, mask);
}

static void
register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
{
	ulong *preg = reg_rmw(ctxt, reg);

	assign_register(preg, *preg + inc, ctxt->ad_bytes);
}

static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}

static u32 desc_limit_scaled(struct desc_struct *desc)
{
	u32 limit = get_desc_limit(desc);

	return desc->g ? (limit << 12) | 0xfff : limit;
}

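/*
 * Example: a flat 4 GiB segment is encoded as limit 0xfffff with G=1, so
 * desc_limit_scaled() returns (0xfffff << 12) | 0xfff == 0xffffffff.
 */
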
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
			     u32 error, bool valid)
{
	WARN_ON(vec > 0x1f);
	ctxt->exception.vector = vec;
	ctxt->exception.error_code = error;
	ctxt->exception.error_code_valid = valid;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DB_VECTOR, 0, false);
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, GP_VECTOR, err, true);
}

static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, SS_VECTOR, err, true);
}

static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, UD_VECTOR, 0, false);
}

static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
	return emulate_exception(ctxt, TS_VECTOR, err, true);
}

static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, DE_VECTOR, 0, false);
}

static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
	return emulate_exception(ctxt, NM_VECTOR, 0, false);
}

static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
	u16 selector;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
	return selector;
}

static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	u16 dummy;
	u32 base3;
	struct desc_struct desc;

	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}

static inline u8 ctxt_virt_addr_bits(struct x86_emulate_ctxt *ctxt)
{
	return (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_LA57) ? 57 : 48;
}

static inline bool emul_is_noncanonical_address(u64 la,
						struct x86_emulate_ctxt *ctxt)
{
	return get_canonical(la, ctxt_virt_addr_bits(ctxt)) != la;
}

/*
 * x86 defines three classes of vector instructions: explicitly
 * aligned, explicitly unaligned, and the rest, which change behaviour
 * depending on whether they're AVX encoded or not.
 *
 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
 * subject to the same check. FXSAVE and FXRSTOR are checked here too as their
 * 512 bytes of data must be aligned to a 16 byte boundary.
 */
static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
{
	u64 alignment = ctxt->d & AlignMask;

	if (likely(size < 16))
		return 1;

	switch (alignment) {
	case Unaligned:
	case Avx:
		return 1;
	case Aligned16:
		return 16;
	case Aligned:
	default:
		return size;
	}
}

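/*
 * Example: MOVDQA is tagged Aligned, so a 16-byte access requires 16-byte
 * alignment and __linearize() below raises #GP(0) if any of the low four
 * address bits are set; MOVDQU is tagged Unaligned and always gets 1.
 */
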
static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
				       struct segmented_address addr,
				       unsigned *max_size, unsigned size,
				       bool write, bool fetch,
				       enum x86emul_mode mode, ulong *linear)
{
	struct desc_struct desc;
	bool usable;
	ulong la;
	u32 lim;
	u16 sel;
	u8 va_bits;

	la = seg_base(ctxt, addr.seg) + addr.ea;
	*max_size = 0;
	switch (mode) {
	case X86EMUL_MODE_PROT64:
		*linear = la;
		va_bits = ctxt_virt_addr_bits(ctxt);
		if (get_canonical(la, va_bits) != la)
			goto bad;

		*max_size = min_t(u64, ~0u, (1ull << va_bits) - la);
		if (size > *max_size)
			goto bad;
		break;
	default:
		*linear = la = (u32)la;
		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
						addr.seg);
		if (!usable)
			goto bad;
		/* code segment in protected mode or read-only data segment */
		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
					|| !(desc.type & 2)) && write)
			goto bad;
		/* unreadable code segment */
		if (!fetch && (desc.type & 8) && !(desc.type & 2))
			goto bad;
		lim = desc_limit_scaled(&desc);
		if (!(desc.type & 8) && (desc.type & 4)) {
			/* expand-down segment */
			if (addr.ea <= lim)
				goto bad;
			lim = desc.d ? 0xffffffff : 0xffff;
		}
		if (addr.ea > lim)
			goto bad;
		if (lim == 0xffffffff)
			*max_size = ~0u;
		else {
			*max_size = (u64)lim + 1 - addr.ea;
			if (size > *max_size)
				goto bad;
		}
		break;
	}
	if (la & (insn_alignment(ctxt, size) - 1))
		return emulate_gp(ctxt, 0);
	return X86EMUL_CONTINUE;
bad:
	if (addr.seg == VCPU_SREG_SS)
		return emulate_ss(ctxt, 0);
	else
		return emulate_gp(ctxt, 0);
}

static int linearize(struct x86_emulate_ctxt *ctxt,
		     struct segmented_address addr,
		     unsigned size, bool write,
		     ulong *linear)
{
	unsigned max_size;
	return __linearize(ctxt, addr, &max_size, size, write, false,
			   ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
{
	ulong linear;
	int rc;
	unsigned max_size;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = dst };

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	return assign_eip(ctxt, dst, ctxt->mode);
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
	return assign_eip_near(ctxt, ctxt->_eip + rel);
}

static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
			      void *data, unsigned size)
{
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int linear_write_system(struct x86_emulate_ctxt *ctxt,
			       ulong linear, void *data,
			       unsigned int size)
{
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
}

static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
			      struct segmented_address addr,
			      void *data,
			      unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}

static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
			       struct segmented_address addr,
			       void *data,
			       unsigned int size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}

/*
 * Prefetch the remaining bytes of the instruction without crossing page
 * boundary if they are not in fetch_cache yet.
 */
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
	int rc;
	unsigned size, max_size;
	unsigned long linear;
	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
	struct segmented_address addr = { .seg = VCPU_SREG_CS,
					  .ea = ctxt->eip + cur_size };

	/*
	 * We do not know exactly how many bytes will be needed, and
	 * __linearize is expensive, so fetch as much as possible.  We
	 * just have to avoid going beyond the 15 byte limit, the end
	 * of the segment, or the end of the page.
	 *
	 * __linearize is called with size 0 so that it does not do any
	 * boundary check itself.  Instead, we use max_size to check
	 * against op_size.
	 */
	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
			 &linear);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;

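	/*
	 * cur_size never exceeds 15 here, so "15UL ^ cur_size" below is
	 * simply a branchless 15 - cur_size: the number of bytes still
	 * allowed by the architectural 15-byte instruction-length limit.
	 */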
	size = min_t(unsigned, 15UL ^ cur_size, max_size);
	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));

	/*
	 * One instruction can only straddle two pages,
	 * and one has been loaded at the beginning of
	 * x86_decode_insn.  So, if not enough bytes
	 * still, we must have hit the 15-byte boundary.
	 */
	if (unlikely(size < op_size))
		return emulate_gp(ctxt, 0);

	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
			      size, &ctxt->exception);
	if (unlikely(rc != X86EMUL_CONTINUE))
		return rc;
	ctxt->fetch.end += size;
	return X86EMUL_CONTINUE;
}

static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
					       unsigned size)
{
	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;

	if (unlikely(done_size < size))
		return __do_insn_fetch_bytes(ctxt, size - done_size);
	else
		return X86EMUL_CONTINUE;
}

/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({	_type _x; \
 \
	rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += sizeof(_type); \
	memcpy(&_x, ctxt->fetch.ptr, sizeof(_type)); \
	ctxt->fetch.ptr += sizeof(_type); \
	_x; \
})

#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
	rc = do_insn_fetch_bytes(_ctxt, _size); \
	if (rc != X86EMUL_CONTINUE) \
		goto done; \
	ctxt->_eip += (_size); \
	memcpy(_arr, ctxt->fetch.ptr, _size); \
	ctxt->fetch.ptr += (_size); \
})

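/*
 * Both macros are statement expressions that rely on a local "int rc" and
 * a "done:" label in the calling function, e.g. decode_modrm() below does
 * "sib = insn_fetch(u8, ctxt);" and bails out through its done label.
 */
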
/*
 * Given the 'reg' portion of a ModRM byte, and a register block, return a
 * pointer into the block that addresses the relevant register.
 * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
 */
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
			     int byteop)
{
	void *p;
	int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;

	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
	else
		p = reg_rmw(ctxt, modrm_reg);
	return p;
}

static int read_descriptor(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   u16 *size, unsigned long *address, int op_bytes)
{
	int rc;

	if (op_bytes == 2)
		op_bytes = 3;
	*address = 0;
	rc = segmented_read_std(ctxt, addr, size, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	addr.ea += 2;
	rc = segmented_read_std(ctxt, addr, address, op_bytes);
	return rc;
}

FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);

FASTOP2R(cmp, cmp_r);

static int em_bsf_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsf);
}

static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
{
	/* If src is zero, do not writeback, but update flags */
	if (ctxt->src.val == 0)
		ctxt->dst.type = OP_NONE;
	return fastop(ctxt, em_bsr);
}

static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
	u8 rc;
	void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);

	flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
	asm("push %[flags]; popf; " CALL_NOSPEC
	    : "=a"(rc) : [thunk_target]"r"(fop), [flags]"r"(flags));
	return rc;
}

static void fetch_register_operand(struct operand *op)
{
	switch (op->bytes) {
	case 1:
		op->val = *(u8 *)op->addr.reg;
		break;
	case 2:
		op->val = *(u16 *)op->addr.reg;
		break;
	case 4:
		op->val = *(u32 *)op->addr.reg;
		break;
	case 8:
		op->val = *(u64 *)op->addr.reg;
		break;
	}
}

static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fninit");
	kvm_fpu_put();
	return X86EMUL_CONTINUE;
}

static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
	u16 fcw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstcw %0": "+m"(fcw));
	kvm_fpu_put();

	ctxt->dst.val = fcw;

	return X86EMUL_CONTINUE;
}

static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
	u16 fsw;

	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
		return emulate_nm(ctxt);

	kvm_fpu_get();
	asm volatile("fnstsw %0": "+m"(fsw));
	kvm_fpu_put();

	ctxt->dst.val = fsw;

	return X86EMUL_CONTINUE;
}

static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
				    struct operand *op)
{
	unsigned reg = ctxt->modrm_reg;

	if (!(ctxt->d & ModRM))
		reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);

	if (ctxt->d & Sse) {
		op->type = OP_XMM;
		op->bytes = 16;
		op->addr.xmm = reg;
		kvm_read_sse_reg(reg, &op->vec_val);
		return;
	}
	if (ctxt->d & Mmx) {
		reg &= 7;
		op->type = OP_MM;
		op->bytes = 8;
		op->addr.mm = reg;
		return;
	}

	op->type = OP_REG;
	op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);

	fetch_register_operand(op);
	op->orig_val = op->val;
}

static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
		ctxt->modrm_seg = VCPU_SREG_SS;
}

static int decode_modrm(struct x86_emulate_ctxt *ctxt,
			struct operand *op)
{
	u8 sib;
	int index_reg, base_reg, scale;
	int rc = X86EMUL_CONTINUE;
	ulong modrm_ea = 0;

	ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
	index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
	base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */

	ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
	ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
	ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
	ctxt->modrm_seg = VCPU_SREG_DS;

	if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
				ctxt->d & ByteOp);
		if (ctxt->d & Sse) {
			op->type = OP_XMM;
			op->bytes = 16;
			op->addr.xmm = ctxt->modrm_rm;
			kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
			return rc;
		}
		if (ctxt->d & Mmx) {
			op->type = OP_MM;
			op->bytes = 8;
			op->addr.mm = ctxt->modrm_rm & 7;
			return rc;
		}
		fetch_register_operand(op);
		return rc;
	}

	op->type = OP_MEM;

	if (ctxt->ad_bytes == 2) {
		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);

		/* 16-bit ModR/M decode. */
		switch (ctxt->modrm_mod) {
		case 0:
			if (ctxt->modrm_rm == 6)
				modrm_ea += insn_fetch(u16, ctxt);
			break;
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(u16, ctxt);
			break;
		}
		switch (ctxt->modrm_rm) {
		case 0:
			modrm_ea += bx + si;
			break;
		case 1:
			modrm_ea += bx + di;
			break;
		case 2:
			modrm_ea += bp + si;
			break;
		case 3:
			modrm_ea += bp + di;
			break;
		case 4:
			modrm_ea += si;
			break;
		case 5:
			modrm_ea += di;
			break;
		case 6:
			if (ctxt->modrm_mod != 0)
				modrm_ea += bp;
			break;
		case 7:
			modrm_ea += bx;
			break;
		}
		if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
		    (ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
			ctxt->modrm_seg = VCPU_SREG_SS;
		modrm_ea = (u16)modrm_ea;
	} else {
		/* 32/64-bit ModR/M decode. */
		if ((ctxt->modrm_rm & 7) == 4) {
			sib = insn_fetch(u8, ctxt);
			index_reg |= (sib >> 3) & 7;
			base_reg |= sib & 7;
			scale = sib >> 6;

			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
				modrm_ea += insn_fetch(s32, ctxt);
			else {
				modrm_ea += reg_read(ctxt, base_reg);
				adjust_modrm_seg(ctxt, base_reg);
				/* Increment ESP on POP [ESP] */
				if ((ctxt->d & IncSP) &&
				    base_reg == VCPU_REGS_RSP)
					modrm_ea += ctxt->op_bytes;
			}
			if (index_reg != 4)
				modrm_ea += reg_read(ctxt, index_reg) << scale;
		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
			modrm_ea += insn_fetch(s32, ctxt);
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				ctxt->rip_relative = 1;
		} else {
			base_reg = ctxt->modrm_rm;
			modrm_ea += reg_read(ctxt, base_reg);
			adjust_modrm_seg(ctxt, base_reg);
		}
		switch (ctxt->modrm_mod) {
		case 1:
			modrm_ea += insn_fetch(s8, ctxt);
			break;
		case 2:
			modrm_ea += insn_fetch(s32, ctxt);
			break;
		}
	}
	op->addr.mem.ea = modrm_ea;
	if (ctxt->ad_bytes != 8)
		ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;

done:
	return rc;
}

static int decode_abs(struct x86_emulate_ctxt *ctxt,
		      struct operand *op)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_MEM;
	switch (ctxt->ad_bytes) {
	case 2:
		op->addr.mem.ea = insn_fetch(u16, ctxt);
		break;
	case 4:
		op->addr.mem.ea = insn_fetch(u32, ctxt);
		break;
	case 8:
		op->addr.mem.ea = insn_fetch(u64, ctxt);
		break;
	}
done:
	return rc;
}

static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
	long sv = 0, mask;

	if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
		mask = ~((long)ctxt->dst.bytes * 8 - 1);

		if (ctxt->src.bytes == 2)
			sv = (s16)ctxt->src.val & (s16)mask;
		else if (ctxt->src.bytes == 4)
			sv = (s32)ctxt->src.val & (s32)mask;
		else
			sv = (s64)ctxt->src.val & (s64)mask;

		ctxt->dst.addr.mem.ea = address_mask(ctxt,
					ctxt->dst.addr.mem.ea + (sv >> 3));
	}

	/* only subword offset */
	ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}

static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 unsigned long addr, void *dest, unsigned size)
{
	int rc;
	struct read_cache *mc = &ctxt->mem_read;

	if (mc->pos < mc->end)
		goto read_cached;

	WARN_ON((mc->end + size) >= sizeof(mc->data));

	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
				      &ctxt->exception);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return X86EMUL_CONTINUE;
}

static int segmented_read(struct x86_emulate_ctxt *ctxt,
			  struct segmented_address addr,
			  void *data,
			  unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, false, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return read_emulated(ctxt, linear, data, size);
}

static int segmented_write(struct x86_emulate_ctxt *ctxt,
			   struct segmented_address addr,
			   const void *data,
			   unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->write_emulated(ctxt, linear, data, size,
					 &ctxt->exception);
}

static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
			     struct segmented_address addr,
			     const void *orig_data, const void *data,
			     unsigned size)
{
	int rc;
	ulong linear;

	rc = linearize(ctxt, addr, size, true, &linear);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
					   size, &ctxt->exception);
}

static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
			   unsigned int size, unsigned short port,
			   void *dest)
{
	struct read_cache *rc = &ctxt->io_read;

	if (rc->pos == rc->end) { /* refill pio read ahead */
		unsigned int in_page, n;
		unsigned int count = ctxt->rep_prefix ?
			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
		in_page = (ctxt->eflags & X86_EFLAGS_DF) ?
			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
		n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
		if (n == 0)
			n = 1;
		rc->pos = rc->end = 0;
		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
			return 0;
		rc->end = n * size;
	}

	if (ctxt->rep_prefix && (ctxt->d & String) &&
	    !(ctxt->eflags & X86_EFLAGS_DF)) {
		ctxt->dst.data = rc->data + rc->pos;
		ctxt->dst.type = OP_MEM_STR;
		ctxt->dst.count = (rc->end - rc->pos) / size;
		rc->pos = rc->end;
	} else {
		memcpy(dest, rc->data + rc->pos, size);
		rc->pos += size;
	}
	return 1;
}

static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 index, struct desc_struct *desc)
{
	struct desc_ptr dt;
	ulong addr;

	ctxt->ops->get_idt(ctxt, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, index << 3 | 0x2);

	addr = dt.address + index * 8;
	return linear_read_system(ctxt, addr, desc, sizeof(*desc));
}

static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
				     u16 selector, struct desc_ptr *dt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	u32 base3 = 0;

	if (selector & 1 << 2) {
		struct desc_struct desc;
		u16 sel;

		memset(dt, 0, sizeof(*dt));
		if (!ops->get_segment(ctxt, &sel, &desc, &base3,
				      VCPU_SREG_LDTR))
			return;

		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
		dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
	} else
		ops->get_gdt(ctxt, dt);
}

static int get_descriptor_ptr(struct x86_emulate_ctxt *ctxt,
			      u16 selector, ulong *desc_addr_p)
{
	struct desc_ptr dt;
	u16 index = selector >> 3;
	ulong addr;

	get_descriptor_table_ptr(ctxt, selector, &dt);

	if (dt.size < index * 8 + 7)
		return emulate_gp(ctxt, selector & 0xfffc);

	addr = dt.address + index * 8;

#ifdef CONFIG_X86_64
	if (addr >> 32 != 0) {
		u64 efer = 0;

		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
		if (!(efer & EFER_LMA))
			addr &= (u32)-1;
	}
#endif

	*desc_addr_p = addr;
	return X86EMUL_CONTINUE;
}

/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, struct desc_struct *desc,
				   ulong *desc_addr_p)
{
	int rc;

	rc = get_descriptor_ptr(ctxt, selector, desc_addr_p);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}

/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				    u16 selector, struct desc_struct *desc)
{
	int rc;
	ulong addr;

	rc = get_descriptor_ptr(ctxt, selector, &addr);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}

static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				     u16 selector, int seg, u8 cpl,
				     enum x86_transfer_type transfer,
				     struct desc_struct *desc)
{
	struct desc_struct seg_desc, old_desc;
	u8 dpl, rpl;
	unsigned err_vec = GP_VECTOR;
	u32 err_code = 0;
	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
	ulong desc_addr;
	int ret;
	u16 dummy;
	u32 base3 = 0;

	memset(&seg_desc, 0, sizeof(seg_desc));

	if (ctxt->mode == X86EMUL_MODE_REAL) {
		/* set real mode segment descriptor (keep limit etc. for
		 * unreal mode) */
		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
		set_desc_base(&seg_desc, selector << 4);
		goto load;
	} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
		/* VM86 needs a clean new segment descriptor */
		set_desc_base(&seg_desc, selector << 4);
		set_desc_limit(&seg_desc, 0xffff);
		seg_desc.type = 3;
		seg_desc.p = 1;
		seg_desc.s = 1;
		seg_desc.dpl = 3;
		goto load;
	}

	rpl = selector & 3;

	/* TR should be in GDT only */
	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
		goto exception;

	/* NULL selector is not valid for TR, CS and (except for long mode) SS */
	if (null_selector) {
		if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
			goto exception;

		if (seg == VCPU_SREG_SS) {
			if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
				goto exception;

			/*
			 * ctxt->ops->set_segment expects the CPL to be in
			 * SS.DPL, so fake an expand-up 32-bit data segment.
			 */
			seg_desc.type = 3;
			seg_desc.p = 1;
			seg_desc.s = 1;
			seg_desc.dpl = cpl;
			seg_desc.d = 1;
			seg_desc.g = 1;
		}

		/* Skip all following checks */
		goto load;
	}

	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
	if (ret != X86EMUL_CONTINUE)
		return ret;

	err_code = selector & 0xfffc;
	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
							   GP_VECTOR;

	/* can't load system descriptor into segment selector */
	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
		if (transfer == X86_TRANSFER_CALL_JMP)
			return X86EMUL_UNHANDLEABLE;
		goto exception;
	}

	if (!seg_desc.p) {
		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
		goto exception;
	}

	dpl = seg_desc.dpl;

	switch (seg) {
	case VCPU_SREG_SS:
		/*
		 * segment is not a writable data segment or segment
		 * selector's RPL != CPL or segment descriptor's DPL != CPL
		 */
		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
			goto exception;
		break;
	case VCPU_SREG_CS:
		if (!(seg_desc.type & 8))
			goto exception;

		if (seg_desc.type & 4) {
			/* conforming */
			if (dpl > cpl)
				goto exception;
		} else {
			/* nonconforming */
			if (rpl > cpl || dpl != cpl)
				goto exception;
		}
		/* in long-mode d/b must be clear if l is set */
		if (seg_desc.d && seg_desc.l) {
			u64 efer = 0;

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				goto exception;
		}

		/* CS(RPL) <- CPL */
		selector = (selector & 0xfffc) | cpl;
		break;
	case VCPU_SREG_TR:
		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
			goto exception;
		old_desc = seg_desc;
		seg_desc.type |= 2; /* busy */
		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
						  sizeof(seg_desc), &ctxt->exception);
		if (ret != X86EMUL_CONTINUE)
			return ret;
		break;
	case VCPU_SREG_LDTR:
		if (seg_desc.s || seg_desc.type != 2)
			goto exception;
		break;
	default: /* DS, ES, FS, or GS */
		/*
		 * segment is not a data or readable code segment or
		 * ((segment is a data or nonconforming code segment)
		 * and (both RPL and CPL > DPL))
		 */
		if ((seg_desc.type & 0xa) == 0x8 ||
		    (((seg_desc.type & 0xc) != 0xc) &&
		     (rpl > dpl && cpl > dpl)))
			goto exception;
		break;
	}

	if (seg_desc.s) {
		/* mark segment as accessed */
		if (!(seg_desc.type & 1)) {
			seg_desc.type |= 1;
			ret = write_segment_descriptor(ctxt, selector,
						       &seg_desc);
			if (ret != X86EMUL_CONTINUE)
				return ret;
		}
	} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
		ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
		if (ret != X86EMUL_CONTINUE)
			return ret;
		if (emul_is_noncanonical_address(get_desc_base(&seg_desc) |
						 ((u64)base3 << 32), ctxt))
			return emulate_gp(ctxt, 0);
	}
load:
	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
	if (desc)
		*desc = seg_desc;
	return X86EMUL_CONTINUE;
exception:
	return emulate_exception(ctxt, err_vec, err_code, true);
}

static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
				   u16 selector, int seg)
{
	u8 cpl = ctxt->ops->cpl(ctxt);

	/*
	 * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
	 * they can load it at CPL<3 (Intel's manual says only LSS can,
	 * but it's easier to check for both).
	 *
	 * However, the Intel manual says that putting IST=1/DPL=3 in
	 * an interrupt gate will result in SS=3 (the AMD manual instead
	 * says it doesn't), so allow SS=3 in __load_segment_descriptor
	 * and only forbid it here.
	 */
	if (seg == VCPU_SREG_SS && selector == 3 &&
	    ctxt->mode == X86EMUL_MODE_PROT64)
		return emulate_exception(ctxt, GP_VECTOR, 0, true);

	return __load_segment_descriptor(ctxt, selector, seg, cpl,
					 X86_TRANSFER_NONE, NULL);
}

static void write_register_operand(struct operand *op)
{
	return assign_register(op->addr.reg, op->val, op->bytes);
}

static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
	switch (op->type) {
	case OP_REG:
		write_register_operand(op);
		break;
	case OP_MEM:
		if (ctxt->lock_prefix)
			return segmented_cmpxchg(ctxt,
						 op->addr.mem,
						 &op->orig_val,
						 &op->val,
						 op->bytes);
		else
			return segmented_write(ctxt,
					       op->addr.mem,
					       &op->val,
					       op->bytes);
	case OP_MEM_STR:
		return segmented_write(ctxt,
				       op->addr.mem,
				       op->data,
				       op->bytes * op->count);
	case OP_XMM:
		kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
		break;
	case OP_MM:
		kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
		break;
	case OP_NONE:
		/* no writeback */
		break;
	default:
		break;
	}
	return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
	struct segmented_address addr;

	rsp_increment(ctxt, -bytes);
	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;

	return segmented_write(ctxt, addr, data, bytes);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;
	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
		       void *dest, int len)
{
	int rc;
	struct segmented_address addr;

	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
	addr.seg = VCPU_SREG_SS;
	rc = segmented_read(ctxt, addr, dest, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rsp_increment(ctxt, len);
	return rc;
}

static int em_pop(struct x86_emulate_ctxt *ctxt)
{
	return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int emulate_popf(struct x86_emulate_ctxt *ctxt,
			void *dest, int len)
{
	int rc;
	unsigned long val, change_mask;
	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	int cpl = ctxt->ops->cpl(ctxt);

	rc = emulate_pop(ctxt, &val, len);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	change_mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
		      X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF |
		      X86_EFLAGS_TF | X86_EFLAGS_DF | X86_EFLAGS_NT |
		      X86_EFLAGS_AC | X86_EFLAGS_ID;

	switch(ctxt->mode) {
	case X86EMUL_MODE_PROT64:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT16:
		if (cpl == 0)
			change_mask |= X86_EFLAGS_IOPL;
		if (cpl <= iopl)
			change_mask |= X86_EFLAGS_IF;
		break;
	case X86EMUL_MODE_VM86:
		if (iopl < 3)
			return emulate_gp(ctxt, 0);
		change_mask |= X86_EFLAGS_IF;
		break;
	default: /* real mode */
		change_mask |= (X86_EFLAGS_IOPL | X86_EFLAGS_IF);
		break;
	}

	*(unsigned long *)dest =
		(ctxt->eflags & ~change_mask) | (val & change_mask);

	return rc;
}

static int em_popf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->dst.type = OP_REG;
	ctxt->dst.addr.reg = &ctxt->eflags;
	ctxt->dst.bytes = ctxt->op_bytes;
	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}

static int em_enter(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned frame_size = ctxt->src.val;
	unsigned nesting_level = ctxt->src2.val & 31;
	ulong rbp;

	if (nesting_level)
		return X86EMUL_UNHANDLEABLE;

	rbp = reg_read(ctxt, VCPU_REGS_RBP);
	rc = push(ctxt, &rbp, stack_size(ctxt));
	if (rc != X86EMUL_CONTINUE)
		return rc;
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
		      stack_mask(ctxt));
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
		      stack_mask(ctxt));
	return X86EMUL_CONTINUE;
}

static int em_leave(struct x86_emulate_ctxt *ctxt)
{
	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
		      stack_mask(ctxt));
	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}

static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;

	ctxt->src.val = get_segment_selector(ctxt, seg);
	if (ctxt->op_bytes == 4) {
		rsp_increment(ctxt, -2);
		ctxt->op_bytes = 2;
	}

	return em_push(ctxt);
}

static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned long selector;
	int rc;

	rc = emulate_pop(ctxt, &selector, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (ctxt->modrm_reg == VCPU_SREG_SS)
		ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
	if (ctxt->op_bytes > 2)
		rsp_increment(ctxt, ctxt->op_bytes - 2);

	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
	return rc;
}

static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RAX;

	while (reg <= VCPU_REGS_RDI) {
		(reg == VCPU_REGS_RSP) ?
		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));

		rc = em_push(ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;

		++reg;
	}

	return rc;
}

static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
	ctxt->src.val = (unsigned long)ctxt->eflags & ~X86_EFLAGS_VM;
	return em_push(ctxt);
}

static int em_popa(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	int reg = VCPU_REGS_RDI;
	u32 val;

	while (reg >= VCPU_REGS_RAX) {
		if (reg == VCPU_REGS_RSP) {
			rsp_increment(ctxt, ctxt->op_bytes);
			--reg;
			continue;
		}

		rc = emulate_pop(ctxt, &val, ctxt->op_bytes);
		if (rc != X86EMUL_CONTINUE)
			break;
		assign_register(reg_rmw(ctxt, reg), val, ctxt->op_bytes);
		--reg;
	}
	return rc;
}

static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc;
	struct desc_ptr dt;
	gva_t cs_addr;
	gva_t eip_addr;
	u16 cs, eip;

	/* TODO: Add limit checks */
	ctxt->src.val = ctxt->eflags;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->eflags &= ~(X86_EFLAGS_IF | X86_EFLAGS_TF | X86_EFLAGS_AC);

	ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->src.val = ctxt->_eip;
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ops->get_idt(ctxt, &dt);

	eip_addr = dt.address + (irq << 2);
	cs_addr = dt.address + (irq << 2) + 2;

	rc = linear_read_system(ctxt, cs_addr, &cs, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = linear_read_system(ctxt, eip_addr, &eip, 2);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = eip;

	return rc;
}

int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
	int rc;

	invalidate_registers(ctxt);
	rc = __emulate_int_real(ctxt, irq);
	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);
	return rc;
}

static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return __emulate_int_real(ctxt, irq);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* Protected mode interrupts unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
	int rc = X86EMUL_CONTINUE;
	unsigned long temp_eip = 0;
	unsigned long temp_eflags = 0;
	unsigned long cs = 0;
	unsigned long mask = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			     X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_TF |
			     X86_EFLAGS_IF | X86_EFLAGS_DF | X86_EFLAGS_OF |
			     X86_EFLAGS_IOPL | X86_EFLAGS_NT | X86_EFLAGS_RF |
			     X86_EFLAGS_AC | X86_EFLAGS_ID |
			     X86_EFLAGS_FIXED;
	unsigned long vm86_mask = X86_EFLAGS_VM | X86_EFLAGS_VIF |
				  X86_EFLAGS_VIP;

	/* TODO: Add stack limit check */

	rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	if (temp_eip & ~0xffff)
		return emulate_gp(ctxt, 0);

	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);

	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->_eip = temp_eip;

	if (ctxt->op_bytes == 4)
		ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
	else if (ctxt->op_bytes == 2) {
		ctxt->eflags &= ~0xffff;
		ctxt->eflags |= temp_eflags;
	}

	ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
	ctxt->eflags |= X86_EFLAGS_FIXED;
	ctxt->ops->set_nmi_mask(ctxt, false);

	return rc;
}

static int em_iret(struct x86_emulate_ctxt *ctxt)
{
	switch(ctxt->mode) {
	case X86EMUL_MODE_REAL:
		return emulate_iret_real(ctxt);
	case X86EMUL_MODE_VM86:
	case X86EMUL_MODE_PROT16:
	case X86EMUL_MODE_PROT32:
	case X86EMUL_MODE_PROT64:
	default:
		/* iret from protected mode unimplemented yet */
		return X86EMUL_UNHANDLEABLE;
	}
}

static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned short sel;
	struct desc_struct new_desc;
	u8 cpl = ctxt->ops->cpl(ctxt);

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_CALL_JMP,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip_near(ctxt, ctxt->src.val);
}

static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	long int old_eip;

	old_eip = ctxt->_eip;
	rc = assign_eip_near(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	ctxt->src.val = old_eip;
	rc = em_push(ctxt);
	return rc;
}

static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
	u64 old = ctxt->dst.orig_val64;

	if (ctxt->dst.bytes == 16)
		return X86EMUL_UNHANDLEABLE;

	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
		ctxt->eflags &= ~X86_EFLAGS_ZF;
	} else {
		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
			(u32) reg_read(ctxt, VCPU_REGS_RBX);

		ctxt->eflags |= X86_EFLAGS_ZF;
	}
	return X86EMUL_CONTINUE;
}

static int em_ret(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip_near(ctxt, eip);
}

static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
	int rc;
	unsigned long eip, cs;
	int cpl = ctxt->ops->cpl(ctxt);
	struct desc_struct new_desc;

	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	/* Outer-privilege level return is not implemented */
	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
		return X86EMUL_UNHANDLEABLE;
	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
				       X86_TRANSFER_RET,
				       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;

	return rc;
}

static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	rc = em_ret_far(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rsp_increment(ctxt, ctxt->src.val);
	return X86EMUL_CONTINUE;
}

static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
	/* Save real source value, then compare EAX against destination. */
	ctxt->dst.orig_val = ctxt->dst.val;
	ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
	ctxt->src.orig_val = ctxt->src.val;
	ctxt->src.val = ctxt->dst.orig_val;
	fastop(ctxt, em_cmp);

	if (ctxt->eflags & X86_EFLAGS_ZF) {
		/* Success: write back to memory; no update of EAX */
		ctxt->src.type = OP_NONE;
		ctxt->dst.val = ctxt->src.orig_val;
	} else {
		/* Failure: write the value we saw to EAX. */
		ctxt->src.type = OP_REG;
		ctxt->src.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		ctxt->src.val = ctxt->dst.orig_val;
		/* Create write-cycle to dest by writing the same value */
		ctxt->dst.val = ctxt->dst.orig_val;
	}
	return X86EMUL_CONTINUE;
}

static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
	int seg = ctxt->src2.val;
	unsigned short sel;
	int rc;

	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);

	rc = load_segment_descriptor(ctxt, sel, seg);
	if (rc != X86EMUL_CONTINUE)
		return rc;

	ctxt->dst.val = ctxt->src.val;
	return rc;
}

static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
{
#ifdef CONFIG_X86_64
	return ctxt->ops->guest_has_long_mode(ctxt);
#else
	return false;
#endif
}

static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
{
	desc->g    = (flags >> 23) & 1;
	desc->d    = (flags >> 22) & 1;
	desc->l    = (flags >> 21) & 1;
	desc->avl  = (flags >> 20) & 1;
	desc->p    = (flags >> 15) & 1;
	desc->dpl  = (flags >> 13) & 3;
	desc->s    = (flags >> 12) & 1;
	desc->type = (flags >>  8) & 15;
}

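/*
 * The bit numbers above correspond to the high dword of a segment
 * descriptor (with the limit 19:16 field ignored): type in bits 8-11,
 * S in 12, DPL in 13-14, P in 15, AVL in 20, L in 21, D/B in 22, G in 23.
 * The 64-bit SMRAM layout stores a packed attribute word instead, which
 * is why rsm_load_seg_64() shifts it left by 8 before calling this helper.
 */
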
static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;

	selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);

	if (n < 3)
		offset = 0x7f84 + n * 12;
	else
		offset = 0x7f2c + (n - 3) * 12;

	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
	return X86EMUL_CONTINUE;
}

#ifdef CONFIG_X86_64
static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
			   int n)
{
	struct desc_struct desc;
	int offset;
	u16 selector;
	u32 base3;

	offset = 0x7e00 + n * 16;

	selector = GET_SMSTATE(u16, smstate, offset);
	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
	base3 = GET_SMSTATE(u32, smstate, offset + 12);

	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
	return X86EMUL_CONTINUE;
}
#endif

static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
				    u64 cr0, u64 cr3, u64 cr4)
{
	int bad;
	u64 pcid;

	/* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
	pcid = 0;
	if (cr4 & X86_CR4_PCIDE) {
		pcid = cr3 & 0xfff;
		cr3 &= ~0xfff;
	}

	bad = ctxt->ops->set_cr(ctxt, 3, cr3);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	/*
	 * First enable PAE, long mode needs it before CR0.PG = 1 is set.
	 * Then enable protected mode.	However, PCID cannot be enabled
	 * if EFER.LMA=0, so set it separately.
	 */
	bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	bad = ctxt->ops->set_cr(ctxt, 0, cr0);
	if (bad)
		return X86EMUL_UNHANDLEABLE;

	if (cr4 & X86_CR4_PCIDE) {
		bad = ctxt->ops->set_cr(ctxt, 4, cr4);
		if (bad)
			return X86EMUL_UNHANDLEABLE;
		if (pcid) {
			bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
			if (bad)
				return X86EMUL_UNHANDLEABLE;
		}
	}

	return X86EMUL_CONTINUE;
}

static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
			     const char *smstate)
{
	struct desc_struct desc;
	struct desc_ptr dt;
	u16 selector;
	u32 val, cr0, cr3, cr4;
	int i;

	cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
	cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);

	if (ctxt->ops->set_dr(ctxt, 6, val))
		return X86EMUL_UNHANDLEABLE;

	val = GET_SMSTATE(u32, smstate, 0x7fc8);

	if (ctxt->ops->set_dr(ctxt, 7, val))
		return X86EMUL_UNHANDLEABLE;

	selector = GET_SMSTATE(u32, smstate, 0x7fc4);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);

	selector = GET_SMSTATE(u32, smstate, 0x7fc0);
	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
	ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
	ctxt->ops->set_gdt(ctxt, &dt);

	dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
	dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
	ctxt->ops->set_idt(ctxt, &dt);

	for (i = 0; i < 6; i++) {
		int r = rsm_load_seg_32(ctxt, smstate, i);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	cr4 = GET_SMSTATE(u32, smstate, 0x7f14);

	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));

	return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
}

2455 #ifdef CONFIG_X86_64
2456 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
2457 const char *smstate)
2459 struct desc_struct desc;
2461 u64 val, cr0, cr3, cr4;
2466 for (i = 0; i < 16; i++)
2467 *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
2469 ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
2470 ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
2472 val = GET_SMSTATE(u64, smstate, 0x7f68);
2474 if (ctxt->ops->set_dr(ctxt, 6, val))
2475 return X86EMUL_UNHANDLEABLE;
2477 val = GET_SMSTATE(u64, smstate, 0x7f60);
2479 if (ctxt->ops->set_dr(ctxt, 7, val))
2480 return X86EMUL_UNHANDLEABLE;
2482 cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
2483 cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
2484 cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
2485 ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
2486 val = GET_SMSTATE(u64, smstate, 0x7ed0);
2488 if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
2489 return X86EMUL_UNHANDLEABLE;
2491 selector = GET_SMSTATE(u32, smstate, 0x7e90);
2492 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
2493 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
2494 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
2495 base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
2496 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
2498 dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
2499 dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
2500 ctxt->ops->set_idt(ctxt, &dt);
2502 selector = GET_SMSTATE(u32, smstate, 0x7e70);
2503 rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
2504 set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
2505 set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
2506 base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
2507 ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
2509 dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
2510 dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
2511 ctxt->ops->set_gdt(ctxt, &dt);
2513 r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
2514 if (r != X86EMUL_CONTINUE)
2515 return r;
2517 for (i = 0; i < 6; i++) {
2518 r = rsm_load_seg_64(ctxt, smstate, i);
2519 if (r != X86EMUL_CONTINUE)
2520 return r;
2521 }
2523 return X86EMUL_CONTINUE;
2524 }
2525 #endif
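/*
 * The 64-bit state-save area above differs from the 32-bit one: all 16
 * GPRs are stored downward from 0x7ff8, EFER/TR/IDTR/LDTR/GDTR live in
 * the 0x7exx..0x7fxx range, and segment attributes are kept in the
 * packed form that rsm_set_desc_flags() unpacks via the "<< 8" shift.
 */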
2527 static int em_rsm(struct x86_emulate_ctxt *ctxt)
2528 {
2529 unsigned long cr0, cr4, efer;
2530 char buf[512];
2531 u64 smbase;
2532 int ret;
2534 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
2535 return emulate_ud(ctxt);
2537 smbase = ctxt->ops->get_smbase(ctxt);
2539 ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
2540 if (ret != X86EMUL_CONTINUE)
2541 return X86EMUL_UNHANDLEABLE;
2543 if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
2544 ctxt->ops->set_nmi_mask(ctxt, false);
2546 ctxt->ops->exiting_smm(ctxt);
2548 /*
2549 * Get back to real mode, to prepare a safe state in which to load
2550 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
2551 * supports long mode.
2552 */
2553 if (emulator_has_longmode(ctxt)) {
2554 struct desc_struct cs_desc;
2556 /* Zero CR4.PCIDE before CR0.PG. */
2557 cr4 = ctxt->ops->get_cr(ctxt, 4);
2558 if (cr4 & X86_CR4_PCIDE)
2559 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
2561 /* A 32-bit code segment is required to clear EFER.LMA. */
2562 memset(&cs_desc, 0, sizeof(cs_desc));
2563 cs_desc.type = 0xb;
2564 cs_desc.s = cs_desc.g = cs_desc.p = 1;
2565 ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
2566 }
2568 /* For the 64-bit case, this will clear EFER.LMA. */
2569 cr0 = ctxt->ops->get_cr(ctxt, 0);
2570 if (cr0 & X86_CR0_PE)
2571 ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
2573 if (emulator_has_longmode(ctxt)) {
2574 /* Clear CR4.PAE before clearing EFER.LME. */
2575 cr4 = ctxt->ops->get_cr(ctxt, 4);
2576 if (cr4 & X86_CR4_PAE)
2577 ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
2579 /* And finally go back to 32-bit mode. */
2580 efer = 0;
2581 ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
2582 }
2584 /*
2585 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
2586 * state (e.g. enter guest mode) before loading state from the SMM
2587 * state-save area.
2588 */
2589 if (ctxt->ops->leave_smm(ctxt, buf))
2590 goto emulate_shutdown;
2592 #ifdef CONFIG_X86_64
2593 if (emulator_has_longmode(ctxt))
2594 ret = rsm_load_state_64(ctxt, buf);
2595 else
2596 #endif
2597 ret = rsm_load_state_32(ctxt, buf);
2599 if (ret != X86EMUL_CONTINUE)
2600 goto emulate_shutdown;
2602 /*
2603 * Note, the ctxt->ops callbacks are responsible for handling side
2604 * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
2605 * runtime updates, etc... If that changes, e.g. this flow is moved
2606 * out of the emulator to make it look more like enter_smm(), then
2607 * those side effects need to be explicitly handled for both success
2608 * and failure.
2609 */
2610 return X86EMUL_CONTINUE;
2612 emulate_shutdown:
2613 ctxt->ops->triple_fault(ctxt);
2614 return X86EMUL_CONTINUE;
2615 }
2617 static void
2618 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
2619 struct desc_struct *cs, struct desc_struct *ss)
2620 {
2621 cs->l = 0; /* will be adjusted later */
2622 set_desc_base(cs, 0); /* flat segment */
2623 cs->g = 1; /* 4kb granularity */
2624 set_desc_limit(cs, 0xfffff); /* 4GB limit */
2625 cs->type = 0x0b; /* Read, Execute, Accessed */
2626 cs->s = 1;
2627 cs->dpl = 0; /* will be adjusted later */
2628 cs->p = 1;
2629 cs->d = 1;
2630 cs->avl = 0;
2632 set_desc_base(ss, 0); /* flat segment */
2633 set_desc_limit(ss, 0xfffff); /* 4GB limit */
2634 ss->g = 1; /* 4kb granularity */
2635 ss->s = 1;
2636 ss->type = 0x03; /* Read/Write, Accessed */
2637 ss->d = 1; /* 32bit stack segment */
2638 ss->dpl = 0;
2639 ss->p = 1;
2640 ss->l = 0;
2641 ss->avl = 0;
2642 }
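/*
 * These are the fixed flat segments that SYSCALL/SYSRET and
 * SYSENTER/SYSEXIT architecturally assume: base 0, 4GB limit,
 * read/execute code and read/write stack. Callers only adjust
 * CS.L/CS.D (and the DPLs) for the target mode.
 */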
2644 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
2645 {
2646 u32 eax, ebx, ecx, edx;
2648 eax = ecx = 0;
2649 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2650 return is_guest_vendor_intel(ebx, ecx, edx);
2651 }
2653 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
2654 {
2655 const struct x86_emulate_ops *ops = ctxt->ops;
2656 u32 eax, ebx, ecx, edx;
2658 /*
2659 * syscall should always be enabled in longmode - so only become
2660 * vendor specific (cpuid) if other modes are active...
2661 */
2662 if (ctxt->mode == X86EMUL_MODE_PROT64)
2663 return true;
2665 eax = 0x00000000;
2666 ecx = 0x00000000;
2667 ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
2668 /*
2669 * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
2670 * 64bit guest with a 32bit compat-app running will #UD! While this
2671 * behaviour can be fixed (by emulating) into AMD response - CPUs of
2672 * AMD can't behave like Intel.
2673 */
2674 if (is_guest_vendor_intel(ebx, ecx, edx))
2675 return false;
2677 if (is_guest_vendor_amd(ebx, ecx, edx) ||
2678 is_guest_vendor_hygon(ebx, ecx, edx))
2679 return true;
2681 /*
2682 * default: (not Intel, not AMD, not Hygon), apply Intel's
2683 * stricter rules...
2684 */
2685 return false;
2686 }
2688 static int em_syscall(struct x86_emulate_ctxt *ctxt)
2689 {
2690 const struct x86_emulate_ops *ops = ctxt->ops;
2691 struct desc_struct cs, ss;
2692 u64 msr_data;
2693 u16 cs_sel, ss_sel;
2694 u64 efer = 0;
2696 /* syscall is not available in real mode */
2697 if (ctxt->mode == X86EMUL_MODE_REAL ||
2698 ctxt->mode == X86EMUL_MODE_VM86)
2699 return emulate_ud(ctxt);
2701 if (!(em_syscall_is_enabled(ctxt)))
2702 return emulate_ud(ctxt);
2704 ops->get_msr(ctxt, MSR_EFER, &efer);
2705 if (!(efer & EFER_SCE))
2706 return emulate_ud(ctxt);
2708 setup_syscalls_segments(ctxt, &cs, &ss);
2709 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2710 msr_data >>= 32;
2711 cs_sel = (u16)(msr_data & 0xfffc);
2712 ss_sel = (u16)(msr_data + 8);
2714 if (efer & EFER_LMA) {
2715 cs.d = 0;
2716 cs.l = 1;
2717 }
2718 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2719 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2721 *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
2722 if (efer & EFER_LMA) {
2723 #ifdef CONFIG_X86_64
2724 *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
2726 ops->get_msr(ctxt,
2727 ctxt->mode == X86EMUL_MODE_PROT64 ?
2728 MSR_LSTAR : MSR_CSTAR, &msr_data);
2729 ctxt->_eip = msr_data;
2731 ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
2732 ctxt->eflags &= ~msr_data;
2733 ctxt->eflags |= X86_EFLAGS_FIXED;
2734 #endif
2735 } else {
2736 /* legacy mode */
2737 ops->get_msr(ctxt, MSR_STAR, &msr_data);
2738 ctxt->_eip = (u32)msr_data;
2740 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2741 }
2743 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
2744 return X86EMUL_CONTINUE;
2745 }
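/*
 * MSR_STAR layout assumed above: bits 47:32 supply the SYSCALL CS (with
 * SS implied at +8) and, in legacy mode, bits 31:0 supply the 32-bit
 * entry IP; 64-bit targets instead come from MSR_LSTAR/MSR_CSTAR, with
 * RFLAGS masked by MSR_SYSCALL_MASK, matching the AMD/Intel definition
 * of SYSCALL.
 */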
2747 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
2748 {
2749 const struct x86_emulate_ops *ops = ctxt->ops;
2750 struct desc_struct cs, ss;
2751 u64 msr_data;
2752 u16 cs_sel, ss_sel;
2753 u64 efer = 0;
2755 ops->get_msr(ctxt, MSR_EFER, &efer);
2756 /* inject #GP if in real mode */
2757 if (ctxt->mode == X86EMUL_MODE_REAL)
2758 return emulate_gp(ctxt, 0);
2760 /*
2761 * Not recognized on AMD in compat mode (but is recognized in legacy
2762 * mode).
2763 */
2764 if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
2765 && !vendor_intel(ctxt))
2766 return emulate_ud(ctxt);
2768 /* sysenter/sysexit have not been tested in 64bit mode. */
2769 if (ctxt->mode == X86EMUL_MODE_PROT64)
2770 return X86EMUL_UNHANDLEABLE;
2772 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2773 if ((msr_data & 0xfffc) == 0x0)
2774 return emulate_gp(ctxt, 0);
2776 setup_syscalls_segments(ctxt, &cs, &ss);
2777 ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
2778 cs_sel = (u16)msr_data & ~SEGMENT_RPL_MASK;
2779 ss_sel = cs_sel + 8;
2780 if (efer & EFER_LMA) {
2781 cs.d = 0;
2782 cs.l = 1;
2783 }
2785 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2786 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2788 ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
2789 ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
2791 ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
2792 *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
2793 (u32)msr_data;
2794 if (efer & EFER_LMA)
2795 ctxt->mode = X86EMUL_MODE_PROT64;
2797 return X86EMUL_CONTINUE;
2798 }
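/*
 * SYSENTER always enters CPL0 at IA32_SYSENTER_EIP with SS = CS + 8
 * derived from IA32_SYSENTER_CS; a null selector in that MSR is the
 * architectural #GP condition checked above.
 */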
2800 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2801 {
2802 const struct x86_emulate_ops *ops = ctxt->ops;
2803 struct desc_struct cs, ss;
2804 u64 msr_data, rcx, rdx;
2805 int usermode;
2806 u16 cs_sel = 0, ss_sel = 0;
2808 /* inject #GP if in real mode or Virtual 8086 mode */
2809 if (ctxt->mode == X86EMUL_MODE_REAL ||
2810 ctxt->mode == X86EMUL_MODE_VM86)
2811 return emulate_gp(ctxt, 0);
2813 setup_syscalls_segments(ctxt, &cs, &ss);
2815 if ((ctxt->rex_prefix & 0x8) != 0x0)
2816 usermode = X86EMUL_MODE_PROT64;
2817 else
2818 usermode = X86EMUL_MODE_PROT32;
2820 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2821 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2823 cs.dpl = 3;
2824 ss.dpl = 3;
2825 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
2826 switch (usermode) {
2827 case X86EMUL_MODE_PROT32:
2828 cs_sel = (u16)(msr_data + 16);
2829 if ((msr_data & 0xfffc) == 0x0)
2830 return emulate_gp(ctxt, 0);
2831 ss_sel = (u16)(msr_data + 24);
2832 rcx = (u32)rcx;
2833 rdx = (u32)rdx;
2834 break;
2835 case X86EMUL_MODE_PROT64:
2836 cs_sel = (u16)(msr_data + 32);
2837 if (msr_data == 0x0)
2838 return emulate_gp(ctxt, 0);
2839 ss_sel = cs_sel + 8;
2840 cs.d = 0;
2841 cs.l = 1;
2842 if (emul_is_noncanonical_address(rcx, ctxt) ||
2843 emul_is_noncanonical_address(rdx, ctxt))
2844 return emulate_gp(ctxt, 0);
2845 break;
2846 }
2847 cs_sel |= SEGMENT_RPL_MASK;
2848 ss_sel |= SEGMENT_RPL_MASK;
2850 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2851 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2853 ctxt->_eip = rdx;
2854 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2856 return X86EMUL_CONTINUE;
2857 }
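/*
 * SYSEXIT selector math above follows the SDM: IA32_SYSENTER_CS + 16/24
 * yields the 32-bit user CS/SS, while +32 with SS = CS + 8 yields the
 * 64-bit user CS; RCX and RDX supply the new RSP and RIP respectively.
 */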
2859 static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
2860 {
2861 int iopl;
2862 if (ctxt->mode == X86EMUL_MODE_REAL)
2863 return false;
2864 if (ctxt->mode == X86EMUL_MODE_VM86)
2865 return true;
2866 iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
2867 return ctxt->ops->cpl(ctxt) > iopl;
2868 }
2870 #define VMWARE_PORT_VMPORT (0x5658)
2871 #define VMWARE_PORT_VMRPC (0x5659)
2873 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2874 u16 port, u16 len)
2875 {
2876 const struct x86_emulate_ops *ops = ctxt->ops;
2877 struct desc_struct tr_seg;
2878 u32 base3;
2879 int r;
2880 u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
2881 unsigned mask = (1 << len) - 1;
2882 unsigned long base;
2884 /*
2885 * VMware allows access to these ports even if denied
2886 * by TSS I/O permission bitmap. Mimic behavior.
2887 */
2888 if (enable_vmware_backdoor &&
2889 ((port == VMWARE_PORT_VMPORT) || (port == VMWARE_PORT_VMRPC)))
2890 return true;
2892 ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
2893 if (!tr_seg.p)
2894 return false;
2895 if (desc_limit_scaled(&tr_seg) < 103)
2896 return false;
2897 base = get_desc_base(&tr_seg);
2898 #ifdef CONFIG_X86_64
2899 base |= ((u64)base3) << 32;
2900 #endif
2901 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
2902 if (r != X86EMUL_CONTINUE)
2903 return false;
2904 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2905 return false;
2906 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
2907 if (r != X86EMUL_CONTINUE)
2908 return false;
2909 if ((perm >> bit_idx) & mask)
2910 return false;
2911 return true;
2912 }
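/*
 * The constants above come from the 32-bit TSS layout: the I/O bitmap
 * base pointer lives at offset 102, so a TSS whose scaled limit is
 * below 103 cannot contain it, and each port is one bit in the bitmap,
 * hence the port/8 byte index and bit_idx = port & 7.
 */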
2914 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
2915 u16 port, u16 len)
2916 {
2917 if (ctxt->perm_ok)
2918 return true;
2920 if (emulator_bad_iopl(ctxt))
2921 if (!emulator_io_port_access_allowed(ctxt, port, len))
2922 return false;
2924 ctxt->perm_ok = true;
2926 return true;
2927 }
2929 static void string_registers_quirk(struct x86_emulate_ctxt *ctxt)
2930 {
2931 /*
2932 * Intel CPUs mask the counter and pointers in quite strange
2933 * manner when ECX is zero due to REP-string optimizations.
2934 */
2935 #ifdef CONFIG_X86_64
2936 if (ctxt->ad_bytes != 4 || !vendor_intel(ctxt))
2937 return;
2939 *reg_write(ctxt, VCPU_REGS_RCX) = 0;
2941 switch (ctxt->b) {
2942 case 0xa4: /* movsb */
2943 case 0xa5: /* movsd/w */
2944 *reg_rmw(ctxt, VCPU_REGS_RSI) &= (u32)-1;
2945 fallthrough;
2946 case 0xaa: /* stosb */
2947 case 0xab: /* stosd/w */
2948 *reg_rmw(ctxt, VCPU_REGS_RDI) &= (u32)-1;
2949 }
2950 #endif
2951 }
2953 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
2954 struct tss_segment_16 *tss)
2955 {
2956 tss->ip = ctxt->_eip;
2957 tss->flag = ctxt->eflags;
2958 tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
2959 tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
2960 tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
2961 tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
2962 tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
2963 tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
2964 tss->si = reg_read(ctxt, VCPU_REGS_RSI);
2965 tss->di = reg_read(ctxt, VCPU_REGS_RDI);
2967 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
2968 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2969 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
2970 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
2971 tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
2972 }
2974 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2975 struct tss_segment_16 *tss)
2976 {
2977 int ret;
2978 u8 cpl;
2980 ctxt->_eip = tss->ip;
2981 ctxt->eflags = tss->flag | 2;
2982 *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
2983 *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
2984 *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
2985 *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
2986 *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
2987 *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
2988 *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
2989 *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
2991 /*
2992 * SDM says that segment selectors are loaded before segment
2993 * descriptors
2994 */
2995 set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
2996 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
2997 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
2998 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
2999 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3001 cpl = tss->cs & 3;
3003 /*
3004 * Now load segment descriptors. If fault happens at this stage
3005 * it is handled in a context of new task
3006 */
3007 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
3008 X86_TRANSFER_TASK_SWITCH, NULL);
3009 if (ret != X86EMUL_CONTINUE)
3010 return ret;
3011 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3012 X86_TRANSFER_TASK_SWITCH, NULL);
3013 if (ret != X86EMUL_CONTINUE)
3014 return ret;
3015 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3016 X86_TRANSFER_TASK_SWITCH, NULL);
3017 if (ret != X86EMUL_CONTINUE)
3018 return ret;
3019 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3020 X86_TRANSFER_TASK_SWITCH, NULL);
3021 if (ret != X86EMUL_CONTINUE)
3022 return ret;
3023 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3024 X86_TRANSFER_TASK_SWITCH, NULL);
3025 if (ret != X86EMUL_CONTINUE)
3026 return ret;
3028 return X86EMUL_CONTINUE;
3029 }
3031 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
3032 u16 tss_selector, u16 old_tss_sel,
3033 ulong old_tss_base, struct desc_struct *new_desc)
3034 {
3035 struct tss_segment_16 tss_seg;
3036 int ret;
3037 u32 new_tss_base = get_desc_base(new_desc);
3039 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3040 if (ret != X86EMUL_CONTINUE)
3041 return ret;
3043 save_state_to_tss16(ctxt, &tss_seg);
3045 ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3046 if (ret != X86EMUL_CONTINUE)
3047 return ret;
3049 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3050 if (ret != X86EMUL_CONTINUE)
3051 return ret;
3053 if (old_tss_sel != 0xffff) {
3054 tss_seg.prev_task_link = old_tss_sel;
3056 ret = linear_write_system(ctxt, new_tss_base,
3057 &tss_seg.prev_task_link,
3058 sizeof(tss_seg.prev_task_link));
3059 if (ret != X86EMUL_CONTINUE)
3060 return ret;
3061 }
3063 return load_state_from_tss16(ctxt, &tss_seg);
3064 }
3066 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
3067 struct tss_segment_32 *tss)
3068 {
3069 /* CR3 and ldt selector are not saved intentionally */
3070 tss->eip = ctxt->_eip;
3071 tss->eflags = ctxt->eflags;
3072 tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
3073 tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
3074 tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
3075 tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
3076 tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
3077 tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
3078 tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
3079 tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
3081 tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
3082 tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
3083 tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
3084 tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
3085 tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
3086 tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
3087 }
3089 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
3090 struct tss_segment_32 *tss)
3091 {
3092 int ret;
3093 u8 cpl;
3095 if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
3096 return emulate_gp(ctxt, 0);
3097 ctxt->_eip = tss->eip;
3098 ctxt->eflags = tss->eflags | 2;
3100 /* General purpose registers */
3101 *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
3102 *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
3103 *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
3104 *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
3105 *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
3106 *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
3107 *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
3108 *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
3110 /*
3111 * SDM says that segment selectors are loaded before segment
3112 * descriptors. This is important because CPL checks will
3113 * use CS.RPL.
3114 */
3115 set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
3116 set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
3117 set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
3118 set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
3119 set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
3120 set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
3121 set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
3123 /*
3124 * If we're switching between Protected Mode and VM86, we need to make
3125 * sure to update the mode before loading the segment descriptors so
3126 * that the selectors are interpreted correctly.
3127 */
3128 if (ctxt->eflags & X86_EFLAGS_VM) {
3129 ctxt->mode = X86EMUL_MODE_VM86;
3130 cpl = 3;
3131 } else {
3132 ctxt->mode = X86EMUL_MODE_PROT32;
3133 cpl = tss->cs & 3;
3134 }
3136 /*
3137 * Now load segment descriptors. If fault happens at this stage
3138 * it is handled in a context of new task
3139 */
3140 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
3141 cpl, X86_TRANSFER_TASK_SWITCH, NULL);
3142 if (ret != X86EMUL_CONTINUE)
3143 return ret;
3144 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
3145 X86_TRANSFER_TASK_SWITCH, NULL);
3146 if (ret != X86EMUL_CONTINUE)
3147 return ret;
3148 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
3149 X86_TRANSFER_TASK_SWITCH, NULL);
3150 if (ret != X86EMUL_CONTINUE)
3151 return ret;
3152 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
3153 X86_TRANSFER_TASK_SWITCH, NULL);
3154 if (ret != X86EMUL_CONTINUE)
3155 return ret;
3156 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
3157 X86_TRANSFER_TASK_SWITCH, NULL);
3158 if (ret != X86EMUL_CONTINUE)
3159 return ret;
3160 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
3161 X86_TRANSFER_TASK_SWITCH, NULL);
3162 if (ret != X86EMUL_CONTINUE)
3163 return ret;
3164 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
3165 X86_TRANSFER_TASK_SWITCH, NULL);
3167 return ret;
3168 }
3170 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
3171 u16 tss_selector, u16 old_tss_sel,
3172 ulong old_tss_base, struct desc_struct *new_desc)
3173 {
3174 struct tss_segment_32 tss_seg;
3175 int ret;
3176 u32 new_tss_base = get_desc_base(new_desc);
3177 u32 eip_offset = offsetof(struct tss_segment_32, eip);
3178 u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
3180 ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
3181 if (ret != X86EMUL_CONTINUE)
3182 return ret;
3184 save_state_to_tss32(ctxt, &tss_seg);
3186 /* Only GP registers and segment selectors are saved */
3187 ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
3188 ldt_sel_offset - eip_offset);
3189 if (ret != X86EMUL_CONTINUE)
3190 return ret;
3192 ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
3193 if (ret != X86EMUL_CONTINUE)
3194 return ret;
3196 if (old_tss_sel != 0xffff) {
3197 tss_seg.prev_task_link = old_tss_sel;
3199 ret = linear_write_system(ctxt, new_tss_base,
3200 &tss_seg.prev_task_link,
3201 sizeof(tss_seg.prev_task_link));
3202 if (ret != X86EMUL_CONTINUE)
3203 return ret;
3204 }
3206 return load_state_from_tss32(ctxt, &tss_seg);
3207 }
3209 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
3210 u16 tss_selector, int idt_index, int reason,
3211 bool has_error_code, u32 error_code)
3212 {
3213 const struct x86_emulate_ops *ops = ctxt->ops;
3214 struct desc_struct curr_tss_desc, next_tss_desc;
3215 int ret;
3216 u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
3217 ulong old_tss_base =
3218 ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
3219 u32 desc_limit;
3220 ulong desc_addr, dr7;
3222 /* FIXME: old_tss_base == ~0 ? */
3224 ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
3225 if (ret != X86EMUL_CONTINUE)
3226 return ret;
3227 ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
3228 if (ret != X86EMUL_CONTINUE)
3229 return ret;
3231 /* FIXME: check that next_tss_desc is tss */
3233 /*
3234 * Check privileges. The three cases are task switch caused by...
3235 *
3236 * 1. jmp/call/int to task gate: Check against DPL of the task gate
3237 * 2. Exception/IRQ/iret: No check is performed
3238 * 3. jmp/call to TSS/task-gate: No check is performed since the
3239 * hardware checks it before exiting.
3240 */
3241 if (reason == TASK_SWITCH_GATE) {
3242 if (idt_index != -1) {
3243 /* Software interrupts */
3244 struct desc_struct task_gate_desc;
3245 int dpl;
3247 ret = read_interrupt_descriptor(ctxt, idt_index,
3248 &task_gate_desc);
3249 if (ret != X86EMUL_CONTINUE)
3250 return ret;
3252 dpl = task_gate_desc.dpl;
3253 if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
3254 return emulate_gp(ctxt, (idt_index << 3) | 0x2);
3255 }
3256 }
3258 desc_limit = desc_limit_scaled(&next_tss_desc);
3259 if (!next_tss_desc.p ||
3260 ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
3261 desc_limit < 0x2b)) {
3262 return emulate_ts(ctxt, tss_selector & 0xfffc);
3263 }
3265 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3266 curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
3267 write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
3268 }
3270 if (reason == TASK_SWITCH_IRET)
3271 ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
3273 /* set back link to prev task only if NT bit is set in eflags
3274 note that old_tss_sel is not used after this point */
3275 if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
3276 old_tss_sel = 0xffff;
3278 if (next_tss_desc.type & 8)
3279 ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
3280 old_tss_base, &next_tss_desc);
3282 ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
3283 old_tss_base, &next_tss_desc);
3284 if (ret != X86EMUL_CONTINUE)
3285 return ret;
3287 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
3288 ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
3290 if (reason != TASK_SWITCH_IRET) {
3291 next_tss_desc.type |= (1 << 1); /* set busy flag */
3292 write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
3293 }
3295 ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
3296 ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
3298 if (has_error_code) {
3299 ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
3300 ctxt->lock_prefix = 0;
3301 ctxt->src.val = (unsigned long) error_code;
3302 ret = em_push(ctxt);
3303 }
3305 ops->get_dr(ctxt, 7, &dr7);
3306 ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));
3308 return ret;
3309 }
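/*
 * As on hardware, a task switch leaves CR0.TS set and clears the DR7
 * local breakpoint enables; the pushed error code (if any) uses the
 * new task's default operand size, which is why op_bytes is forced to
 * 4 or 2 from the TSS descriptor type before em_push().
 */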
3311 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
3312 u16 tss_selector, int idt_index, int reason,
3313 bool has_error_code, u32 error_code)
3314 {
3315 int rc;
3317 invalidate_registers(ctxt);
3318 ctxt->_eip = ctxt->eip;
3319 ctxt->dst.type = OP_NONE;
3321 rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
3322 has_error_code, error_code);
3324 if (rc == X86EMUL_CONTINUE) {
3325 ctxt->eip = ctxt->_eip;
3326 writeback_registers(ctxt);
3327 }
3329 return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
3330 }
3332 static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
3333 struct operand *op)
3334 {
3335 int df = (ctxt->eflags & X86_EFLAGS_DF) ? -op->count : op->count;
3337 register_address_increment(ctxt, reg, df * op->bytes);
3338 op->addr.mem.ea = register_address(ctxt, reg);
3339 }
3341 static int em_das(struct x86_emulate_ctxt *ctxt)
3342 {
3343 u8 al, old_al;
3344 bool af, cf, old_cf;
3346 cf = ctxt->eflags & X86_EFLAGS_CF;
3347 al = ctxt->dst.val;
3349 old_al = al;
3350 old_cf = cf;
3351 cf = false;
3352 af = ctxt->eflags & X86_EFLAGS_AF;
3353 if ((al & 0x0f) > 9 || af) {
3354 al -= 6;
3355 cf = old_cf | (al >= 250);
3356 af = true;
3357 } else {
3358 af = false;
3359 }
3360 if (old_al > 0x99 || old_cf) {
3361 al -= 0x60;
3362 cf = true;
3363 }
3365 ctxt->dst.val = al;
3366 /* Set PF, ZF, SF */
3367 ctxt->src.type = OP_IMM;
3368 ctxt->src.val = 0;
3369 ctxt->src.bytes = 1;
3370 fastop(ctxt, em_or);
3371 ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
3372 if (cf)
3373 ctxt->eflags |= X86_EFLAGS_CF;
3374 if (af)
3375 ctxt->eflags |= X86_EFLAGS_AF;
3376 return X86EMUL_CONTINUE;
3377 }
3379 static int em_aam(struct x86_emulate_ctxt *ctxt)
3380 {
3381 u8 al, ah;
3383 if (ctxt->src.val == 0)
3384 return emulate_de(ctxt);
3386 al = ctxt->dst.val & 0xff;
3387 ah = al / ctxt->src.val;
3388 al %= ctxt->src.val;
3390 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
3392 /* Set PF, ZF, SF */
3393 ctxt->src.type = OP_IMM;
3394 ctxt->src.val = 0;
3395 ctxt->src.bytes = 1;
3396 fastop(ctxt, em_or);
3398 return X86EMUL_CONTINUE;
3399 }
3401 static int em_aad(struct x86_emulate_ctxt *ctxt)
3402 {
3403 u8 al = ctxt->dst.val & 0xff;
3404 u8 ah = (ctxt->dst.val >> 8) & 0xff;
3406 al = (al + (ah * ctxt->src.val)) & 0xff;
3408 ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
3410 /* Set PF, ZF, SF */
3411 ctxt->src.type = OP_IMM;
3412 ctxt->src.val = 0;
3413 ctxt->src.bytes = 1;
3414 fastop(ctxt, em_or);
3416 return X86EMUL_CONTINUE;
3417 }
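/*
 * AAM/AAD above reuse fastop(em_or) with a zero immediate purely as a
 * cheap way to recompute PF/ZF/SF on the result in AL; OR-ing with
 * zero leaves the destination value itself unchanged.
 */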
3419 static int em_call(struct x86_emulate_ctxt *ctxt)
3420 {
3421 int rc;
3422 long rel = ctxt->src.val;
3424 ctxt->src.val = (unsigned long)ctxt->_eip;
3425 rc = jmp_rel(ctxt, rel);
3426 if (rc != X86EMUL_CONTINUE)
3427 return rc;
3428 return em_push(ctxt);
3429 }
3431 static int em_call_far(struct x86_emulate_ctxt *ctxt)
3432 {
3433 u16 sel, old_cs;
3434 ulong old_eip;
3435 int rc;
3436 struct desc_struct old_desc, new_desc;
3437 const struct x86_emulate_ops *ops = ctxt->ops;
3438 int cpl = ctxt->ops->cpl(ctxt);
3439 enum x86emul_mode prev_mode = ctxt->mode;
3441 old_eip = ctxt->_eip;
3442 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
3444 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
3445 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
3446 X86_TRANSFER_CALL_JMP, &new_desc);
3447 if (rc != X86EMUL_CONTINUE)
3448 return rc;
3450 rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
3451 if (rc != X86EMUL_CONTINUE)
3452 goto fail;
3454 ctxt->src.val = old_cs;
3455 rc = em_push(ctxt);
3456 if (rc != X86EMUL_CONTINUE)
3457 goto fail;
3459 ctxt->src.val = old_eip;
3460 rc = em_push(ctxt);
3461 /* If we failed, we tainted the memory, but the very least we should
3462 restore cs */
3463 if (rc != X86EMUL_CONTINUE) {
3464 pr_warn_once("faulting far call emulation tainted memory\n");
3465 goto fail;
3466 }
3467 return rc;
3468 fail:
3469 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3470 ctxt->mode = prev_mode;
3472 return rc;
3474 }
3475 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
3476 {
3477 int rc;
3478 unsigned long eip;
3480 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
3481 if (rc != X86EMUL_CONTINUE)
3482 return rc;
3483 rc = assign_eip_near(ctxt, eip);
3484 if (rc != X86EMUL_CONTINUE)
3485 return rc;
3486 rsp_increment(ctxt, ctxt->src.val);
3487 return X86EMUL_CONTINUE;
3488 }
3490 static int em_xchg(struct x86_emulate_ctxt *ctxt)
3491 {
3492 /* Write back the register source. */
3493 ctxt->src.val = ctxt->dst.val;
3494 write_register_operand(&ctxt->src);
3496 /* Write back the memory destination with implicit LOCK prefix. */
3497 ctxt->dst.val = ctxt->src.orig_val;
3498 ctxt->lock_prefix = 1;
3499 return X86EMUL_CONTINUE;
3500 }
3502 static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
3503 {
3504 ctxt->dst.val = ctxt->src2.val;
3505 return fastop(ctxt, em_imul);
3506 }
3508 static int em_cwd(struct x86_emulate_ctxt *ctxt)
3509 {
3510 ctxt->dst.type = OP_REG;
3511 ctxt->dst.bytes = ctxt->src.bytes;
3512 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
3513 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
3515 return X86EMUL_CONTINUE;
3516 }
3518 static int em_rdpid(struct x86_emulate_ctxt *ctxt)
3519 {
3520 u64 tsc_aux = 0;
3522 if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
3523 return emulate_ud(ctxt);
3524 ctxt->dst.val = tsc_aux;
3525 return X86EMUL_CONTINUE;
3526 }
3528 static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
3529 {
3530 u64 tsc = 0;
3532 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
3533 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
3534 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
3535 return X86EMUL_CONTINUE;
3536 }
3538 static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
3539 {
3540 u64 pmc;
3542 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
3543 return emulate_gp(ctxt, 0);
3544 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
3545 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
3546 return X86EMUL_CONTINUE;
3547 }
3549 static int em_mov(struct x86_emulate_ctxt *ctxt)
3550 {
3551 memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
3552 return X86EMUL_CONTINUE;
3553 }
3555 static int em_movbe(struct x86_emulate_ctxt *ctxt)
3556 {
3557 u16 tmp;
3559 if (!ctxt->ops->guest_has_movbe(ctxt))
3560 return emulate_ud(ctxt);
3562 switch (ctxt->op_bytes) {
3563 case 2:
3564 /*
3565 * From MOVBE definition: "...When the operand size is 16 bits,
3566 * the upper word of the destination register remains unchanged
3567 * ..."
3568 *
3569 * Both casting ->valptr and ->val to u16 breaks strict aliasing
3570 * rules so we have to do the operation almost per hand.
3571 */
3572 tmp = (u16)ctxt->src.val;
3573 ctxt->dst.val &= ~0xffffUL;
3574 ctxt->dst.val |= (unsigned long)swab16(tmp);
3575 break;
3576 case 4:
3577 ctxt->dst.val = swab32((u32)ctxt->src.val);
3578 break;
3579 case 8:
3580 ctxt->dst.val = swab64(ctxt->src.val);
3581 break;
3582 default:
3583 BUG();
3584 }
3585 return X86EMUL_CONTINUE;
3586 }
3588 static int em_cr_write(struct x86_emulate_ctxt *ctxt)
3589 {
3590 if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
3591 return emulate_gp(ctxt, 0);
3593 /* Disable writeback. */
3594 ctxt->dst.type = OP_NONE;
3595 return X86EMUL_CONTINUE;
3596 }
3598 static int em_dr_write(struct x86_emulate_ctxt *ctxt)
3599 {
3600 unsigned long val;
3602 if (ctxt->mode == X86EMUL_MODE_PROT64)
3603 val = ctxt->src.val & ~0ULL;
3604 else
3605 val = ctxt->src.val & ~0U;
3607 /* #UD condition is already handled. */
3608 if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
3609 return emulate_gp(ctxt, 0);
3611 /* Disable writeback. */
3612 ctxt->dst.type = OP_NONE;
3613 return X86EMUL_CONTINUE;
3614 }
3616 static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
3617 {
3618 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3619 u64 msr_data;
3620 int r;
3622 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
3623 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
3624 r = ctxt->ops->set_msr(ctxt, msr_index, msr_data);
3626 if (r == X86EMUL_IO_NEEDED)
3627 return r;
3629 if (r > 0)
3630 return emulate_gp(ctxt, 0);
3632 return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
3633 }
3635 static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3636 {
3637 u64 msr_index = reg_read(ctxt, VCPU_REGS_RCX);
3638 u64 msr_data;
3639 int r;
3641 r = ctxt->ops->get_msr(ctxt, msr_index, &msr_data);
3643 if (r == X86EMUL_IO_NEEDED)
3644 return r;
3646 if (r)
3647 return emulate_gp(ctxt, 0);
3649 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3650 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3651 return X86EMUL_CONTINUE;
3652 }
3654 static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
3655 {
3656 if (segment > VCPU_SREG_GS &&
3657 (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3658 ctxt->ops->cpl(ctxt) > 0)
3659 return emulate_gp(ctxt, 0);
3661 ctxt->dst.val = get_segment_selector(ctxt, segment);
3662 if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
3663 ctxt->dst.bytes = 2;
3664 return X86EMUL_CONTINUE;
3665 }
3667 static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
3668 {
3669 if (ctxt->modrm_reg > VCPU_SREG_GS)
3670 return emulate_ud(ctxt);
3672 return em_store_sreg(ctxt, ctxt->modrm_reg);
3673 }
3675 static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
3676 {
3677 u16 sel = ctxt->src.val;
3679 if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
3680 return emulate_ud(ctxt);
3682 if (ctxt->modrm_reg == VCPU_SREG_SS)
3683 ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
3685 /* Disable writeback. */
3686 ctxt->dst.type = OP_NONE;
3687 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
3688 }
3690 static int em_sldt(struct x86_emulate_ctxt *ctxt)
3691 {
3692 return em_store_sreg(ctxt, VCPU_SREG_LDTR);
3693 }
3695 static int em_lldt(struct x86_emulate_ctxt *ctxt)
3696 {
3697 u16 sel = ctxt->src.val;
3699 /* Disable writeback. */
3700 ctxt->dst.type = OP_NONE;
3701 return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
3702 }
3704 static int em_str(struct x86_emulate_ctxt *ctxt)
3705 {
3706 return em_store_sreg(ctxt, VCPU_SREG_TR);
3707 }
3709 static int em_ltr(struct x86_emulate_ctxt *ctxt)
3710 {
3711 u16 sel = ctxt->src.val;
3713 /* Disable writeback. */
3714 ctxt->dst.type = OP_NONE;
3715 return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
3716 }
3718 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
3719 {
3720 int rc;
3721 ulong linear;
3723 rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
3724 if (rc == X86EMUL_CONTINUE)
3725 ctxt->ops->invlpg(ctxt, linear);
3726 /* Disable writeback. */
3727 ctxt->dst.type = OP_NONE;
3728 return X86EMUL_CONTINUE;
3729 }
3731 static int em_clts(struct x86_emulate_ctxt *ctxt)
3732 {
3733 ulong cr0;
3735 cr0 = ctxt->ops->get_cr(ctxt, 0);
3736 cr0 &= ~X86_CR0_TS;
3737 ctxt->ops->set_cr(ctxt, 0, cr0);
3738 return X86EMUL_CONTINUE;
3739 }
3741 static int em_hypercall(struct x86_emulate_ctxt *ctxt)
3742 {
3743 int rc = ctxt->ops->fix_hypercall(ctxt);
3745 if (rc != X86EMUL_CONTINUE)
3746 return rc;
3748 /* Let the processor re-execute the fixed hypercall */
3749 ctxt->_eip = ctxt->eip;
3750 /* Disable writeback. */
3751 ctxt->dst.type = OP_NONE;
3752 return X86EMUL_CONTINUE;
3753 }
3755 static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
3756 void (*get)(struct x86_emulate_ctxt *ctxt,
3757 struct desc_ptr *ptr))
3758 {
3759 struct desc_ptr desc_ptr;
3761 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3762 ctxt->ops->cpl(ctxt) > 0)
3763 return emulate_gp(ctxt, 0);
3765 if (ctxt->mode == X86EMUL_MODE_PROT64)
3766 ctxt->op_bytes = 8;
3767 get(ctxt, &desc_ptr);
3768 if (ctxt->op_bytes == 2) {
3769 ctxt->op_bytes = 4;
3770 desc_ptr.address &= 0x00ffffff;
3771 }
3772 /* Disable writeback. */
3773 ctxt->dst.type = OP_NONE;
3774 return segmented_write_std(ctxt, ctxt->dst.addr.mem,
3775 &desc_ptr, 2 + ctxt->op_bytes);
3776 }
3778 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
3779 {
3780 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
3781 }
3783 static int em_sidt(struct x86_emulate_ctxt *ctxt)
3784 {
3785 return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
3786 }
3788 static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
3789 {
3790 struct desc_ptr desc_ptr;
3791 int rc;
3793 if (ctxt->mode == X86EMUL_MODE_PROT64)
3794 ctxt->op_bytes = 8;
3795 rc = read_descriptor(ctxt, ctxt->src.addr.mem,
3796 &desc_ptr.size, &desc_ptr.address,
3797 ctxt->op_bytes);
3798 if (rc != X86EMUL_CONTINUE)
3799 return rc;
3800 if (ctxt->mode == X86EMUL_MODE_PROT64 &&
3801 emul_is_noncanonical_address(desc_ptr.address, ctxt))
3802 return emulate_gp(ctxt, 0);
3803 if (lgdt)
3804 ctxt->ops->set_gdt(ctxt, &desc_ptr);
3805 else
3806 ctxt->ops->set_idt(ctxt, &desc_ptr);
3807 /* Disable writeback. */
3808 ctxt->dst.type = OP_NONE;
3809 return X86EMUL_CONTINUE;
3810 }
3812 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
3813 {
3814 return em_lgdt_lidt(ctxt, true);
3815 }
3817 static int em_lidt(struct x86_emulate_ctxt *ctxt)
3818 {
3819 return em_lgdt_lidt(ctxt, false);
3820 }
3822 static int em_smsw(struct x86_emulate_ctxt *ctxt)
3823 {
3824 if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
3825 ctxt->ops->cpl(ctxt) > 0)
3826 return emulate_gp(ctxt, 0);
3828 if (ctxt->dst.type == OP_MEM)
3829 ctxt->dst.bytes = 2;
3830 ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
3831 return X86EMUL_CONTINUE;
3832 }
3834 static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3835 {
3836 ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
3837 | (ctxt->src.val & 0x0f));
3838 ctxt->dst.type = OP_NONE;
3839 return X86EMUL_CONTINUE;
3840 }
3842 static int em_loop(struct x86_emulate_ctxt *ctxt)
3843 {
3844 int rc = X86EMUL_CONTINUE;
3846 register_address_increment(ctxt, VCPU_REGS_RCX, -1);
3847 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3848 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3849 rc = jmp_rel(ctxt, ctxt->src.val);
3851 return rc;
3852 }
3854 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3855 {
3856 int rc = X86EMUL_CONTINUE;
3858 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3859 rc = jmp_rel(ctxt, ctxt->src.val);
3861 return rc;
3862 }
3864 static int em_in(struct x86_emulate_ctxt *ctxt)
3865 {
3866 if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
3867 &ctxt->dst.val))
3868 return X86EMUL_IO_NEEDED;
3870 return X86EMUL_CONTINUE;
3871 }
3873 static int em_out(struct x86_emulate_ctxt *ctxt)
3874 {
3875 ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
3876 ctxt->src.val);
3877 /* Disable writeback. */
3878 ctxt->dst.type = OP_NONE;
3879 return X86EMUL_CONTINUE;
3880 }
3882 static int em_cli(struct x86_emulate_ctxt *ctxt)
3883 {
3884 if (emulator_bad_iopl(ctxt))
3885 return emulate_gp(ctxt, 0);
3887 ctxt->eflags &= ~X86_EFLAGS_IF;
3888 return X86EMUL_CONTINUE;
3889 }
3891 static int em_sti(struct x86_emulate_ctxt *ctxt)
3892 {
3893 if (emulator_bad_iopl(ctxt))
3894 return emulate_gp(ctxt, 0);
3896 ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
3897 ctxt->eflags |= X86_EFLAGS_IF;
3898 return X86EMUL_CONTINUE;
3899 }
3901 static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3902 {
3903 u32 eax, ebx, ecx, edx;
3904 u64 msr = 0;
3906 ctxt->ops->get_msr(ctxt, MSR_MISC_FEATURES_ENABLES, &msr);
3907 if (msr & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3908 ctxt->ops->cpl(ctxt)) {
3909 return emulate_gp(ctxt, 0);
3910 }
3912 eax = reg_read(ctxt, VCPU_REGS_RAX);
3913 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3914 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
3915 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3916 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3917 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3918 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3919 return X86EMUL_CONTINUE;
3920 }
3922 static int em_sahf(struct x86_emulate_ctxt *ctxt)
3923 {
3924 u32 flags;
3926 flags = X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
3927 X86_EFLAGS_SF;
3928 flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
3930 ctxt->eflags &= ~0xffUL;
3931 ctxt->eflags |= flags | X86_EFLAGS_FIXED;
3932 return X86EMUL_CONTINUE;
3933 }
3935 static int em_lahf(struct x86_emulate_ctxt *ctxt)
3936 {
3937 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3938 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3939 return X86EMUL_CONTINUE;
3940 }
3942 static int em_bswap(struct x86_emulate_ctxt *ctxt)
3943 {
3944 switch (ctxt->op_bytes) {
3945 #ifdef CONFIG_X86_64
3946 case 8:
3947 asm("bswap %0" : "+r"(ctxt->dst.val));
3948 break;
3949 #endif
3950 default:
3951 asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
3952 break;
3953 }
3954 return X86EMUL_CONTINUE;
3955 }
3957 static int em_clflush(struct x86_emulate_ctxt *ctxt)
3958 {
3959 /* emulating clflush regardless of cpuid */
3960 return X86EMUL_CONTINUE;
3961 }
3963 static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
3964 {
3965 /* emulating clflushopt regardless of cpuid */
3966 return X86EMUL_CONTINUE;
3967 }
3969 static int em_movsxd(struct x86_emulate_ctxt *ctxt)
3970 {
3971 ctxt->dst.val = (s32) ctxt->src.val;
3972 return X86EMUL_CONTINUE;
3973 }
3975 static int check_fxsr(struct x86_emulate_ctxt *ctxt)
3976 {
3977 if (!ctxt->ops->guest_has_fxsr(ctxt))
3978 return emulate_ud(ctxt);
3980 if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
3981 return emulate_nm(ctxt);
3983 /*
3984 * Don't emulate a case that should never be hit, instead of working
3985 * around a lack of fxsave64/fxrstor64 on old compilers.
3986 */
3987 if (ctxt->mode >= X86EMUL_MODE_PROT64)
3988 return X86EMUL_UNHANDLEABLE;
3990 return X86EMUL_CONTINUE;
3991 }
3993 /*
3994 * Hardware doesn't save and restore XMM 0-7 without CR4.OSFXSR, but does save
3995 * and restore MXCSR.
3996 */
3997 static size_t __fxstate_size(int nregs)
3998 {
3999 return offsetof(struct fxregs_state, xmm_space[0]) + nregs * 16;
4000 }
4002 static inline size_t fxstate_size(struct x86_emulate_ctxt *ctxt)
4003 {
4004 bool cr4_osfxsr;
4005 if (ctxt->mode == X86EMUL_MODE_PROT64)
4006 return __fxstate_size(16);
4008 cr4_osfxsr = ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR;
4009 return __fxstate_size(cr4_osfxsr ? 8 : 0);
4010 }
4012 /*
4013 * FXSAVE and FXRSTOR have 4 different formats depending on execution mode,
4014 * 1) 16 bit mode
4015 * 2) 32 bit mode
4016 * - like (1), but FIP and FDP (foo) are only 16 bit. At least Intel CPUs
4017 * preserve whole 32 bit values, though, so (1) and (2) are the same wrt.
4018 * FIP and FDP.
4019 * 3) 64-bit mode with REX.W prefix
4020 * - like (2), but XMM 8-15 are being saved and restored
4021 * 4) 64-bit mode without REX.W prefix
4022 * - like (3), but FIP and FDP are 64 bit
4023 *
4024 * Emulation uses (3) for (1) and (2) and preserves XMM 8-15 to reach the
4025 * desired result. (4) is not emulated.
4026 *
4027 * Note: Guest and host CPUID.(EAX=07H,ECX=0H):EBX[bit 13] (deprecate FPU CS
4028 * and FPU DS) should match.
4029 */
4030 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
4031 {
4032 struct fxregs_state fx_state;
4033 int rc;
4035 rc = check_fxsr(ctxt);
4036 if (rc != X86EMUL_CONTINUE)
4037 return rc;
4039 kvm_fpu_get();
4041 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
4043 kvm_fpu_put();
4045 if (rc != X86EMUL_CONTINUE)
4046 return rc;
4048 return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state,
4049 fxstate_size(ctxt));
4050 }
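/*
 * asm_safe() (a macro defined earlier in this file) wraps the
 * instruction with exception fixup, so a faulting FXSAVE/FXRSTOR is
 * reported as X86EMUL_UNHANDLEABLE instead of crashing the host; the
 * bare comma in its argument list is the macro's (empty) extra
 * output-constraint slot, not a typo.
 */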
4052 /*
4053 * FXRSTOR might restore XMM registers not provided by the guest. Fill
4054 * in the host registers (via FXSAVE) instead, so they won't be modified.
4055 * (preemption has to stay disabled until FXRSTOR).
4056 *
4057 * Use noinline to keep the stack for other functions called by callers small.
4058 */
4059 static noinline int fxregs_fixup(struct fxregs_state *fx_state,
4060 const size_t used_size)
4061 {
4062 struct fxregs_state fx_tmp;
4063 int rc;
4065 rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
4066 memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
4067 __fxstate_size(16) - used_size);
4069 return rc;
4070 }
4072 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
4073 {
4074 struct fxregs_state fx_state;
4075 int rc;
4076 size_t size;
4078 rc = check_fxsr(ctxt);
4079 if (rc != X86EMUL_CONTINUE)
4080 return rc;
4082 size = fxstate_size(ctxt);
4083 rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
4084 if (rc != X86EMUL_CONTINUE)
4085 return rc;
4087 kvm_fpu_get();
4089 if (size < __fxstate_size(16)) {
4090 rc = fxregs_fixup(&fx_state, size);
4091 if (rc != X86EMUL_CONTINUE)
4092 goto out;
4093 }
4095 if (fx_state.mxcsr >> 16) {
4096 rc = emulate_gp(ctxt, 0);
4097 goto out;
4098 }
4100 if (rc == X86EMUL_CONTINUE)
4101 rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
4103 out:
4104 kvm_fpu_put();
4106 return rc;
4107 }
4109 static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
4110 {
4111 u32 eax, ecx, edx;
4113 eax = reg_read(ctxt, VCPU_REGS_RAX);
4114 edx = reg_read(ctxt, VCPU_REGS_RDX);
4115 ecx = reg_read(ctxt, VCPU_REGS_RCX);
4117 if (ctxt->ops->set_xcr(ctxt, ecx, ((u64)edx << 32) | eax))
4118 return emulate_gp(ctxt, 0);
4120 return X86EMUL_CONTINUE;
4121 }
4123 static bool valid_cr(int nr)
4124 {
4125 switch (nr) {
4126 case 0:
4127 case 2 ... 4:
4128 case 8:
4129 return true;
4130 default:
4131 return false;
4132 }
4133 }
4135 static int check_cr_access(struct x86_emulate_ctxt *ctxt)
4136 {
4137 if (!valid_cr(ctxt->modrm_reg))
4138 return emulate_ud(ctxt);
4140 return X86EMUL_CONTINUE;
4141 }
4143 static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
4144 {
4145 u64 dr7;
4147 ctxt->ops->get_dr(ctxt, 7, &dr7);
4149 /* Check if DR7.Global_Enable is set */
4150 return dr7 & (1 << 13);
4151 }
4153 static int check_dr_read(struct x86_emulate_ctxt *ctxt)
4154 {
4155 int dr = ctxt->modrm_reg;
4156 u64 cr4;
4158 if (dr > 7)
4159 return emulate_ud(ctxt);
4161 cr4 = ctxt->ops->get_cr(ctxt, 4);
4162 if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
4163 return emulate_ud(ctxt);
4165 if (check_dr7_gd(ctxt)) {
4166 ulong dr6;
4168 ctxt->ops->get_dr(ctxt, 6, &dr6);
4169 dr6 &= ~DR_TRAP_BITS;
4170 dr6 |= DR6_BD | DR6_ACTIVE_LOW;
4171 ctxt->ops->set_dr(ctxt, 6, dr6);
4172 return emulate_db(ctxt);
4173 }
4175 return X86EMUL_CONTINUE;
4176 }
4178 static int check_dr_write(struct x86_emulate_ctxt *ctxt)
4179 {
4180 u64 new_val = ctxt->src.val64;
4181 int dr = ctxt->modrm_reg;
4183 if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
4184 return emulate_gp(ctxt, 0);
4186 return check_dr_read(ctxt);
4187 }
4189 static int check_svme(struct x86_emulate_ctxt *ctxt)
4190 {
4191 u64 efer = 0;
4193 ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
4195 if (!(efer & EFER_SVME))
4196 return emulate_ud(ctxt);
4198 return X86EMUL_CONTINUE;
4199 }
4201 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
4202 {
4203 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
4205 /* Valid physical address? */
4206 if (rax & 0xffff000000000000ULL)
4207 return emulate_gp(ctxt, 0);
4209 return check_svme(ctxt);
4210 }
4212 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
4213 {
4214 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4216 if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
4217 return emulate_gp(ctxt, 0);
4219 return X86EMUL_CONTINUE;
4220 }
4222 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
4223 {
4224 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
4225 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
4227 /*
4228 * VMware allows access to these Pseudo-PMCs even when read via RDPMC
4229 * in Ring3 when CR4.PCE=0.
4230 */
4231 if (enable_vmware_backdoor && is_vmware_backdoor_pmc(rcx))
4232 return X86EMUL_CONTINUE;
4234 /*
4235 * If CR4.PCE is set, the SDM requires CPL=0 or CR0.PE=0. The CR0.PE
4236 * check however is unnecessary because CPL is always 0 outside
4237 * protected mode.
4238 */
4239 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
4240 ctxt->ops->check_pmc(ctxt, rcx))
4241 return emulate_gp(ctxt, 0);
4243 return X86EMUL_CONTINUE;
4244 }
4246 static int check_perm_in(struct x86_emulate_ctxt *ctxt)
4247 {
4248 ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
4249 if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
4250 return emulate_gp(ctxt, 0);
4252 return X86EMUL_CONTINUE;
4253 }
4255 static int check_perm_out(struct x86_emulate_ctxt *ctxt)
4256 {
4257 ctxt->src.bytes = min(ctxt->src.bytes, 4u);
4258 if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
4259 return emulate_gp(ctxt, 0);
4261 return X86EMUL_CONTINUE;
4262 }
4264 #define D(_y) { .flags = (_y) }
4265 #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
4266 #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
4267 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4268 #define N D(NotImpl)
4269 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
4270 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
4271 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
4272 #define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
4273 #define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
4274 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
4275 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4276 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
4277 #define II(_f, _e, _i) \
4278 { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
4279 #define IIP(_f, _e, _i, _p) \
4280 { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
4281 .intercept = x86_intercept_##_i, .check_perm = (_p) }
4282 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
4284 #define D2bv(_f) D((_f) | ByteOp), D(_f)
4285 #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
4286 #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
4287 #define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
4288 #define I2bvIP(_f, _e, _i, _p) \
4289 IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
4291 #define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
4292 F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
4293 F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
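/*
 * F6ALU() covers the six classic encodings every 8086 ALU opcode
 * family provides: r/m,r and r,r/m in both byte and word/dword form,
 * plus the AL/eAX,imm short forms, so each use below fills opcodes
 * xx+0 through xx+5 of its row.
 */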
4295 static const struct opcode group7_rm0[] = {
4296 N,
4297 I(SrcNone | Priv | EmulateOnUD, em_hypercall),
4298 N, N, N, N, N, N,
4299 };
4301 static const struct opcode group7_rm1[] = {
4302 DI(SrcNone | Priv, monitor),
4303 DI(SrcNone | Priv, mwait),
4304 N, N, N, N, N, N,
4305 };
4307 static const struct opcode group7_rm2[] = {
4308 N,
4309 II(ImplicitOps | Priv, em_xsetbv, xsetbv),
4310 N, N, N, N, N, N,
4311 };
4313 static const struct opcode group7_rm3[] = {
4314 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
4315 II(SrcNone | Prot | EmulateOnUD, em_hypercall, vmmcall),
4316 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
4317 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
4318 DIP(SrcNone | Prot | Priv, stgi, check_svme),
4319 DIP(SrcNone | Prot | Priv, clgi, check_svme),
4320 DIP(SrcNone | Prot | Priv, skinit, check_svme),
4321 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
4322 };
4324 static const struct opcode group7_rm7[] = {
4325 N,
4326 DIP(SrcNone, rdtscp, check_rdtsc),
4327 N, N, N, N, N, N,
4328 };
4330 static const struct opcode group1[] = {
4331 F(Lock, em_add),
4332 F(Lock | PageTable, em_or),
4333 F(Lock, em_adc),
4334 F(Lock, em_sbb),
4335 F(Lock | PageTable, em_and),
4336 F(Lock, em_sub),
4337 F(Lock, em_xor),
4338 F(NoWrite, em_cmp),
4339 };
4341 static const struct opcode group1A[] = {
4342 I(DstMem | SrcNone | Mov | Stack | IncSP | TwoMemOp, em_pop), N, N, N, N, N, N, N,
4343 };
4345 static const struct opcode group2[] = {
4346 F(DstMem | ModRM, em_rol),
4347 F(DstMem | ModRM, em_ror),
4348 F(DstMem | ModRM, em_rcl),
4349 F(DstMem | ModRM, em_rcr),
4350 F(DstMem | ModRM, em_shl),
4351 F(DstMem | ModRM, em_shr),
4352 F(DstMem | ModRM, em_shl),
4353 F(DstMem | ModRM, em_sar),
4354 };
4356 static const struct opcode group3[] = {
4357 F(DstMem | SrcImm | NoWrite, em_test),
4358 F(DstMem | SrcImm | NoWrite, em_test),
4359 F(DstMem | SrcNone | Lock, em_not),
4360 F(DstMem | SrcNone | Lock, em_neg),
4361 F(DstXacc | Src2Mem, em_mul_ex),
4362 F(DstXacc | Src2Mem, em_imul_ex),
4363 F(DstXacc | Src2Mem, em_div_ex),
4364 F(DstXacc | Src2Mem, em_idiv_ex),
4365 };
4367 static const struct opcode group4[] = {
4368 F(ByteOp | DstMem | SrcNone | Lock, em_inc),
4369 F(ByteOp | DstMem | SrcNone | Lock, em_dec),
4370 N, N, N, N, N, N,
4371 };
4373 static const struct opcode group5[] = {
4374 F(DstMem | SrcNone | Lock, em_inc),
4375 F(DstMem | SrcNone | Lock, em_dec),
4376 I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
4377 I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
4378 I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
4379 I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
4380 I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
4381 };
4383 static const struct opcode group6[] = {
4384 II(Prot | DstMem, em_sldt, sldt),
4385 II(Prot | DstMem, em_str, str),
4386 II(Prot | Priv | SrcMem16, em_lldt, lldt),
4387 II(Prot | Priv | SrcMem16, em_ltr, ltr),
4388 N, N, N, N,
4389 };
4391 static const struct group_dual group7 = { {
4392 II(Mov | DstMem, em_sgdt, sgdt),
4393 II(Mov | DstMem, em_sidt, sidt),
4394 II(SrcMem | Priv, em_lgdt, lgdt),
4395 II(SrcMem | Priv, em_lidt, lidt),
4396 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4397 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4398 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
4399 }, {
4400 EXT(0, group7_rm0),
4401 EXT(0, group7_rm1),
4402 EXT(0, group7_rm2),
4403 EXT(0, group7_rm3),
4404 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
4405 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
4406 EXT(0, group7_rm7),
4407 } };
4409 static const struct opcode group8[] = {
4410 N, N, N, N,
4411 F(DstMem | SrcImmByte | NoWrite, em_bt),
4412 F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
4413 F(DstMem | SrcImmByte | Lock, em_btr),
4414 F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
4415 };
4417 /*
4418 * The "memory" destination is actually always a register, since we come
4419 * from the register case of group9.
4420 */
4421 static const struct gprefix pfx_0f_c7_7 = {
4422 N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
4423 };
4426 static const struct group_dual group9 = { {
4427 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
4428 }, {
4429 N, N, N, N, N, N, N,
4430 GP(0, &pfx_0f_c7_7),
4431 } };
4433 static const struct opcode group11[] = {
4434 I(DstMem | SrcImm | Mov | PageTable, em_mov),
4435 X7(D(Undefined)),
4436 };
4438 static const struct gprefix pfx_0f_ae_7 = {
4439 I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
4440 };
4442 static const struct group_dual group15 = { {
4443 I(ModRM | Aligned16, em_fxsave),
4444 I(ModRM | Aligned16, em_fxrstor),
4445 N, N, N, N, N, GP(0, &pfx_0f_ae_7),
4446 }, {
4447 N, N, N, N, N, N, N, N,
4448 } };
4450 static const struct gprefix pfx_0f_6f_0f_7f = {
4451 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
4452 };
4454 static const struct instr_dual instr_dual_0f_2b = {
4455 I(0, em_mov), N
4456 };
4458 static const struct gprefix pfx_0f_2b = {
4459 ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
4460 };
4462 static const struct gprefix pfx_0f_10_0f_11 = {
4463 I(Unaligned, em_mov), I(Unaligned, em_mov), N, N,
4464 };
4466 static const struct gprefix pfx_0f_28_0f_29 = {
4467 I(Aligned, em_mov), I(Aligned, em_mov), N, N,
4468 };
4470 static const struct gprefix pfx_0f_e7 = {
4471 N, I(Sse, em_mov), N, N,
4472 };
4474 static const struct escape escape_d9 = { {
4475 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstcw),
4476 }, {
4477 /* 0xC0 - 0xC7 */
4478 N, N, N, N, N, N, N, N,
4479 /* 0xC8 - 0xCF */
4480 N, N, N, N, N, N, N, N,
4481 /* 0xD0 - 0xD7 */
4482 N, N, N, N, N, N, N, N,
4483 /* 0xD8 - 0xDF */
4484 N, N, N, N, N, N, N, N,
4485 /* 0xE0 - 0xE7 */
4486 N, N, N, N, N, N, N, N,
4487 /* 0xE8 - 0xEF */
4488 N, N, N, N, N, N, N, N,
4489 /* 0xF0 - 0xF7 */
4490 N, N, N, N, N, N, N, N,
4491 /* 0xF8 - 0xFF */
4492 N, N, N, N, N, N, N, N,
4493 } };
4495 static const struct escape escape_db = { {
4496 N, N, N, N, N, N, N, N,
4497 }, {
4498 /* 0xC0 - 0xC7 */
4499 N, N, N, N, N, N, N, N,
4500 /* 0xC8 - 0xCF */
4501 N, N, N, N, N, N, N, N,
4502 /* 0xD0 - 0xD7 */
4503 N, N, N, N, N, N, N, N,
4504 /* 0xD8 - 0xDF */
4505 N, N, N, N, N, N, N, N,
4506 /* 0xE0 - 0xE7 */
4507 N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
4508 /* 0xE8 - 0xEF */
4509 N, N, N, N, N, N, N, N,
4510 /* 0xF0 - 0xF7 */
4511 N, N, N, N, N, N, N, N,
4512 /* 0xF8 - 0xFF */
4513 N, N, N, N, N, N, N, N,
4514 } };
4516 static const struct escape escape_dd = { {
4517 N, N, N, N, N, N, N, I(DstMem16 | Mov, em_fnstsw),
4518 }, {
4519 /* 0xC0 - 0xC7 */
4520 N, N, N, N, N, N, N, N,
4521 /* 0xC8 - 0xCF */
4522 N, N, N, N, N, N, N, N,
4523 /* 0xD0 - 0xD7 */
4524 N, N, N, N, N, N, N, N,
4525 /* 0xD8 - 0xDF */
4526 N, N, N, N, N, N, N, N,
4527 /* 0xE0 - 0xE7 */
4528 N, N, N, N, N, N, N, N,
4529 /* 0xE8 - 0xEF */
4530 N, N, N, N, N, N, N, N,
4531 /* 0xF0 - 0xF7 */
4532 N, N, N, N, N, N, N, N,
4533 /* 0xF8 - 0xFF */
4534 N, N, N, N, N, N, N, N,
4535 } };
4537 static const struct instr_dual instr_dual_0f_c3 = {
4538 I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
4539 };
4541 static const struct mode_dual mode_dual_63 = {
4542 N, I(DstReg | SrcMem32 | ModRM | Mov, em_movsxd)
4543 };
4545 static const struct opcode opcode_table[256] = {
4547 F6ALU(Lock, em_add),
4548 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
4549 I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
4551 F6ALU(Lock | PageTable, em_or),
4552 I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
4553 N,
4555 F6ALU(Lock, em_adc),
4556 I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
4557 I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
4559 F6ALU(Lock, em_sbb),
4560 I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
4561 I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
4563 F6ALU(Lock | PageTable, em_and), N, N,
4565 F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
4567 F6ALU(Lock, em_xor), N, N,
4569 F6ALU(NoWrite, em_cmp), N, N,
4571 X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
4573 X8(I(SrcReg | Stack, em_push)),
4575 X8(I(DstReg | Stack, em_pop)),
4577 I(ImplicitOps | Stack | No64, em_pusha),
4578 I(ImplicitOps | Stack | No64, em_popa),
4579 N, MD(ModRM, &mode_dual_63),
4580 N, N, N, N,
4582 I(SrcImm | Mov | Stack, em_push),
4583 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
4584 I(SrcImmByte | Mov | Stack, em_push),
4585 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
4586 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
4587 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
4589 X16(D(SrcImmByte | NearBranch | IsBranch)),
4591 G(ByteOp | DstMem | SrcImm, group1),
4592 G(DstMem | SrcImm, group1),
4593 G(ByteOp | DstMem | SrcImm | No64, group1),
4594 G(DstMem | SrcImmByte, group1),
4595 F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
4596 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
4598 I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
4599 I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
4600 I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
4601 D(ModRM | SrcMem | NoAccess | DstReg),
4602 I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
4603 G(0, group1A),
4605 DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
4607 D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
4608 I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
4609 II(ImplicitOps | Stack, em_pushf, pushf),
4610 II(ImplicitOps | Stack, em_popf, popf),
4611 I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
4613 I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
4614 I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
4615 I2bv(SrcSI | DstDI | Mov | String | TwoMemOp, em_mov),
4616 F2bv(SrcSI | DstDI | String | NoWrite | TwoMemOp, em_cmp_r),
4618 F2bv(DstAcc | SrcImm | NoWrite, em_test),
4619 I2bv(SrcAcc | DstDI | Mov | String, em_mov),
4620 I2bv(SrcSI | DstAcc | Mov | String, em_mov),
4621 F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
4623 X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
4625 X8(I(DstReg | SrcImm64 | Mov, em_mov)),
4627 G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
4628 I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
4629 I(ImplicitOps | NearBranch | IsBranch, em_ret),
4630 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
4631 I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
4632 G(ByteOp, group11), G(0, group11),
4634 I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
4635 I(Stack | IsBranch, em_leave),
4636 I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
4637 I(ImplicitOps | IsBranch, em_ret_far),
4638 D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
4639 D(ImplicitOps | No64 | IsBranch),
4640 II(ImplicitOps | IsBranch, em_iret, iret),
4642 G(Src2One | ByteOp, group2), G(Src2One, group2),
4643 G(Src2CL | ByteOp, group2), G(Src2CL, group2),
4644 I(DstAcc | SrcImmUByte | No64, em_aam),
4645 I(DstAcc | SrcImmUByte | No64, em_aad),
4646 F(DstAcc | ByteOp | No64, em_salc),
4647 I(DstAcc | SrcXLat | ByteOp, em_mov),
4649 N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
4651 X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
4652 I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
4653 I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
4654 I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
4656 I(SrcImm | NearBranch | IsBranch, em_call),
4657 D(SrcImm | ImplicitOps | NearBranch | IsBranch),
4658 I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
4659 D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
4660 I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
4661 I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
4663 N, DI(ImplicitOps, icebp), N, N,
4664 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
4665 G(ByteOp, group3), G(0, group3),
4667 D(ImplicitOps), D(ImplicitOps),
4668 I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
4669 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
4670 };
4672 static const struct opcode twobyte_table[256] = {
4674 G(0, group6), GD(0, &group7), N, N,
4675 N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
4676 II(ImplicitOps | Priv, em_clts, clts), N,
4677 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
4678 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4680 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_10_0f_11),
4681 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_10_0f_11),
4682 N, N, N, N, N, N,
4683 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 4 * prefetch + 4 * reserved NOP */
4684 D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
4685 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4686 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4687 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
4688 D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
4690 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
4691 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
4692 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
4694 IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
4697 GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
4698 GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
4699 N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
4702 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
4703 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
4704 II(ImplicitOps | Priv, em_rdmsr, rdmsr),
4705 IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
4706 I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
4707 I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
4709 N, N, N, N, N, N, N, N,
4711 X16(D(DstReg | SrcMem | ModRM)),
4713 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4718 N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
4723 N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
4725 X16(D(SrcImm | NearBranch | IsBranch)),
4727 X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
4729 I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
4730 II(ImplicitOps, em_cpuid, cpuid),
4731 F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
4732 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
4733 F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
4735 I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
4736 II(EmulateOnUD | ImplicitOps, em_rsm, rsm),
4737 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
4738 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
4739 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
4740 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
4742 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable | SrcWrite, em_cmpxchg),
4743 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
4744 F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
4745 I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
4746 I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
4747 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4751 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
4752 I(DstReg | SrcMem | ModRM, em_bsf_c),
4753 I(DstReg | SrcMem | ModRM, em_bsr_c),
4754 D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
4756 F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
4757 N, ID(0, &instr_dual_0f_c3),
4758 N, N, N, GD(0, &group9),
4760 X8(I(DstReg, em_bswap)),
4762 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
4764 N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
4765 N, N, N, N, N, N, N, N,
4767 N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
static const struct instr_dual instr_dual_0f_38_f0 = {
	I(DstReg | SrcMem | Mov, em_movbe), N
};

static const struct instr_dual instr_dual_0f_38_f1 = {
	I(DstMem | SrcReg | Mov, em_movbe), N
};

static const struct gprefix three_byte_0f_38_f0 = {
	ID(0, &instr_dual_0f_38_f0), N, N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
	ID(0, &instr_dual_0f_38_f1), N, N, N
};
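/*
 * 0F 38 F0/F1 is movbe. The instr_dual entries above restrict emulation
 * to the memory forms; a ModRM mod == 3 (register) encoding resolves to
 * N and is left unemulated.
 */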
/*
 * Instructions below are indexed by the third opcode byte and then
 * selected by the mandatory (SIMD) prefix, if any.
 */
static const struct opcode opcode_map_0f_38[256] = {
	/* 0x00 - 0x7f */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0x80 - 0xef */
	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
	/* 0xf0 - 0xf1 */
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
	/* 0xf2 - 0xff */
	N, N, X4(N), X8(N)
};
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
	unsigned size;

	size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	if (size == 8)
		size = 4;
	return size;
}
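/*
 * Note that even 64-bit operations are normally limited to a 4-byte
 * immediate here: apart from the OpImm64 forms (e.g. mov reg, imm64),
 * x86 encodes at most 32 immediate bits, sign-extended by decode_imm().
 */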
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
		      unsigned size, bool sign_extension)
{
	int rc = X86EMUL_CONTINUE;

	op->type = OP_IMM;
	op->bytes = size;
	op->addr.mem.ea = ctxt->_eip;
	/* NB. Immediates are sign-extended as necessary. */
	switch (op->bytes) {
	case 1:
		op->val = insn_fetch(s8, ctxt);
		break;
	case 2:
		op->val = insn_fetch(s16, ctxt);
		break;
	case 4:
		op->val = insn_fetch(s32, ctxt);
		break;
	case 8:
		op->val = insn_fetch(s64, ctxt);
		break;
	}
	if (!sign_extension) {
		switch (op->bytes) {
		case 1:
			op->val &= 0xff;
			break;
		case 2:
			op->val &= 0xffff;
			break;
		case 4:
			op->val &= 0xffffffff;
			break;
		}
	}
done:
	return rc;
}
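/*
 * For example, "add eax, imm32" (opcode 0x05) reaches decode_imm() with
 * size == 4: four bytes are consumed at _eip via insn_fetch() and
 * sign-extended into op->val.
 */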
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
			  unsigned d)
{
	int rc = X86EMUL_CONTINUE;

	switch (d) {
	case OpReg:
		decode_register_operand(ctxt, op);
		break;
	case OpImmUByte:
		rc = decode_imm(ctxt, op, 1, false);
		break;
	case OpMem:
		ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
	mem_common:
		*op = ctxt->memop;
		ctxt->memopp = op;
		if (ctxt->d & BitOp)
			fetch_bit_operand(ctxt);
		op->orig_val = op->val;
		break;
	case OpMem64:
		ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
		goto mem_common;
	case OpAcc:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccLo:
		op->type = OP_REG;
		op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpAccHi:
		if (ctxt->d & ByteOp) {
			op->type = OP_NONE;
			break;
		}
		op->type = OP_REG;
		op->bytes = ctxt->op_bytes;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		op->orig_val = op->val;
		break;
	case OpDI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RDI);
		op->addr.mem.seg = VCPU_SREG_ES;
		op->val = 0;
		op->count = 1;
		break;
	case OpDX:
		op->type = OP_REG;
		op->bytes = 2;
		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
		fetch_register_operand(op);
		break;
	case OpCL:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
		break;
	case OpImmByte:
		rc = decode_imm(ctxt, op, 1, true);
		break;
	case OpOne:
		op->type = OP_IMM;
		op->bytes = 1;
		op->val = 1;
		break;
	case OpImm:
		rc = decode_imm(ctxt, op, imm_size(ctxt), true);
		break;
	case OpImm64:
		rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
		break;
	case OpMem8:
		ctxt->memop.bytes = 1;
		if (ctxt->memop.type == OP_REG) {
			ctxt->memop.addr.reg = decode_register(ctxt,
					ctxt->modrm_rm, true);
			fetch_register_operand(&ctxt->memop);
		}
		goto mem_common;
	case OpMem16:
		ctxt->memop.bytes = 2;
		goto mem_common;
	case OpMem32:
		ctxt->memop.bytes = 4;
		goto mem_common;
	case OpImmU16:
		rc = decode_imm(ctxt, op, 2, false);
		break;
	case OpImmU:
		rc = decode_imm(ctxt, op, imm_size(ctxt), false);
		break;
	case OpSI:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			register_address(ctxt, VCPU_REGS_RSI);
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		op->count = 1;
		break;
	case OpXLat:
		op->type = OP_MEM;
		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
		op->addr.mem.ea =
			address_mask(ctxt,
				reg_read(ctxt, VCPU_REGS_RBX) +
				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
		op->addr.mem.seg = ctxt->seg_override;
		op->val = 0;
		break;
	case OpImmFAddr:
		op->type = OP_IMM;
		op->addr.mem.ea = ctxt->_eip;
		op->bytes = ctxt->op_bytes + 2;
		insn_fetch_arr(op->valptr, op->bytes, ctxt);
		break;
	case OpMemFAddr:
		ctxt->memop.bytes = ctxt->op_bytes + 2;
		goto mem_common;
	case OpES:
		op->type = OP_IMM;
		op->val = VCPU_SREG_ES;
		break;
	case OpCS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_CS;
		break;
	case OpSS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_SS;
		break;
	case OpDS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_DS;
		break;
	case OpFS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_FS;
		break;
	case OpGS:
		op->type = OP_IMM;
		op->val = VCPU_SREG_GS;
		break;
	case OpImplicit:
		/* Special instructions do their own operand decoding. */
	default:
		op->type = OP_NONE; /* Disable writeback. */
		break;
	}

done:
	return rc;
}
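/*
 * decode_operand() is driven purely by the Op* fields packed into ctxt->d.
 * For example, "mov r/m32, r32" (opcode 0x89) carries DstMem | SrcReg |
 * ModRM (among other flags), so its destination decodes via OpMem from the
 * ModRM-resolved ctxt->memop and its source via OpReg.
 */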
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
	int rc = X86EMUL_CONTINUE;
	int mode = ctxt->mode;
	int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
	bool op_prefix = false;
	bool has_seg_override = false;
	struct opcode opcode;
	u16 dummy;
	struct desc_struct desc;

	ctxt->memop.type = OP_NONE;
	ctxt->memopp = NULL;
	ctxt->_eip = ctxt->eip;
	ctxt->fetch.ptr = ctxt->fetch.data;
	ctxt->fetch.end = ctxt->fetch.data + insn_len;
	ctxt->opcode_len = 1;
	ctxt->intercept = x86_intercept_none;
	if (insn_len > 0)
		memcpy(ctxt->fetch.data, insn, insn_len);
	else {
		rc = __do_insn_fetch_bytes(ctxt, 1);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	switch (mode) {
	case X86EMUL_MODE_REAL:
	case X86EMUL_MODE_VM86:
		def_op_bytes = def_ad_bytes = 2;
		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
		if (desc.d)
			def_op_bytes = def_ad_bytes = 4;
		break;
	case X86EMUL_MODE_PROT16:
		def_op_bytes = def_ad_bytes = 2;
		break;
	case X86EMUL_MODE_PROT32:
		def_op_bytes = def_ad_bytes = 4;
		break;
#ifdef CONFIG_X86_64
	case X86EMUL_MODE_PROT64:
		def_op_bytes = 4;
		def_ad_bytes = 8;
		break;
#endif
	default:
		return EMULATION_FAILED;
	}

	ctxt->op_bytes = def_op_bytes;
	ctxt->ad_bytes = def_ad_bytes;

	/* Legacy prefixes. */
	for (;;) {
		switch (ctxt->b = insn_fetch(u8, ctxt)) {
		case 0x66:	/* operand-size override */
			op_prefix = true;
			/* switch between 2/4 bytes */
			ctxt->op_bytes = def_op_bytes ^ 6;
			break;
		case 0x67:	/* address-size override */
			if (mode == X86EMUL_MODE_PROT64)
				/* switch between 4/8 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 12;
			else
				/* switch between 2/4 bytes */
				ctxt->ad_bytes = def_ad_bytes ^ 6;
			break;
		case 0x26:	/* ES override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_ES;
			break;
		case 0x2e:	/* CS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_CS;
			break;
		case 0x36:	/* SS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_SS;
			break;
		case 0x3e:	/* DS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_DS;
			break;
		case 0x64:	/* FS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_FS;
			break;
		case 0x65:	/* GS override */
			has_seg_override = true;
			ctxt->seg_override = VCPU_SREG_GS;
			break;
		case 0x40 ... 0x4f: /* REX */
			if (mode != X86EMUL_MODE_PROT64)
				goto done_prefixes;
			ctxt->rex_prefix = ctxt->b;
			continue;
		case 0xf0:	/* LOCK */
			ctxt->lock_prefix = 1;
			break;
		case 0xf2:	/* REPNE/REPNZ */
		case 0xf3:	/* REP/REPE/REPZ */
			ctxt->rep_prefix = ctxt->b;
			break;
		default:
			goto done_prefixes;
		}

		/* Any legacy prefix after a REX prefix nullifies its effect. */

		ctxt->rex_prefix = 0;
	}

done_prefixes:

	/* REX prefix. */
	if (ctxt->rex_prefix & 8)
		ctxt->op_bytes = 8;	/* REX.W */
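	/*
	 * For example, in 48 89 08 (mov [rax], rcx) the 0x48 REX prefix has
	 * REX.W set, so op_bytes is already 8 when opcode byte 0x89 is
	 * looked up below.
	 */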
	/* Opcode byte(s). */
	opcode = opcode_table[ctxt->b];
	/* Two-byte opcode? */
	if (ctxt->b == 0x0f) {
		ctxt->opcode_len = 2;
		ctxt->b = insn_fetch(u8, ctxt);
		opcode = twobyte_table[ctxt->b];

		/* 0F_38 opcode map */
		if (ctxt->b == 0x38) {
			ctxt->opcode_len = 3;
			ctxt->b = insn_fetch(u8, ctxt);
			opcode = opcode_map_0f_38[ctxt->b];
		}
	}
	ctxt->d = opcode.flags;

	if (ctxt->d & ModRM)
		ctxt->modrm = insn_fetch(u8, ctxt);

	/* vex-prefix instructions are not implemented */
	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
		ctxt->d = NotImpl;
	}

	while (ctxt->d & GroupMask) {
		switch (ctxt->d & GroupMask) {
		case Group:
			goffset = (ctxt->modrm >> 3) & 7;
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:
			goffset = (ctxt->modrm >> 3) & 7;
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		case RMExt:
			goffset = ctxt->modrm & 7;
			opcode = opcode.u.group[goffset];
			break;
		case Prefix:
			if (ctxt->rep_prefix && op_prefix)
				return EMULATION_FAILED;
			simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
			switch (simd_prefix) {
			case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
			case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
			case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
			case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
			}
			break;
		case Escape:
			if (ctxt->modrm > 0xbf) {
				size_t size = ARRAY_SIZE(opcode.u.esc->high);
				u32 index = array_index_nospec(
					ctxt->modrm - 0xc0, size);

				opcode = opcode.u.esc->high[index];
			} else {
				opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
			}
			break;
		case InstrDual:
			if ((ctxt->modrm >> 6) == 3)
				opcode = opcode.u.idual->mod3;
			else
				opcode = opcode.u.idual->mod012;
			break;
		case ModeDual:
			if (ctxt->mode == X86EMUL_MODE_PROT64)
				opcode = opcode.u.mdual->mode64;
			else
				opcode = opcode.u.mdual->mode32;
			break;
		default:
			return EMULATION_FAILED;
		}

		ctxt->d &= ~(u64)GroupMask;
		ctxt->d |= opcode.flags;
	}
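	/*
	 * Group resolution example: opcode 0xff decodes to G(0, group5),
	 * and the ModRM reg field (e.g. 6 for "push r/m") then selects the
	 * concrete entry, whose flags are merged into ctxt->d above.
	 */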
	ctxt->is_branch = opcode.flags & IsBranch;

	/* Unrecognised? */
	if (ctxt->d == 0)
		return EMULATION_FAILED;

	ctxt->execute = opcode.u.execute;

	if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
	    likely(!(ctxt->d & EmulateOnUD)))
		return EMULATION_FAILED;

	if (unlikely(ctxt->d &
	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
	     No16))) {
		/*
		 * These are copied unconditionally here, and checked unconditionally
		 * in x86_emulate_insn.
		 */
		ctxt->check_perm = opcode.check_perm;
		ctxt->intercept = opcode.intercept;

		if (ctxt->d & NotImpl)
			return EMULATION_FAILED;

		if (mode == X86EMUL_MODE_PROT64) {
			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
				ctxt->op_bytes = 8;
			else if (ctxt->d & NearBranch)
				ctxt->op_bytes = 8;
		}

		if (ctxt->d & Op3264) {
			if (mode == X86EMUL_MODE_PROT64)
				ctxt->op_bytes = 8;
			else
				ctxt->op_bytes = 4;
		}

		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
			ctxt->op_bytes = 4;

		if (ctxt->d & Sse)
			ctxt->op_bytes = 16;
		else if (ctxt->d & Mmx)
			ctxt->op_bytes = 8;
	}

	/* ModRM and SIB bytes. */
	if (ctxt->d & ModRM) {
		rc = decode_modrm(ctxt, &ctxt->memop);
		if (!has_seg_override) {
			has_seg_override = true;
			ctxt->seg_override = ctxt->modrm_seg;
		}
	} else if (ctxt->d & MemAbs)
		rc = decode_abs(ctxt, &ctxt->memop);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	if (!has_seg_override)
		ctxt->seg_override = VCPU_SREG_DS;

	ctxt->memop.addr.mem.seg = ctxt->seg_override;

	/*
	 * Decode and fetch the source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/*
	 * Decode and fetch the second source operand: register, memory
	 * or immediate.
	 */
	rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
	if (rc != X86EMUL_CONTINUE)
		goto done;

	/* Decode and fetch the destination operand: register or memory. */
	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

	if (ctxt->rip_relative && likely(ctxt->memopp))
		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
					ctxt->memopp->addr.mem.ea + ctxt->_eip);

done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
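/*
 * Note that a fault during instruction fetch also yields EMULATION_FAILED,
 * but with ctxt->have_exception set so the caller can inject the fault
 * rather than treat the instruction as unemulatable.
 */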
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
	return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
	/*
	 * The second termination condition only applies to REPE and REPNE.
	 * If the repeat prefix is REPE/REPZ or REPNE/REPNZ, test the
	 * corresponding flag-based termination condition:
	 *	- if REPE/REPZ and ZF = 0 then done
	 *	- if REPNE/REPNZ and ZF = 1 then done
	 */
	if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
	     (ctxt->b == 0xae) || (ctxt->b == 0xaf))
	    && (((ctxt->rep_prefix == REPE_PREFIX) &&
		 ((ctxt->eflags & X86_EFLAGS_ZF) == 0))
		|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
		    ((ctxt->eflags & X86_EFLAGS_ZF) == X86_EFLAGS_ZF))))
		return true;

	return false;
}
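/*
 * For example, "repe cmpsb" (f3 a6) is done as soon as a byte comparison
 * clears ZF, even if RCX has not reached zero; the RCX == 0 check in
 * x86_emulate_insn() handles the first termination condition.
 */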
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
	int rc;

	kvm_fpu_get();
	rc = asm_safe("fwait");
	kvm_fpu_put();

	if (unlikely(rc != X86EMUL_CONTINUE))
		return emulate_exception(ctxt, MF_VECTOR, 0, false);

	return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct operand *op)
{
	if (op->type == OP_MM)
		kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
{
	ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;

	if (!(ctxt->d & ByteOp))
		fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;

	asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
	    : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
	      [thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
	    : "c"(ctxt->src2.val));

	ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
	if (!fop) /* exception is returned in fop variable */
		return emulate_de(ctxt);
	return X86EMUL_CONTINUE;
}
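/*
 * fastop calling convention, visible in the asm constraints above: dst in
 * RAX, src in RDX, src2 in RCX, flags in RDI and the fastop stub in RSI.
 * The stubs for the 1/2/4/8-byte variants of an operation are laid out
 * FASTOP_SIZE bytes apart, hence the __ffs(ctxt->dst.bytes) indexing.
 */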
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
	memset(&ctxt->rip_relative, 0,
	       (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);

	ctxt->io_read.pos = 0;
	ctxt->io_read.end = 0;
	ctxt->mem_read.end = 0;
}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
	const struct x86_emulate_ops *ops = ctxt->ops;
	int rc = X86EMUL_CONTINUE;
	int saved_dst_type = ctxt->dst.type;
	unsigned emul_flags;

	ctxt->mem_read.pos = 0;

	/* LOCK prefix is allowed only with some instructions */
	if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
		rc = emulate_ud(ctxt);
		goto done;
	}

	emul_flags = ctxt->ops->get_hflags(ctxt);
	if (unlikely(ctxt->d &
		     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
		if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
		    (ctxt->d & Undefined)) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
		    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
			rc = emulate_nm(ctxt);
			goto done;
		}

		if (ctxt->d & Mmx) {
			rc = flush_pending_x87_faults(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
			/*
			 * Now that we know the fpu is exception safe, we can fetch
			 * operands from it.
			 */
			fetch_possible_mmx_operand(&ctxt->src);
			fetch_possible_mmx_operand(&ctxt->src2);
			if (!(ctxt->d & Mov))
				fetch_possible_mmx_operand(&ctxt->dst);
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_PRE_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		/* Instruction can only be executed in protected mode */
		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
			rc = emulate_ud(ctxt);
			goto done;
		}

		/* Privileged instruction can be executed only in CPL=0 */
		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
			if (ctxt->d & PrivUD)
				rc = emulate_ud(ctxt);
			else
				rc = emulate_gp(ctxt, 0);
			goto done;
		}

		/* Do instruction specific permission checks */
		if (ctxt->d & CheckPerm) {
			rc = ctxt->check_perm(ctxt);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
			rc = emulator_check_intercept(ctxt, ctxt->intercept,
						      X86_ICPT_POST_EXCEPT);
			if (rc != X86EMUL_CONTINUE)
				goto done;
		}

		if (ctxt->rep_prefix && (ctxt->d & String)) {
			/* All REP prefixes have the same first termination condition */
			if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
				string_registers_quirk(ctxt);
				ctxt->eip = ctxt->_eip;
				ctxt->eflags &= ~X86_EFLAGS_RF;
				goto done;
			}
		}
	}
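	/*
	 * For example, "rep movsb" with RCX == 0 copies nothing: the code
	 * above just commits the next RIP and clears RF, without entering
	 * the string loop at all.
	 */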
	if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
		rc = segmented_read(ctxt, ctxt->src.addr.mem,
				    ctxt->src.valptr, ctxt->src.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		ctxt->src.orig_val64 = ctxt->src.val64;
	}

	if (ctxt->src2.type == OP_MEM) {
		rc = segmented_read(ctxt, ctxt->src2.addr.mem,
				    &ctxt->src2.val, ctxt->src2.bytes);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if ((ctxt->d & DstMask) == ImplicitOps)
		goto special_insn;

	if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
		/* optimisation - avoid slow emulated read if Mov */
		rc = segmented_read(ctxt, ctxt->dst.addr.mem,
				    &ctxt->dst.val, ctxt->dst.bytes);
		if (rc != X86EMUL_CONTINUE) {
			if (!(ctxt->d & NoWrite) &&
			    rc == X86EMUL_PROPAGATE_FAULT &&
			    ctxt->exception.vector == PF_VECTOR)
				ctxt->exception.error_code |= PFERR_WRITE_MASK;
			goto done;
		}
	}
	/* Copy full 64-bit value for CMPXCHG8B. */
	ctxt->dst.orig_val64 = ctxt->dst.val64;

special_insn:
	if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
		rc = emulator_check_intercept(ctxt, ctxt->intercept,
					      X86_ICPT_POST_MEMACCESS);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	if (ctxt->rep_prefix && (ctxt->d & String))
		ctxt->eflags |= X86_EFLAGS_RF;
	else
		ctxt->eflags &= ~X86_EFLAGS_RF;

	if (ctxt->execute) {
		if (ctxt->d & Fastop)
			rc = fastop(ctxt, ctxt->fop);
		else
			rc = ctxt->execute(ctxt);
		if (rc != X86EMUL_CONTINUE)
			goto done;
		goto writeback;
	}

	if (ctxt->opcode_len == 2)
		goto twobyte_insn;
	else if (ctxt->opcode_len == 3)
		goto threebyte_insn;

	switch (ctxt->b) {
	case 0x70 ... 0x7f: /* jcc (short) */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x8d: /* lea r16/r32, m */
		ctxt->dst.val = ctxt->src.addr.mem.ea;
		break;
	case 0x90 ... 0x97: /* nop / xchg reg, rax */
		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
			ctxt->dst.type = OP_NONE;
		else
			rc = em_xchg(ctxt);
		break;
	case 0x98: /* cbw/cwde/cdqe */
		switch (ctxt->op_bytes) {
		case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
		case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
		case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
		}
		break;
	case 0xcc: /* int3 */
		rc = emulate_int(ctxt, 3);
		break;
	case 0xcd: /* int n */
		rc = emulate_int(ctxt, ctxt->src.val);
		break;
	case 0xce: /* into */
		if (ctxt->eflags & X86_EFLAGS_OF)
			rc = emulate_int(ctxt, 4);
		break;
	case 0xe9: /* jmp rel */
	case 0xeb: /* jmp rel short */
		rc = jmp_rel(ctxt, ctxt->src.val);
		ctxt->dst.type = OP_NONE; /* Disable writeback. */
		break;
	case 0xf4: /* hlt */
		ctxt->ops->halt(ctxt);
		break;
	case 0xf5: /* cmc */
		/* complement carry flag from eflags reg */
		ctxt->eflags ^= X86_EFLAGS_CF;
		break;
	case 0xf8: /* clc */
		ctxt->eflags &= ~X86_EFLAGS_CF;
		break;
	case 0xf9: /* stc */
		ctxt->eflags |= X86_EFLAGS_CF;
		break;
	case 0xfc: /* cld */
		ctxt->eflags &= ~X86_EFLAGS_DF;
		break;
	case 0xfd: /* std */
		ctxt->eflags |= X86_EFLAGS_DF;
		break;
	default:
		goto cannot_emulate;
	}
	if (rc != X86EMUL_CONTINUE)
		goto done;

writeback:
	if (ctxt->d & SrcWrite) {
		BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
		rc = writeback(ctxt, &ctxt->src);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}
	if (!(ctxt->d & NoWrite)) {
		rc = writeback(ctxt, &ctxt->dst);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}

	/*
	 * restore dst type in case the decoding will be reused
	 * (happens for string instructions)
	 */
	ctxt->dst.type = saved_dst_type;

	if ((ctxt->d & SrcMask) == SrcSI)
		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);

	if ((ctxt->d & DstMask) == DstDI)
		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);

	if (ctxt->rep_prefix && (ctxt->d & String)) {
		unsigned int count;
		struct read_cache *r = &ctxt->io_read;
		if ((ctxt->d & SrcMask) == SrcSI)
			count = ctxt->src.count;
		else
			count = ctxt->dst.count;
		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

		if (!string_insn_completed(ctxt)) {
			/*
			 * Re-enter guest when pio read ahead buffer is empty
			 * or, if it is not used, after each 1024 iterations.
			 */
			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
			    (r->end == 0 || r->end != r->pos)) {
				/*
				 * Reset read cache. Usually happens before
				 * decode, but since instruction is restarted
				 * we have to do it here.
				 */
				ctxt->mem_read.end = 0;
				writeback_registers(ctxt);
				return EMULATION_RESTART;
			}
			goto done; /* skip rip writeback */
		}
		ctxt->eflags &= ~X86_EFLAGS_RF;
	}

	ctxt->eip = ctxt->_eip;
	if (ctxt->mode != X86EMUL_MODE_PROT64)
		ctxt->eip = (u32)ctxt->_eip;

done:
	if (rc == X86EMUL_PROPAGATE_FAULT) {
		WARN_ON(ctxt->exception.vector > 0x1f);
		ctxt->have_exception = true;
	}
	if (rc == X86EMUL_INTERCEPTED)
		return EMULATION_INTERCEPTED;

	if (rc == X86EMUL_CONTINUE)
		writeback_registers(ctxt);

	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;

twobyte_insn:
	switch (ctxt->b) {
	case 0x09: /* wbinvd */
		(ctxt->ops->wbinvd)(ctxt);
		break;
	case 0x08: /* invd */
	case 0x0d: /* GrpP (prefetch) */
	case 0x18: /* Grp16 (prefetch/nop) */
	case 0x1f: /* nop */
		break;
	case 0x20: /* mov cr, reg */
		ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
		break;
	case 0x21: /* mov from dr to reg */
		ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
		break;
	case 0x40 ... 0x4f: /* cmov */
		if (test_cc(ctxt->b, ctxt->eflags))
			ctxt->dst.val = ctxt->src.val;
		else if (ctxt->op_bytes != 4)
			ctxt->dst.type = OP_NONE; /* no writeback */
		break;
	case 0x80 ... 0x8f: /* jnz rel, etc */
		if (test_cc(ctxt->b, ctxt->eflags))
			rc = jmp_rel(ctxt, ctxt->src.val);
		break;
	case 0x90 ... 0x9f: /* setcc r/m8 */
		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
		break;
	case 0xb6 ... 0xb7: /* movzx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
						       : (u16) ctxt->src.val;
		break;
	case 0xbe ... 0xbf: /* movsx */
		ctxt->dst.bytes = ctxt->op_bytes;
		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val
						       : (s16) ctxt->src.val;
		break;
	default:
		goto cannot_emulate;
	}

threebyte_insn:

	if (rc != X86EMUL_CONTINUE)
		goto done;

	goto writeback;

cannot_emulate:
	return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
	invalidate_registers(ctxt);
}

void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
	writeback_registers(ctxt);
}
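/*
 * Decline the cached-GPA fast path for anything that touches more than
 * one memory location: REP string instructions iterate over a range of
 * addresses, and TwoMemOp instructions access two distinct operands.
 */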
bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->rep_prefix && (ctxt->d & String))
		return false;

	if (ctxt->d & TwoMemOp)