1 // license:BSD-3-Clause
2 // copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett
21 //#include "debugger.h"
25 //#include "debug/debugcpu.h"
27 /* seems to be defined on mingw-gcc */
30 static CPU_RESET( CPU_MODEL );
32 int i386_parity_table[256];
33 MODRM_TABLE i386_MODRM_table[256];
35 static void i386_trap_with_error(i386_state* cpustate, int irq, int irq_gate, int trap_level, UINT32 err);
36 static void i286_task_switch(i386_state* cpustate, UINT16 selector, UINT8 nested);
37 static void i386_task_switch(i386_state* cpustate, UINT16 selector, UINT8 nested);
38 static void build_opcode_table(i386_state *cpustate, UINT32 features);
39 static void zero_state(i386_state *cpustate);
40 static void pentium_smi(i386_state* cpustate);
42 #define FAULT(fault,error) {cpustate->ext = 1; i386_trap_with_error(cpustate,fault,0,0,error); return;}
43 #define FAULT_EXP(fault,error) {cpustate->ext = 1; i386_trap_with_error(cpustate,fault,0,trap_level+1,error); return;}
45 /*************************************************************************/
/* Reads the descriptor for seg->selector out of the GDT or LDT and fills in
   seg->flags/base/limit/d.  If desc is non-NULL the raw 64-bit descriptor is
   also returned through it.  Null/out-of-range selectors fail early
   (return value for that path is outside the visible lines — confirm). */
47 static UINT32 i386_load_protected_mode_segment(i386_state *cpustate, I386_SREG *seg, UINT64 *desc )
// Selector bit 2 (TI) picks the table: 1 = LDT, 0 = GDT.
63 if ( seg->selector & 0x4 )
65 base = cpustate->ldtr.base;
66 limit = cpustate->ldtr.limit;
68 base = cpustate->gdtr.base;
69 limit = cpustate->gdtr.limit;
// Descriptor index: selector with RPL/TI bits masked off; each entry is 8 bytes,
// so the whole entry (offset..offset+7) must lie within the table limit.
72 entry = seg->selector & ~0x7;
73 if (limit == 0 || entry + 7 > limit)
// Fetch the two descriptor dwords with supervisor (PL0) access rights.
76 v1 = READ32PL0(cpustate, base + entry );
77 v2 = READ32PL0(cpustate, base + entry + 4 );
// Unpack access rights, 32-bit base and 20-bit limit from the descriptor layout.
79 seg->flags = (v2 >> 8) & 0xf0ff;
80 seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff);
81 seg->limit = (v2 & 0xf0000) | (v1 & 0xffff);
// G bit set: limit is in 4KB pages, scale to bytes.
82 if (seg->flags & 0x8000)
83 seg->limit = (seg->limit << 12) | 0xfff;
// D/B bit: 1 = 32-bit default operand/stack size.
84 seg->d = (seg->flags & 0x4000) ? 1 : 0;
88 *desc = ((UINT64)v2<<32)|v1;
/* Reads the call/task/interrupt gate descriptor selected by gate->segment from
   the GDT or LDT and unpacks selector, offset, access rights, dword count,
   present bit and DPL into *gate. */
92 static void i386_load_call_gate(i386_state* cpustate, I386_CALL_GATE *gate)
// Selector bit 2 (TI) picks the table: 1 = LDT, 0 = GDT.
98 if ( gate->segment & 0x4 )
100 base = cpustate->ldtr.base;
101 limit = cpustate->ldtr.limit;
103 base = cpustate->gdtr.base;
104 limit = cpustate->gdtr.limit;
// 8-byte descriptor entry must fit inside the table limit.
107 entry = gate->segment & ~0x7;
108 if (limit == 0 || entry + 7 > limit)
111 v1 = READ32PL0(cpustate, base + entry );
112 v2 = READ32PL0(cpustate, base + entry + 4 );
114 /* Note that for task gates, offset and dword_count are not used */
115 gate->selector = (v1 >> 16) & 0xffff;
116 gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000);
117 gate->ar = (v2 >> 8) & 0xff;
118 gate->dword_count = v2 & 0x001f;
119 gate->present = (gate->ar >> 7) & 0x01;
120 gate->dpl = (gate->ar >> 5) & 0x03;
/* Sets the "accessed" bit (bit 0 of the access-rights byte) in the descriptor
   table entry for the given selector, writing straight to physical memory. */
123 static void i386_set_descriptor_accessed(i386_state *cpustate, UINT16 selector)
125 // assume the selector is valid, we don't need to check it again
131 if ( selector & 0x4 )
132 base = cpustate->ldtr.base;
134 base = cpustate->gdtr.base;
// The access-rights byte lives at offset 5 of the 8-byte descriptor.
// Translate the linear address manually and bypass segment-level checks.
136 addr = base + (selector & ~7) + 5;
137 i386_translate_address(cpustate, TRANSLATE_READ, &addr, NULL);
138 rights = cpustate->program->read_data8(addr);
139 // Should a fault be thrown if the table is read only?
140 cpustate->program->write_data8(addr, rights | 1);
/* Refreshes the hidden (cached) part of a segment register from its current
   selector.  In protected mode the descriptor is fetched from the GDT/LDT and
   marked accessed; in real/V86 mode base = selector << 4 with 64KB limit.
   (The mode branches are partially outside the visible lines.) */
143 static void i386_load_segment_descriptor(i386_state *cpustate, int segment )
147 UINT16 old_flags = cpustate->sreg[segment].flags;
// Protected mode: load descriptor; null selectors skip the accessed-bit update.
150 i386_load_protected_mode_segment(cpustate, &cpustate->sreg[segment], NULL );
151 if(cpustate->sreg[segment].selector)
153 i386_set_descriptor_accessed(cpustate, cpustate->sreg[segment].selector);
154 cpustate->sreg[segment].flags |= 0x0001;
// Real/V86 mode: flat 16-bit segment semantics; CS gets code rights (0xfb),
// data segments get 0xf3.
159 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
160 cpustate->sreg[segment].limit = 0xffff;
161 cpustate->sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3;
162 cpustate->sreg[segment].d = 0;
163 cpustate->sreg[segment].valid = true;
165 // if (segment == CS && cpustate->sreg[segment].flags != old_flags)
166 // debugger_privilege_hook();
170 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
171 cpustate->sreg[segment].d = 0;
172 cpustate->sreg[segment].valid = true;
// After reset (before the first far jump) CS base has the high address lines
// forced so execution starts near the top of the address space.
176 if( !cpustate->performed_intersegment_jump )
177 cpustate->sreg[segment].base |= 0xfff00000;
178 if(cpustate->cpu_version < 0x5000)
179 cpustate->sreg[segment].flags = 0x93;
184 /* Retrieves the stack selector located in the current TSS */
/* privilege selects the ring (0-2); a 386 TSS (task.flags bit 3 set) stores
   SS0/SS1/SS2 as 32-bit slots every 8 bytes from offset 8, a 286 TSS stores
   16-bit slots every 4 bytes from offset 4. */
185 static UINT32 i386_get_stack_segment(i386_state* cpustate, UINT8 privilege)
191 if(cpustate->task.flags & 8)
192 ret = READ32PL0(cpustate,(cpustate->task.base+8) + (8*privilege));
194 ret = READ16PL0(cpustate,(cpustate->task.base+4) + (4*privilege));
199 /* Retrieves the stack pointer located in the current TSS */
/* Same layout logic as i386_get_stack_segment: ESP0/1/2 for a 386 TSS
   (32-bit, stride 8, from offset 4), SP0/1/2 for a 286 TSS (16-bit,
   stride 4, from offset 2). */
200 static UINT32 i386_get_stack_ptr(i386_state* cpustate, UINT8 privilege)
206 if(cpustate->task.flags & 8)
207 ret = READ32PL0(cpustate,(cpustate->task.base+4) + (8*privilege));
209 ret = READ16PL0(cpustate,(cpustate->task.base+2) + (4*privilege))
/* Assembles the EFLAGS image from the individual flag fields kept in cpustate.
   Bits not implemented on this CPU model (per eflags_mask) come from the
   stored eflags value instead. */
214 static UINT32 get_flags(i386_state *cpustate)
218 f |= cpustate->PF << 2;
219 f |= cpustate->AF << 4;
220 f |= cpustate->ZF << 6;
221 f |= cpustate->SF << 7;
222 f |= cpustate->TF << 8;
223 f |= cpustate->IF << 9;
224 f |= cpustate->DF << 10;
225 f |= cpustate->OF << 11;
// IOPL is a two-bit field (bits 12-13) stored as two separate flags.
226 f |= cpustate->IOP1 << 12;
227 f |= cpustate->IOP2 << 13;
228 f |= cpustate->NT << 14;
229 f |= cpustate->RF << 16;
230 f |= cpustate->VM << 17;
231 f |= cpustate->AC << 18;
232 f |= cpustate->VIF << 19;
233 f |= cpustate->VIP << 20;
234 f |= cpustate->ID << 21;
// Merge: masked-out bits keep their stored value, implemented bits come from f.
235 return (cpustate->eflags & ~cpustate->eflags_mask) | (f & cpustate->eflags_mask);
/* Writes a new EFLAGS image: the value is first restricted to the bits this
   CPU model implements (eflags_mask), then decomposed into the individual
   flag fields kept in cpustate, and finally stored as the raw eflags value.
   Inverse of get_flags(). */
238 static void set_flags(i386_state *cpustate, UINT32 f )
// Drop bits not implemented on this CPU model.  (Fixed: stray ';;'.)
240 f &= cpustate->eflags_mask;
241 cpustate->CF = (f & 0x1) ? 1 : 0;
242 cpustate->PF = (f & 0x4) ? 1 : 0;
243 cpustate->AF = (f & 0x10) ? 1 : 0;
244 cpustate->ZF = (f & 0x40) ? 1 : 0;
245 cpustate->SF = (f & 0x80) ? 1 : 0;
246 cpustate->TF = (f & 0x100) ? 1 : 0;
247 cpustate->IF = (f & 0x200) ? 1 : 0;
248 cpustate->DF = (f & 0x400) ? 1 : 0;
249 cpustate->OF = (f & 0x800) ? 1 : 0;
// IOPL (bits 12-13) is split across two one-bit fields.
250 cpustate->IOP1 = (f & 0x1000) ? 1 : 0;
251 cpustate->IOP2 = (f & 0x2000) ? 1 : 0;
252 cpustate->NT = (f & 0x4000) ? 1 : 0;
253 cpustate->RF = (f & 0x10000) ? 1 : 0;
254 cpustate->VM = (f & 0x20000) ? 1 : 0;
255 cpustate->AC = (f & 0x40000) ? 1 : 0;
256 cpustate->VIF = (f & 0x80000) ? 1 : 0;
257 cpustate->VIP = (f & 0x100000) ? 1 : 0;
258 cpustate->ID = (f & 0x200000) ? 1 : 0;
259 cpustate->eflags = f;
/* Decodes a 32-bit SIB byte: fetches it from the instruction stream, computes
   the base+scaled-index effective address and the default segment (SS for
   ESP/EBP-based addressing, DS otherwise), and returns both via out params.
   mod is the ModRM mod field, needed for the base==5 displacement cases. */
262 static void sib_byte(i386_state *cpustate,UINT8 mod, UINT32* out_ea, UINT8* out_segment)
266 UINT8 scale, i, base;
267 UINT8 sib = FETCH(cpustate);
// SIB layout: scale = bits 7-6, index = bits 5-3, base = bits 2-0.
268 scale = (sib >> 6) & 0x3;
269 i = (sib >> 3) & 0x7;
// Base register selection; base==5 depends on mod (disp32 / EBP+disp8 / EBP+disp32).
274 case 0: ea = REG32(EAX); segment = DS; break;
275 case 1: ea = REG32(ECX); segment = DS; break;
276 case 2: ea = REG32(EDX); segment = DS; break;
277 case 3: ea = REG32(EBX); segment = DS; break;
278 case 4: ea = REG32(ESP); segment = SS; break;
281 ea = FETCH32(cpustate);
283 } else if( mod == 1 ) {
286 } else if( mod == 2 ) {
291 case 6: ea = REG32(ESI); segment = DS; break;
292 case 7: ea = REG32(EDI); segment = DS; break;
// Add index * 2^scale; index 4 means "no index" and is absent here.
296 case 0: ea += REG32(EAX) * (1 << scale); break;
297 case 1: ea += REG32(ECX) * (1 << scale); break;
298 case 2: ea += REG32(EDX) * (1 << scale); break;
299 case 3: ea += REG32(EBX) * (1 << scale); break;
301 case 5: ea += REG32(EBP) * (1 << scale); break;
302 case 6: ea += REG32(ESI) * (1 << scale); break;
303 case 7: ea += REG32(EDI) * (1 << scale); break;
306 *out_segment = segment;
/* Decodes the addressing part of a ModRM byte into an effective address and
   default segment.  Handles both 32-bit (cpustate->address_size set) and
   16-bit address modes; mod==3 (register operand) is a fatal error here.
   A segment-override prefix replaces the default segment. */
309 static void modrm_to_EA(i386_state *cpustate,UINT8 mod_rm, UINT32* out_ea, UINT8* out_segment)
314 UINT8 mod = (mod_rm >> 6) & 0x3;
315 UINT8 rm = mod_rm & 0x7;
// mod==3 encodes a register, not memory — caller must not ask for an EA.
320 fatalerror("i386: Called modrm_to_EA with modrm value %02X!\n",mod_rm);
// ---- 32-bit address size ----
323 if( cpustate->address_size ) {
327 case 0: ea = REG32(EAX); segment = DS; break;
328 case 1: ea = REG32(ECX); segment = DS; break;
329 case 2: ea = REG32(EDX); segment = DS; break;
330 case 3: ea = REG32(EBX); segment = DS; break;
// rm==4 means a SIB byte follows.
331 case 4: sib_byte(cpustate, mod, &ea, &segment ); break;
// rm==5: mod==0 is absolute disp32, otherwise EBP-based (SS default).
334 ea = FETCH32(cpustate); segment = DS;
336 ea = REG32(EBP); segment = SS;
339 case 6: ea = REG32(ESI); segment = DS; break;
340 case 7: ea = REG32(EDI); segment = DS; break;
// mod==1: sign-extended 8-bit displacement; mod==2: 32-bit displacement.
343 disp8 = FETCH(cpustate);
345 } else if( mod == 2 ) {
346 disp32 = FETCH32(cpustate);
350 if( cpustate->segment_prefix )
351 segment = cpustate->segment_override;
354 *out_segment = segment;
// ---- 16-bit address size: classic base/index combinations ----
360 case 0: ea = REG16(BX) + REG16(SI); segment = DS; break;
361 case 1: ea = REG16(BX) + REG16(DI); segment = DS; break;
362 case 2: ea = REG16(BP) + REG16(SI); segment = SS; break;
363 case 3: ea = REG16(BP) + REG16(DI); segment = SS; break;
364 case 4: ea = REG16(SI); segment = DS; break;
365 case 5: ea = REG16(DI); segment = DS; break;
// rm==6: mod==0 is absolute disp16, otherwise BP-based (SS default).
368 ea = FETCH16(cpustate); segment = DS;
370 ea = REG16(BP); segment = SS;
373 case 7: ea = REG16(BX); segment = DS; break;
376 disp8 = FETCH(cpustate);
378 } else if( mod == 2 ) {
379 disp16 = FETCH16(cpustate);
383 if( cpustate->segment_prefix )
384 segment = cpustate->segment_override;
// 16-bit addressing wraps within 64KB.
386 *out_ea = ea & 0xffff;
387 *out_segment = segment;
/* Decodes a ModRM effective address without applying segment translation;
   optionally reports the default/overridden segment through *seg. */
391 static UINT32 GetNonTranslatedEA(i386_state *cpustate,UINT8 modrm,UINT8 *seg)
395 modrm_to_EA(cpustate, modrm, &ea, &segment );
396 if(seg) *seg = segment;
/* Decodes a ModRM effective address and translates it through the segment
   machinery (limit/rights checks for an access of `size` bytes, rwn = read/
   write/none intent), returning the linear address. */
400 static UINT32 GetEA(i386_state *cpustate,UINT8 modrm, int rwn, UINT32 size)
404 modrm_to_EA(cpustate, modrm, &ea, &segment );
405 return i386_translate(cpustate, segment, ea, rwn, size );
408 /* Check segment register for validity when changing privilege level after an RETF */
/* Per the RET-to-outer-level rules: if the data segment register no longer
   passes the descriptor checks at the new privilege level, it is silently
   loaded with the null selector rather than faulting. */
409 static void i386_check_sreg_validity(i386_state* cpustate, int reg)
411 UINT16 selector = cpustate->sreg[reg].selector;
412 UINT8 CPL = cpustate->CPL;
417 memset(&desc, 0, sizeof(desc));
418 desc.selector = selector;
419 i386_load_protected_mode_segment(cpustate,&desc,NULL);
420 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
421 RPL = selector & 0x03;
423 /* Must be within the relevant descriptor table limits */
426 if((selector & ~0x07) > cpustate->ldtr.limit)
431 if((selector & ~0x07) > cpustate->gdtr.limit)
435 /* Must be either a data or readable code segment */
// flags bits 3-4 (0x18): 0x18 = code, 0x10 = data; bit 1 of a code
// descriptor is the readable bit.
436 if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010)
441 /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */
442 if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010)
444 if((DPL < CPL) || (DPL < RPL))
448 /* if segment is invalid, then segment register is nulled */
451 cpustate->sreg[reg].selector = 0;
452 i386_load_segment_descriptor(cpustate,reg);
/* Checks an access of `size` bytes at `offset` against the segment's limit.
   Handles both normal and expand-down data segments.  Logs and reports a
   failure (return path is outside the visible lines); real/V86 mode accesses
   are not checked here. */
456 static int i386_limit_check(i386_state *cpustate, int seg, UINT32 offset, UINT32 size)
458 if(PROTECTED_MODE && !V8086_MODE)
// Expand-down data segment: valid offsets lie ABOVE the limit, up to 0xFFFF
// (d=0) or 0xFFFFFFFF (d=1).
460 if((cpustate->sreg[seg].flags & 0x0018) == 0x0010 && cpustate->sreg[seg].flags & 0x0004) // if expand-down data segment
462 // offset must be above the limit; with d=0 the access must also stay within 0xffff
463 if((offset <= cpustate->sreg[seg].limit) || ((cpustate->sreg[seg].d)?0:((offset + size - 1) > 0xffff)))
465 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x (expand-down)\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
// Normal segment: the last byte of the access must be within the limit.
471 if((offset + size - 1) > cpustate->sreg[seg].limit)
473 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
/* Loads a segment register via MOV/POP semantics, performing the full set of
   protected-mode checks.  SS gets the strict writable-data-segment rules
   (faults are #GP/#SS); DS/ES/FS/GS allow null selectors and readable code
   segments (faults are #GP/#NP).  On success the descriptor cache is
   refreshed and *fault (if given) is cleared. */
481 static void i386_sreg_load(i386_state *cpustate, UINT16 selector, UINT8 reg, bool *fault)
483 // Checks done when MOV changes a segment register in protected mode
487 RPL = selector & 0x0003;
// Real or V86 mode: no checks, just reload the cache.
489 if(!PROTECTED_MODE || V8086_MODE)
491 cpustate->sreg[reg].selector = selector;
492 i386_load_segment_descriptor(cpustate, reg);
493 if(fault) *fault = false;
// Assume failure until every check below has passed.
497 if(fault) *fault = true;
// ---- SS checks ----
502 memset(&stack, 0, sizeof(stack));
503 stack.selector = selector;
504 i386_load_protected_mode_segment(cpustate,&stack,NULL);
505 DPL = (stack.flags >> 5) & 0x03;
// SS may never be null.
507 if((selector & ~0x0003) == 0)
509 logerror("SReg Load (%08x): Selector is null.\n",cpustate->pc);
512 if(selector & 0x0004) // LDT
514 if((selector & ~0x0007) > cpustate->ldtr.limit)
516 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
517 FAULT(FAULT_GP,selector & ~0x03)
522 if((selector & ~0x0007) > cpustate->gdtr.limit)
524 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
525 FAULT(FAULT_GP,selector & ~0x03)
// SS requires RPL == CPL and DPL == CPL.
530 logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",cpustate->pc);
531 FAULT(FAULT_GP,selector & ~0x03)
// Must be a writable data segment (0x10 = data, bit 1 = writable).
533 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
535 logerror("SReg Load (%08x): Segment is not a writable data segment.\n",cpustate->pc);
536 FAULT(FAULT_GP,selector & ~0x03)
540 logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",cpustate->pc);
541 FAULT(FAULT_GP,selector & ~0x03)
// A not-present stack segment raises #SS, not #NP.
543 if(!(stack.flags & 0x0080))
545 logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
546 FAULT(FAULT_SS,selector & ~0x03)
// ---- DS/ES/FS/GS checks ----
549 if(reg == DS || reg == ES || reg == FS || reg == GS)
// Null selector is legal here; it only faults when later used.
553 if((selector & ~0x0003) == 0)
555 cpustate->sreg[reg].selector = selector;
556 i386_load_segment_descriptor(cpustate, reg );
557 if(fault) *fault = false;
561 memset(&desc, 0, sizeof(desc));
562 desc.selector = selector;
563 i386_load_protected_mode_segment(cpustate,&desc,NULL);
564 DPL = (desc.flags >> 5) & 0x03;
566 if(selector & 0x0004) // LDT
568 if((selector & ~0x0007) > cpustate->ldtr.limit)
570 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
571 FAULT(FAULT_GP,selector & ~0x03)
576 if((selector & ~0x0007) > cpustate->gdtr.limit)
578 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
579 FAULT(FAULT_GP,selector & ~0x03)
// Must be a data segment or a readable code segment.
582 if((desc.flags & 0x0018) != 0x10)
584 if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10))
586 logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",cpustate->pc);
587 FAULT(FAULT_GP,selector & ~0x03)
// Privilege check applies to data and non-conforming code segments only.
590 if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18)))
592 // if data or non-conforming code segment
593 if((RPL > DPL) || (CPL > DPL))
595 logerror("SReg Load (%08x): Selector RPL or CPL is not less or equal to segment DPL.\n",cpustate->pc);
596 FAULT(FAULT_GP,selector & ~0x03)
// Not-present here raises #NP (unlike SS).
599 if(!(desc.flags & 0x0080))
601 logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
602 FAULT(FAULT_NP,selector & ~0x03)
// All checks passed: commit the new selector.
606 cpustate->sreg[reg].selector = selector;
607 i386_load_segment_descriptor(cpustate, reg );
608 if(fault) *fault = false;
/* Dispatches interrupt/trap/fault vector `irq`.  irq_gate is set for external
   interrupts (EIP, not prev_eip, is pushed); trap_level tracks nesting for
   double/triple fault escalation.  Real mode uses the 4-byte IVT; protected
   mode walks the IDT, performing the full gate/descriptor privilege checks
   and handling task gates, same-privilege and inner-privilege transfers
   (including the V86 extended stack frame).
   Fix vs. original: the inner-privilege stack-selector bounds checks compared
   the selector index against ldtr.base/gdtr.base instead of the table LIMITS,
   making the "past LDT/GDT limit" checks meaningless (cf. the gate-segment
   checks above, which correctly use .limit). */
611 static void i386_trap(i386_state *cpustate,int irq, int irq_gate, int trap_level)
613 /* I386 Interrupts/Traps/Faults:
615  * 0x00 Divide by zero
616  * 0x01 Debug exception
620  * 0x05 Array bounds check
621  * 0x06 Illegal Opcode
622  * 0x07 FPU not available
624  * 0x09 Coprocessor segment overrun
625  * 0x0a Invalid task state
626  * 0x0b Segment not present
627  * 0x0c Stack exception
628  * 0x0d General Protection Fault
631  * 0x10 Coprocessor error
634 UINT32 offset, oldflags = get_flags(cpustate);
// IDT entries are 8 bytes in protected mode, 4-byte seg:off pairs in real mode.
636 int entry = irq * (PROTECTED_MODE ? 8 : 4);
638 cpustate->lock = false;
// ---- Real mode: push FLAGS/CS/IP and vector through the IVT ----
640 if( !(PROTECTED_MODE) )
643 PUSH16(cpustate, oldflags & 0xffff );
644 PUSH16(cpustate, cpustate->sreg[CS].selector );
// INT3/INTO/coprocessor-overrun and external IRQs return to the next
// instruction; faults return to the faulting instruction (prev_eip).
645 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
646 PUSH16(cpustate, cpustate->eip );
648 PUSH16(cpustate, cpustate->prev_eip );
650 cpustate->sreg[CS].selector = READ16(cpustate, cpustate->idtr.base + entry + 2 );
651 cpustate->eip = READ16(cpustate, cpustate->idtr.base + entry );
// ---- Protected mode ----
661 UINT8 CPL = cpustate->CPL, DPL = 0; //, RPL = 0;
// Read the gate descriptor from the IDT.
664 v1 = READ32PL0(cpustate, cpustate->idtr.base + entry );
665 v2 = READ32PL0(cpustate, cpustate->idtr.base + entry + 4 );
666 offset = (v2 & 0xffff0000) | (v1 & 0xffff);
667 segment = (v1 >> 16) & 0xffff;
668 type = (v2>>8) & 0x1F;
669 flags = (v2>>8) & 0xf0ff;
// Fault while delivering a fault: escalate to double fault, then shutdown.
673 logerror("IRQ: Double fault.\n");
674 FAULT_EXP(FAULT_DF,0);
678 logerror("IRQ: Triple fault. CPU reset.\n");
679 CPU_RESET_CALL(CPU_MODEL);
680 cpustate->shutdown = 1;
684 /* segment privilege checks */
685 if(entry >= cpustate->idtr.limit)
687 logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",cpustate->pc,entry);
688 FAULT_EXP(FAULT_GP,entry+2)
690 /* segment must be interrupt gate, trap gate, or task gate */
691 if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f)
693 logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,cpustate->pc,segment);
694 FAULT_EXP(FAULT_GP,entry+2)
697 if(cpustate->ext == 0) // if software interrupt (caused by INT/INTO/INT3)
// Software INT n may not invoke a gate more privileged than the caller allows.
699 if(((flags >> 5) & 0x03) < CPL)
701 logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",cpustate->pc);
702 FAULT_EXP(FAULT_GP,entry+2)
// In V86 mode, INT n (other than INT3, opcode 0xCC) requires IOPL == 3.
706 if((!cpustate->IOP1 || !cpustate->IOP2) && (cpustate->opcode != 0xcc))
708 logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
715 if((flags & 0x0080) == 0)
717 logerror("IRQ: Vector segment is not present.\n");
718 FAULT_EXP(FAULT_NP,entry+2)
// ---- Task gate: switch to the TSS named by the gate ----
724 memset(&desc, 0, sizeof(desc));
725 desc.selector = segment;
726 i386_load_protected_mode_segment(cpustate,&desc,NULL);
729 logerror("IRQ: Task gate: TSS is not in the GDT.\n");
730 FAULT_EXP(FAULT_TS,segment & ~0x03);
734 if(segment > cpustate->gdtr.limit)
736 logerror("IRQ: Task gate: TSS is past GDT limit.\n");
737 FAULT_EXP(FAULT_TS,segment & ~0x03);
740 if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01)
742 logerror("IRQ: Task gate: TSS is not an available TSS.\n");
743 FAULT_EXP(FAULT_TS,segment & ~0x03);
745 if((desc.flags & 0x0080) == 0)
747 logerror("IRQ: Task gate: TSS is not present.\n");
748 FAULT_EXP(FAULT_NP,segment & ~0x03);
// Faults resume at the faulting instruction after the task switch.
750 if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1))
751 cpustate->eip = cpustate->prev_eip;
// Descriptor bit 3 distinguishes a 386 TSS from a 286 TSS.
752 if(desc.flags & 0x08)
753 i386_task_switch(cpustate,desc.selector,1);
755 i286_task_switch(cpustate,desc.selector,1);
760 /* Interrupt or Trap gate */
761 memset(&desc, 0, sizeof(desc));
762 desc.selector = segment;
763 i386_load_protected_mode_segment(cpustate,&desc,NULL);
764 CPL = cpustate->CPL; // current privilege level
765 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
766 // RPL = segment & 0x03; // requested privilege level
// Validate the gate's target code segment selector.
768 if((segment & ~0x03) == 0)
770 logerror("IRQ: Gate segment is null.\n");
771 FAULT_EXP(FAULT_GP,cpustate->ext)
775 if((segment & ~0x07) > cpustate->ldtr.limit)
777 logerror("IRQ: Gate segment is past LDT limit.\n");
778 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
783 if((segment & ~0x07) > cpustate->gdtr.limit)
785 logerror("IRQ: Gate segment is past GDT limit.\n");
786 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
789 if((desc.flags & 0x0018) != 0x18)
791 logerror("IRQ: Gate descriptor is not a code segment.\n");
792 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
794 if((desc.flags & 0x0080) == 0)
796 logerror("IRQ: Gate segment is not present.\n");
797 FAULT_EXP(FAULT_NP,(segment & 0x03)+cpustate->ext)
// Non-conforming target with DPL < CPL: transfer to inner privilege level.
799 if((desc.flags & 0x0004) == 0 && (DPL < CPL))
801 /* IRQ to inner privilege */
803 UINT32 newESP,oldSS,oldESP;
// From V86 mode the handler must run at ring 0.
805 if(V8086_MODE && DPL)
807 logerror("IRQ: Gate to CPL>0 from VM86 mode.\n");
808 FAULT_EXP(FAULT_GP,segment & ~0x03);
810 /* Check new stack segment in TSS */
811 memset(&stack, 0, sizeof(stack));
812 stack.selector = i386_get_stack_segment(cpustate,DPL);
813 i386_load_protected_mode_segment(cpustate,&stack,NULL);
814 oldSS = cpustate->sreg[SS].selector;
819 if((stack.selector & ~0x03) == 0)
821 logerror("IRQ: New stack selector is null.\n");
822 FAULT_EXP(FAULT_GP,cpustate->ext)
824 if(stack.selector & 0x04)
// BUGFIX: compare against the table limit, not its linear base address
// (matches the gate-segment checks above and the logged message).
826 if((stack.selector & ~0x07) > cpustate->ldtr.limit)
828 logerror("IRQ: New stack selector is past LDT limit.\n");
829 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
834 if((stack.selector & ~0x07) > cpustate->gdtr.limit)
836 logerror("IRQ: New stack selector is past GDT limit.\n");
837 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
840 if((stack.selector & 0x03) != DPL)
842 logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n");
843 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
845 if(((stack.flags >> 5) & 0x03) != DPL)
847 logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n");
848 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
850 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
852 logerror("IRQ: New stack segment is not a writable data segment.\n");
853 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
855 if((stack.flags & 0x0080) == 0)
857 logerror("IRQ: New stack segment is not present.\n");
858 FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
// Room check: V86 frames are larger (extra GS/FS/DS/ES); expand-down
// segments (flags bit 2) need the inverted comparison.
860 newESP = i386_get_stack_ptr(cpustate,DPL);
861 if(type & 0x08) // 32-bit gate
863 if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4)))
865 logerror("IRQ: New stack has no space for return addresses.\n");
866 FAULT_EXP(FAULT_SS,0)
872 if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4)))
874 logerror("IRQ: New stack has no space for return addresses.\n");
875 FAULT_EXP(FAULT_SS,0)
878 if(offset > desc.limit)
880 logerror("IRQ: New EIP is past code segment limit.\n");
881 FAULT_EXP(FAULT_GP,0)
883 /* change CPL before accessing the stack */
885 /* check for page fault at new stack TODO: check if stack frame crosses page boundary */
886 WRITE_TEST(cpustate, stack.base+newESP-1);
887 /* Load new stack segment descriptor */
888 cpustate->sreg[SS].selector = stack.selector;
889 i386_load_protected_mode_segment(cpustate,&cpustate->sreg[SS],NULL);
890 i386_set_descriptor_accessed(cpustate, stack.selector);
// V86 interrupt: push the old data segment registers, then clear them.
894 //logerror("IRQ (%08x): Interrupt during V8086 task\n",cpustate->pc);
897 PUSH32SEG(cpustate,cpustate->sreg[GS].selector & 0xffff);
898 PUSH32SEG(cpustate,cpustate->sreg[FS].selector & 0xffff);
899 PUSH32SEG(cpustate,cpustate->sreg[DS].selector & 0xffff);
900 PUSH32SEG(cpustate,cpustate->sreg[ES].selector & 0xffff);
904 PUSH16(cpustate,cpustate->sreg[GS].selector);
905 PUSH16(cpustate,cpustate->sreg[FS].selector);
906 PUSH16(cpustate,cpustate->sreg[DS].selector);
907 PUSH16(cpustate,cpustate->sreg[ES].selector);
909 cpustate->sreg[GS].selector = 0;
910 cpustate->sreg[FS].selector = 0;
911 cpustate->sreg[DS].selector = 0;
912 cpustate->sreg[ES].selector = 0;
914 i386_load_segment_descriptor(cpustate,GS);
915 i386_load_segment_descriptor(cpustate,FS);
916 i386_load_segment_descriptor(cpustate,DS);
917 i386_load_segment_descriptor(cpustate,ES);
// Push the outer stack pointer so IRET can return to it.
922 PUSH32SEG(cpustate,oldSS);
923 PUSH32(cpustate,oldESP);
928 PUSH16(cpustate,oldSS);
929 PUSH16(cpustate,oldESP);
// Conforming target or DPL == CPL: same-privilege transfer.
936 if((desc.flags & 0x0004) || (DPL == CPL))
938 /* IRQ to same privilege */
939 if(V8086_MODE && !cpustate->ext)
941 logerror("IRQ: Gate to same privilege from VM86 mode.\n");
942 FAULT_EXP(FAULT_GP,segment & ~0x03);
944 if(type == 0x0e || type == 0x0f) // 32-bit gate
948 // TODO: Add check for error code (2 extra bytes)
949 if(REG32(ESP) < stack_limit)
951 logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit);
952 FAULT_EXP(FAULT_SS,0)
954 if(offset > desc.limit)
956 logerror("IRQ: Gate segment offset is past segment limit.\n");
957 FAULT_EXP(FAULT_GP,0)
966 logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n");
964 FAULT_EXP(FAULT_GP,segment)
968 UINT32 tempSP = REG32(ESP);
971 // this is ugly but the alternative is worse
// Push the return frame: 16-bit for 286 gates, 32-bit for 386 gates.
972 if(type != 0x0e && type != 0x0f) // if not 386 interrupt or trap gate
974 PUSH16(cpustate, oldflags & 0xffff );
975 PUSH16(cpustate, cpustate->sreg[CS].selector );
976 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
977 PUSH16(cpustate, cpustate->eip );
979 PUSH16(cpustate, cpustate->prev_eip );
983 PUSH32(cpustate, oldflags & 0x00ffffff );
984 PUSH32SEG(cpustate, cpustate->sreg[CS].selector );
985 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
986 PUSH32(cpustate, cpustate->eip );
988 PUSH32(cpustate, cpustate->prev_eip );
// Load the handler's CS:EIP; RPL of the new CS is forced to the new CPL.
997 segment = (segment & ~0x03) | cpustate->CPL;
998 cpustate->sreg[CS].selector = segment;
999 cpustate->eip = offset;
// Interrupt gates (0x06/0x0e) additionally clear IF; trap gates do not.
1001 if(type == 0x0e || type == 0x06)
1007 i386_load_segment_descriptor(cpustate,CS);
1008 CHANGE_PC(cpustate,cpustate->eip);
/* Dispatches an exception exactly like i386_trap(), then pushes the error
   code onto the NEW stack for the vectors that carry one (#DF, #TS, #NP,
   #SS, #GP, #PF).  The push width matches the gate type that was taken;
   for a task gate the target TSS descriptor is consulted instead. */
1012 static void i386_trap_with_error(i386_state *cpustate,int irq, int irq_gate, int trap_level, UINT32 error)
1014 i386_trap(cpustate,irq,irq_gate,trap_level);
1015 if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14)
1017 // for these exceptions, an error code is pushed onto the stack by the processor.
1018 // no error code is pushed for software interrupts, either.
1021 UINT32 entry = irq * 8;
// Re-read the gate type to pick 16- vs 32-bit push.
1023 v2 = READ32PL0(cpustate, cpustate->idtr.base + entry + 4 );
1024 type = (v2>>8) & 0x1F;
// Task gate: look at the TSS descriptor in the GDT to find its width.
1027 v2 = READ32PL0(cpustate, cpustate->idtr.base + entry);
1028 v2 = READ32PL0(cpustate, cpustate->gdtr.base + ((v2 >> 16) & 0xfff8) + 4);
1029 type = (v2>>8) & 0x1F;
1032 PUSH32(cpustate,error);
1034 PUSH16(cpustate,error);
1037 PUSH16(cpustate,error);
/* Performs a hardware task switch using the 286 (16-bit) TSS format:
   saves the outgoing task's registers into the current TSS, loads TR with
   `selector`, restores the incoming task's state, and maintains the busy
   bits and (for nested switches) the back-link + NT flag. */
1042 static void i286_task_switch(i386_state *cpustate, UINT16 selector, UINT8 nested)
1047 UINT8 ar_byte; // access rights byte
1049 /* TODO: Task State Segment privilege checks */
1051 /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
// Busy bit is bit 1 of the access-rights byte at descriptor offset 5.
1054 if(cpustate->task.segment & 0x0004)
1056 ar_byte = READ8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1057 WRITE8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1061 ar_byte = READ8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1062 WRITE8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1066 /* Save the state of the current task in the current TSS (TR register base) */
1067 tss = cpustate->task.base;
1068 WRITE16(cpustate,tss+0x0e,cpustate->eip & 0x0000ffff);
1069 WRITE16(cpustate,tss+0x10,get_flags(cpustate) & 0x0000ffff);
1070 WRITE16(cpustate,tss+0x12,REG16(AX));
1071 WRITE16(cpustate,tss+0x14,REG16(CX));
1072 WRITE16(cpustate,tss+0x16,REG16(DX));
1073 WRITE16(cpustate,tss+0x18,REG16(BX));
1074 WRITE16(cpustate,tss+0x1a,REG16(SP));
1075 WRITE16(cpustate,tss+0x1c,REG16(BP));
1076 WRITE16(cpustate,tss+0x1e,REG16(SI));
1077 WRITE16(cpustate,tss+0x20,REG16(DI));
1078 WRITE16(cpustate,tss+0x22,cpustate->sreg[ES].selector);
1079 WRITE16(cpustate,tss+0x24,cpustate->sreg[CS].selector);
1080 WRITE16(cpustate,tss+0x26,cpustate->sreg[SS].selector);
1081 WRITE16(cpustate,tss+0x28,cpustate->sreg[DS].selector);
1083 old_task = cpustate->task.segment;
1085 /* Load task register with the selector of the incoming task */
1086 cpustate->task.segment = selector;
1087 memset(&seg, 0, sizeof(seg));
1088 seg.selector = cpustate->task.segment;
1089 i386_load_protected_mode_segment(cpustate,&seg,NULL);
1090 cpustate->task.limit = seg.limit;
1091 cpustate->task.base = seg.base;
1092 cpustate->task.flags = seg.flags;
1094 /* Set TS bit in CR0 */
1095 cpustate->cr[0] |= 0x08;
1097 /* Load incoming task state from the new task's TSS */
1098 tss = cpustate->task.base;
1099 cpustate->ldtr.segment = READ16(cpustate,tss+0x2a) & 0xffff;
1100 seg.selector = cpustate->ldtr.segment;
1101 i386_load_protected_mode_segment(cpustate,&seg,NULL);
1102 cpustate->ldtr.limit = seg.limit;
1103 cpustate->ldtr.base = seg.base;
1104 cpustate->ldtr.flags = seg.flags;
1105 cpustate->eip = READ16(cpustate,tss+0x0e);
1106 set_flags(cpustate,READ16(cpustate,tss+0x10));
1107 REG16(AX) = READ16(cpustate,tss+0x12);
1108 REG16(CX) = READ16(cpustate,tss+0x14);
1109 REG16(DX) = READ16(cpustate,tss+0x16);
1110 REG16(BX) = READ16(cpustate,tss+0x18);
1111 REG16(SP) = READ16(cpustate,tss+0x1a);
1112 REG16(BP) = READ16(cpustate,tss+0x1c);
1113 REG16(SI) = READ16(cpustate,tss+0x1e);
1114 REG16(DI) = READ16(cpustate,tss+0x20);
1115 cpustate->sreg[ES].selector = READ16(cpustate,tss+0x22) & 0xffff;
1116 i386_load_segment_descriptor(cpustate, ES);
1117 cpustate->sreg[CS].selector = READ16(cpustate,tss+0x24) & 0xffff;
1118 i386_load_segment_descriptor(cpustate, CS);
1119 cpustate->sreg[SS].selector = READ16(cpustate,tss+0x26) & 0xffff;
1120 i386_load_segment_descriptor(cpustate, SS);
1121 cpustate->sreg[DS].selector = READ16(cpustate,tss+0x28) & 0xffff;
1122 i386_load_segment_descriptor(cpustate, DS);
1124 /* Set the busy bit in the new task's descriptor */
1125 if(selector & 0x0004)
1127 ar_byte = READ8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5);
1128 WRITE8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1132 ar_byte = READ8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5);
1133 WRITE8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1136 /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
1137 and set the NT flag in the EFLAGS register */
1140 WRITE16(cpustate,tss+0,old_task);
1143 CHANGE_PC(cpustate,cpustate->eip);
// CPL follows the incoming stack segment's DPL.
1145 cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1146 // printf("286 Task Switch from selector %04x to %04x\n",old_task,selector);
/* Performs a hardware task switch using the 386 (32-bit) TSS format.
   Like i286_task_switch() but with 32-bit registers, FS/GS, and CR3:
   the incoming CR3 is loaded (flushing the TLB on change) only AFTER the
   back-link/NT handling, since the old TSS mapping may vanish with it. */
1149 static void i386_task_switch(i386_state *cpustate, UINT16 selector, UINT8 nested)
1154 UINT8 ar_byte; // access rights byte
1155 UINT32 oldcr3 = cpustate->cr[3];
1157 /* TODO: Task State Segment privilege checks */
1159 /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
// Busy bit is bit 1 of the access-rights byte at descriptor offset 5.
1162 if(cpustate->task.segment & 0x0004)
1164 ar_byte = READ8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1165 WRITE8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1169 ar_byte = READ8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1170 WRITE8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1174 /* Save the state of the current task in the current TSS (TR register base) */
1175 tss = cpustate->task.base;
1176 WRITE32(cpustate,tss+0x1c,cpustate->cr[3]); // correct?
1177 WRITE32(cpustate,tss+0x20,cpustate->eip);
1178 WRITE32(cpustate,tss+0x24,get_flags(cpustate));
1179 WRITE32(cpustate,tss+0x28,REG32(EAX));
1180 WRITE32(cpustate,tss+0x2c,REG32(ECX));
1181 WRITE32(cpustate,tss+0x30,REG32(EDX));
1182 WRITE32(cpustate,tss+0x34,REG32(EBX));
1183 WRITE32(cpustate,tss+0x38,REG32(ESP));
1184 WRITE32(cpustate,tss+0x3c,REG32(EBP));
1185 WRITE32(cpustate,tss+0x40,REG32(ESI));
1186 WRITE32(cpustate,tss+0x44,REG32(EDI));
1187 WRITE32(cpustate,tss+0x48,cpustate->sreg[ES].selector);
1188 WRITE32(cpustate,tss+0x4c,cpustate->sreg[CS].selector);
1189 WRITE32(cpustate,tss+0x50,cpustate->sreg[SS].selector);
1190 WRITE32(cpustate,tss+0x54,cpustate->sreg[DS].selector);
1191 WRITE32(cpustate,tss+0x58,cpustate->sreg[FS].selector);
1192 WRITE32(cpustate,tss+0x5c,cpustate->sreg[GS].selector);
1194 old_task = cpustate->task.segment;
1196 /* Load task register with the selector of the incoming task */
1197 cpustate->task.segment = selector;
1198 memset(&seg, 0, sizeof(seg));
1199 seg.selector = cpustate->task.segment;
1200 i386_load_protected_mode_segment(cpustate,&seg,NULL);
1201 cpustate->task.limit = seg.limit;
1202 cpustate->task.base = seg.base;
1203 cpustate->task.flags = seg.flags;
1205 /* Set TS bit in CR0 */
1206 cpustate->cr[0] |= 0x08;
1208 /* Load incoming task state from the new task's TSS */
1209 tss = cpustate->task.base;
1210 cpustate->ldtr.segment = READ32(cpustate,tss+0x60) & 0xffff;
1211 seg.selector = cpustate->ldtr.segment;
1212 i386_load_protected_mode_segment(cpustate,&seg,NULL);
1213 cpustate->ldtr.limit = seg.limit;
1214 cpustate->ldtr.base = seg.base;
1215 cpustate->ldtr.flags = seg.flags;
1216 cpustate->eip = READ32(cpustate,tss+0x20);
1217 set_flags(cpustate,READ32(cpustate,tss+0x24));
1218 REG32(EAX) = READ32(cpustate,tss+0x28);
1219 REG32(ECX) = READ32(cpustate,tss+0x2c);
1220 REG32(EDX) = READ32(cpustate,tss+0x30);
1221 REG32(EBX) = READ32(cpustate,tss+0x34);
1222 REG32(ESP) = READ32(cpustate,tss+0x38);
1223 REG32(EBP) = READ32(cpustate,tss+0x3c);
1224 REG32(ESI) = READ32(cpustate,tss+0x40);
1225 REG32(EDI) = READ32(cpustate,tss+0x44);
1226 cpustate->sreg[ES].selector = READ32(cpustate,tss+0x48) & 0xffff;
1227 i386_load_segment_descriptor(cpustate, ES);
1228 cpustate->sreg[CS].selector = READ32(cpustate,tss+0x4c) & 0xffff;
1229 i386_load_segment_descriptor(cpustate, CS);
1230 cpustate->sreg[SS].selector = READ32(cpustate,tss+0x50) & 0xffff;
1231 i386_load_segment_descriptor(cpustate, SS);
1232 cpustate->sreg[DS].selector = READ32(cpustate,tss+0x54) & 0xffff;
1233 i386_load_segment_descriptor(cpustate, DS);
1234 cpustate->sreg[FS].selector = READ32(cpustate,tss+0x58) & 0xffff;
1235 i386_load_segment_descriptor(cpustate, FS);
1236 cpustate->sreg[GS].selector = READ32(cpustate,tss+0x5c) & 0xffff;
1237 i386_load_segment_descriptor(cpustate, GS);
1238 /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
1239 and set the NT flag in the EFLAGS register before setting cr3 as the old tss address might be gone */
1242 WRITE32(cpustate,tss+0,old_task);
// Load the incoming address space; only flush the TLB if CR3 changed.
1245 cpustate->cr[3] = READ32(cpustate,tss+0x1c); // CR3 (PDBR)
1246 if(oldcr3 != cpustate->cr[3])
1247 vtlb_flush_dynamic(cpustate->vtlb);
1249 /* Set the busy bit in the new task's descriptor */
1250 if(selector & 0x0004)
1252 ar_byte = READ8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5);
1253 WRITE8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1257 ar_byte = READ8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5);
1258 WRITE8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1261 CHANGE_PC(cpustate,cpustate->eip);
// CPL follows the incoming stack segment's DPL.
1263 cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1264 // printf("386 Task Switch from selector %04x to %04x\n",old_task,selector);
/* i386_check_irq_line: poll pending asynchronous events between instructions.
   A pending SMI is serviced first, and only when the CPU is not already in
   System Management Mode (SMI is not gated by EFLAGS.IF).  A maskable
   hardware interrupt is then taken only if IF is set. */
static void i386_check_irq_line(i386_state *cpustate)
if(!cpustate->smm && cpustate->smi)
/* enter System Management Mode */
pentium_smi(cpustate);
/* Check if the interrupts are enabled */
if ( (cpustate->irq_state) && cpustate->IF )
/* interrupt-acknowledge overhead */
cpustate->cycles -= 2;
/* vector comes from the PIC's INTA cycle; irq_gate=1, trap_level=0 */
i386_trap(cpustate, cpustate->pic->get_intr_ack(), 1, 0);
cpustate->irq_state = 0;
/* i386_protected_mode_jump: execute a far JMP under protected-mode rules.
 * The target selector may name a code segment, an available 286/386 TSS,
 * a 286/386 call gate or a task gate.  Privilege/limit violations raise
 * #GP/#NP through the FAULT macros, which return from this function.
 * NOTE(review): several guard conditions fall on lines elided from this
 * view; each logerror message states the check being performed. */
static void i386_protected_mode_jump(i386_state *cpustate, UINT16 seg, UINT32 off, int indirect, int operand32)
I386_CALL_GATE call_gate;
UINT16 segment = seg;
UINT32 offset = off;
/* Check selector is not null */
if((segment & ~0x03) == 0)
logerror("JMP: Segment is null.\n");
/* Selector is within descriptor table limit */
if((segment & 0x04) == 0)
/* check GDT limit */
if((segment & ~0x07) > (cpustate->gdtr.limit))
logerror("JMP: Segment is past GDT limit.\n");
FAULT(FAULT_GP,segment & 0xfffc)
/* check LDT limit */
if((segment & ~0x07) > (cpustate->ldtr.limit))
logerror("JMP: Segment is past LDT limit.\n");
FAULT(FAULT_GP,segment & 0xfffc)
/* Determine segment type */
memset(&desc, 0, sizeof(desc));
desc.selector = segment;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
CPL = cpustate->CPL; // current privilege level
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
RPL = segment & 0x03; // requested privilege level
/* type bits 3+4 both set -> ordinary (non-system) code segment descriptor */
if((desc.flags & 0x0018) == 0x0018)
/* conforming bit (0x0004) selects which privilege rule applies */
if((desc.flags & 0x0004) == 0)
/* non-conforming */
logerror("JMP: RPL %i is less than CPL %i\n",RPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
logerror("JMP: DPL %i is not equal CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
/* conforming code segment */
logerror("JMP: DPL %i is less than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
/* present bit */
if((desc.flags & 0x0080) == 0)
logerror("JMP: Segment is not present\n");
FAULT(FAULT_NP,segment & 0xfffc)
if(offset > desc.limit)
logerror("JMP: Offset is past segment limit\n");
/* not a plain code segment: system segment or gate */
if((desc.flags & 0x0010) != 0)
logerror("JMP: Segment is a data segment\n");
FAULT(FAULT_GP,segment & 0xfffc) // #GP (cannot execute code in a data segment)
/* dispatch on the 4-bit system descriptor type */
switch(desc.flags & 0x000f)
case 0x01: // 286 Available TSS
case 0x09: // 386 Available TSS
logerror("JMP: Available 386 TSS at %08x\n",cpustate->pc);
memset(&desc, 0, sizeof(desc));
desc.selector = segment;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL);
FAULT(FAULT_GP,segment & 0xfffc)
if((desc.flags & 0x0080) == 0)
logerror("JMP: TSS: Segment is not present\n");
FAULT(FAULT_GP,segment & 0xfffc)
/* type bit 3 set -> 386 TSS (32-bit state), clear -> 286 TSS; no nesting for JMP */
if(desc.flags & 0x0008)
i386_task_switch(cpustate,desc.selector,0);
i286_task_switch(cpustate,desc.selector,0);
case 0x04: // 286 Call Gate
case 0x0c: // 386 Call Gate
//logerror("JMP: Call gate at %08x\n",cpustate->pc);
memset(&call_gate, 0, sizeof(call_gate));
call_gate.segment = segment;
i386_load_call_gate(cpustate,&call_gate);
DPL = call_gate.dpl;
logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL);
FAULT(FAULT_GP,segment & 0xfffc)
if((desc.flags & 0x0080) == 0)
logerror("JMP: Call Gate: Segment is not present\n");
FAULT(FAULT_NP,segment & 0xfffc)
/* Now we examine the segment that the call gate refers to */
if(call_gate.selector == 0)
logerror("JMP: Call Gate: Gate selector is null\n");
if(call_gate.selector & 0x04)
if((call_gate.selector & ~0x07) > cpustate->ldtr.limit)
logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* load and validate the gate's target code segment */
desc.selector = call_gate.selector;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
DPL = (desc.flags >> 5) & 0x03;
if((desc.flags & 0x0018) != 0x18)
logerror("JMP: Call Gate: Gate does not point to a code segment\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* non-conforming target requires DPL == CPL; conforming requires DPL <= CPL */
if((desc.flags & 0x0004) == 0)
logerror("JMP: Call Gate: Gate DPL does not equal CPL\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
logerror("JMP: Call Gate: Gate DPL is greater than CPL\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
if((desc.flags & 0x0080) == 0)
logerror("JMP: Call Gate: Gate Segment is not present\n");
FAULT(FAULT_NP,call_gate.selector & 0xfffc)
if(call_gate.offset > desc.limit)
logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* jump proceeds to the code segment and entry point named by the gate */
segment = call_gate.selector;
offset = call_gate.offset;
case 0x05: // Task Gate
logerror("JMP: Task gate at %08x\n",cpustate->pc);
memset(&call_gate, 0, sizeof(call_gate));
call_gate.segment = segment;
i386_load_call_gate(cpustate,&call_gate);
DPL = call_gate.dpl;
logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,segment & 0xfffc)
if(call_gate.present == 0)
logerror("JMP: Task Gate: Gate is not present.\n");
FAULT(FAULT_GP,segment & 0xfffc)
/* Check the TSS that the task gate points to */
desc.selector = call_gate.selector;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
RPL = call_gate.selector & 0x03; // requested privilege level
/* a TSS referenced through a task gate must live in the GDT (TI bit clear) */
if(call_gate.selector & 0x04)
logerror("JMP: Task Gate TSS: TSS must be global.\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* NOTE(review): this condition faults precisely when the type IS an
   available TSS (9 = 386, 1 = 286), yet the message claims the opposite.
   Looks inverted -- verify against the CALL task-gate path and Intel SDM. */
if((call_gate.ar & 0x000f) == 0x0009 || (call_gate.ar & 0x000f) == 0x0001)
logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n");
FAULT(FAULT_GP,call_gate.selector & 0xfffc)
if(call_gate.present == 0)
logerror("JMP: Task Gate TSS: TSS is not present.\n");
FAULT(FAULT_NP,call_gate.selector & 0xfffc)
/* 386 vs 286 TSS format, as above; no nesting for JMP */
if(call_gate.ar & 0x08)
i386_task_switch(cpustate,call_gate.selector,0);
i286_task_switch(cpustate,call_gate.selector,0);
default: // invalid segment type
logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f);
FAULT(FAULT_GP,segment & 0xfffc)
/* commit: CS receives the target selector with RPL forced to the current CPL */
segment = (segment & ~0x03) | cpustate->CPL;
/* 16-bit operand size keeps only the low word of the offset */
cpustate->eip = offset & 0x0000ffff;
cpustate->eip = offset;
cpustate->sreg[CS].selector = segment;
cpustate->performed_intersegment_jump = 1;
i386_load_segment_descriptor(cpustate,CS);
CHANGE_PC(cpustate,cpustate->eip);
/* i386_protected_mode_call: execute a far CALL under protected-mode rules.
 * Handles direct calls to code segments, available 286/386 TSSes, 286/386
 * call gates (including the inter-privilege stack switch with parameter
 * copying) and task gates.  Privilege/limit violations raise faults via
 * the FAULT macros, which return from this function.
 * NOTE(review): several guard conditions fall on lines elided from this
 * view; each logerror message states the check being performed. */
static void i386_protected_mode_call(i386_state *cpustate, UINT16 seg, UINT32 off, int indirect, int operand32)
I386_CALL_GATE gate;
UINT8 CPL, DPL, RPL;
UINT16 selector = seg;
UINT32 offset = off;
/* null selector is never callable */
if((selector & ~0x03) == 0)
logerror("CALL (%08x): Selector is null.\n",cpustate->pc);
FAULT(FAULT_GP,0) // #GP(0)
if((selector & ~0x07) > cpustate->ldtr.limit)
logerror("CALL: Selector is past LDT limit.\n");
FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
if((selector & ~0x07) > cpustate->gdtr.limit)
logerror("CALL: Selector is past GDT limit.\n");
FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
/* Determine segment type */
memset(&desc, 0, sizeof(desc));
desc.selector = selector;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
CPL = cpustate->CPL; // current privilege level
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
RPL = selector & 0x03; // requested privilege level
if((desc.flags & 0x0018) == 0x18) // is a code segment
/* conforming bit (0x0004) selects which privilege rule applies */
if(desc.flags & 0x0004)
logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
/* non-conforming */
logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL);
FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL);
FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
if((desc.flags & 0x0080) == 0)
logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* verify the stack can hold the return address before pushing anything */
if (operand32 != 0) // if 32-bit
UINT32 offset = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
if(i386_limit_check(cpustate, SS, offset, 8))
logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
FAULT(FAULT_SS,0) // #SS(0)
UINT32 offset = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
if(i386_limit_check(cpustate, SS, offset, 4))
logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
FAULT(FAULT_SS,0) // #SS(0)
if(offset > desc.limit)
logerror("CALL: EIP is past segment limit.\n");
FAULT(FAULT_GP,0) // #GP(0)
/* special segment type */
if(desc.flags & 0x0010)
logerror("CALL: Segment is a data segment.\n");
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
/* dispatch on the 4-bit system descriptor type */
switch(desc.flags & 0x000f)
case 0x01: // Available 286 TSS
case 0x09: // Available 386 TSS
logerror("CALL: Available TSS at %08x\n",cpustate->pc);
logerror("CALL: TSS: DPL is less than CPL.\n");
FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
logerror("CALL: TSS: DPL is less than RPL.\n");
FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
/* busy bit (type bit 1) must be clear for an *available* TSS */
if(desc.flags & 0x0002)
logerror("CALL: TSS: TSS is busy.\n");
FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
if((desc.flags & 0x0080) == 0)
logerror("CALL: TSS: Segment %02x is not present.\n",selector);
FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* type bit 3 set -> 386 TSS; nested=1 links back to the outgoing task */
if(desc.flags & 0x08)
i386_task_switch(cpustate,desc.selector,1);
i286_task_switch(cpustate,desc.selector,1);
case 0x04: // 286 call gate
case 0x0c: // 386 call gate
if((desc.flags & 0x000f) == 0x04)
memset(&gate, 0, sizeof(gate));
gate.segment = selector;
i386_load_call_gate(cpustate,&gate);
//logerror("CALL: Call gate at %08x (%i parameters)\n",cpustate->pc,gate.dword_count);
logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL);
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL);
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
if(gate.present == 0)
logerror("CALL: Call gate is not present.\n");
FAULT(FAULT_NP,desc.selector & ~0x03) // #GP(selector)
/* from here on, desc tracks the gate's target code segment */
desc.selector = gate.selector;
if((gate.selector & ~0x03) == 0)
logerror("CALL: Call gate: Segment is null.\n");
FAULT(FAULT_GP,0) // #GP(0)
if(desc.selector & 0x04)
if((desc.selector & ~0x07) > cpustate->ldtr.limit)
logerror("CALL: Call gate: Segment is past LDT limit\n");
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
if((desc.selector & ~0x07) > cpustate->gdtr.limit)
logerror("CALL: Call gate: Segment is past GDT limit\n");
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
i386_load_protected_mode_segment(cpustate,&desc,NULL);
if((desc.flags & 0x0018) != 0x18)
logerror("CALL: Call gate: Segment is not a code segment.\n");
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
DPL = ((desc.flags >> 5) & 0x03);
logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL);
FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
if((desc.flags & 0x0080) == 0)
logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector)
/* non-conforming target at higher privilege: switch to the inner stack */
if(DPL < CPL && (desc.flags & 0x0004) == 0)
UINT32 oldSS,oldESP;
/* more privilege */
/* Check new SS segment for privilege level from TSS */
memset(&stack, 0, sizeof(stack));
stack.selector = i386_get_stack_segment(cpustate,DPL);
i386_load_protected_mode_segment(cpustate,&stack,NULL);
if((stack.selector & ~0x03) == 0)
logerror("CALL: Call gate: TSS selector is null\n");
FAULT(FAULT_TS,0) // #TS(0)
if(stack.selector & 0x04)
if((stack.selector & ~0x07) > cpustate->ldtr.limit)
logerror("CALL: Call gate: TSS selector is past LDT limit\n");
FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
if((stack.selector & ~0x07) > cpustate->gdtr.limit)
logerror("CALL: Call gate: TSS selector is past GDT limit\n");
FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
if((stack.selector & 0x03) != DPL)
logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL);
FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
if(((stack.flags >> 5) & 0x03) != DPL)
logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL);
FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
if((stack.flags & 0x0018) != 0x10 && (stack.flags & 0x0002))
logerror("CALL: Call gate: Stack segment is not a writable data segment\n");
FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
if((stack.flags & 0x0080) == 0)
logerror("CALL: Call gate: Stack segment is not present\n");
FAULT(FAULT_SS,stack.selector) // #SS(SS selector)
/* inner-level stack pointer comes from the current task's TSS */
UINT32 newESP = i386_get_stack_ptr(cpustate,DPL);
/* room needed: SS:ESP + CS:EIP + up to 31 copied parameters */
if(newESP < ((gate.dword_count & 0x1f) + 16))
logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n");
FAULT(FAULT_SS,0) // #SS(0)
if(gate.offset > desc.limit)
logerror("CALL: Call gate: EIP is past segment limit.\n");
FAULT(FAULT_GP,0) // #GP(0)
if(newESP < ((gate.dword_count & 0x1f) + 8))
logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n");
FAULT(FAULT_SS,0) // #SS(0)
if((gate.offset & 0xffff) > desc.limit)
logerror("CALL: Call gate: IP is past segment limit.\n");
FAULT(FAULT_GP,0) // #GP(0)
selector = gate.selector;
offset = gate.offset;
/* CPL drops to the stack segment's DPL before the stack switch */
cpustate->CPL = (stack.flags >> 5) & 0x03;
/* check for page fault at new stack */
WRITE_TEST(cpustate, stack.base+newESP-1);
/* switch to new stack */
oldSS = cpustate->sreg[SS].selector;
cpustate->sreg[SS].selector = i386_get_stack_segment(cpustate,cpustate->CPL);
oldESP = REG32(ESP);
i386_load_segment_descriptor(cpustate, SS );
REG32(ESP) = newESP;
/* push the outer stack pointer so RETF can restore it */
PUSH32SEG(cpustate,oldSS);
PUSH32(cpustate,oldESP);
PUSH16(cpustate,oldSS);
PUSH16(cpustate,oldESP & 0xffff);
/* temp describes the old stack segment so parameters can be fetched from it */
memset(&temp, 0, sizeof(temp));
temp.selector = oldSS;
i386_load_protected_mode_segment(cpustate,&temp,NULL);
/* copy parameters from old stack to new stack */
for(x=(gate.dword_count & 0x1f)-1;x>=0;x--)
UINT32 addr = oldESP + (operand32?(x*4):(x*2));
addr = temp.base + (temp.d?addr:(addr&0xffff));
PUSH32(cpustate,READ32(cpustate,addr));
PUSH16(cpustate,READ16(cpustate,addr));
/* same privilege */
if (operand32 != 0) // if 32-bit
UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
if(i386_limit_check(cpustate, SS, stkoff, 8))
logerror("CALL: Stack has no room for return address.\n");
FAULT(FAULT_SS,0) // #SS(0)
selector = gate.selector;
offset = gate.offset;
UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
if(i386_limit_check(cpustate, SS, stkoff, 4))
logerror("CALL: Stack has no room for return address.\n");
FAULT(FAULT_SS,0) // #SS(0)
selector = gate.selector;
offset = gate.offset & 0xffff;
if(offset > desc.limit)
logerror("CALL: EIP is past segment limit.\n");
FAULT(FAULT_GP,0) // #GP(0)
case 0x05: // task gate
logerror("CALL: Task gate at %08x\n",cpustate->pc);
memset(&gate, 0, sizeof(gate));
gate.segment = selector;
i386_load_call_gate(cpustate,&gate);
logerror("CALL: Task Gate: Gate DPL is less than CPL.\n");
FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
logerror("CALL: Task Gate: Gate DPL is less than RPL.\n");
FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
if((gate.ar & 0x0080) == 0)
logerror("CALL: Task Gate: Gate is not present.\n");
FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* Check the TSS that the task gate points to */
desc.selector = gate.selector;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
/* the referenced TSS must live in the GDT (TI bit clear) */
if(gate.selector & 0x04)
logerror("CALL: Task Gate: TSS is not global.\n");
FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
if((gate.selector & ~0x07) > cpustate->gdtr.limit)
logerror("CALL: Task Gate: TSS is past GDT limit.\n");
FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
if(desc.flags & 0x0002)
logerror("CALL: Task Gate: TSS is busy.\n");
FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
if((desc.flags & 0x0080) == 0)
logerror("CALL: Task Gate: TSS is not present.\n");
FAULT(FAULT_NP,gate.selector & ~0x03) // #TS(selector)
/* 386 vs 286 TSS format; nested=1 links back to the caller's task */
if(desc.flags & 0x08)
i386_task_switch(cpustate,desc.selector,1); // with nesting
i286_task_switch(cpustate,desc.selector,1);
logerror("CALL: Invalid special segment type (%i) to jump to.\n",desc.flags & 0x000f);
/* NOTE(review): every other fault here masks with ~0x03 (keeping the TI
   bit in the error code); ~0x07 clears TI as well -- verify intended. */
FAULT(FAULT_GP,selector & ~0x07) // #GP(selector)
/* commit: CS gets the target selector with RPL forced to the current CPL */
selector = (selector & ~0x03) | cpustate->CPL;
/* NOTE(review): tempSP is restored on an error path elided from this view,
   undoing the pushes if loading CS faults. */
UINT32 tempSP = REG32(ESP);
// this is ugly but the alternative is worse
/* 16-bit operand size */
PUSH16(cpustate, cpustate->sreg[CS].selector );
PUSH16(cpustate, cpustate->eip & 0x0000ffff );
cpustate->sreg[CS].selector = selector;
cpustate->performed_intersegment_jump = 1;
cpustate->eip = offset;
i386_load_segment_descriptor(cpustate,CS);
/* 32-bit operand size */
PUSH32SEG(cpustate, cpustate->sreg[CS].selector );
PUSH32(cpustate, cpustate->eip );
cpustate->sreg[CS].selector = selector;
cpustate->performed_intersegment_jump = 1;
cpustate->eip = offset;
i386_load_segment_descriptor(cpustate, CS );
REG32(ESP) = tempSP;
CHANGE_PC(cpustate,cpustate->eip);
/* i386_protected_mode_retf: execute a far RET under protected-mode rules.
 * Pops CS:EIP (plus 'count' bytes of caller parameters).  If the return
 * selector's RPL is greater than the current CPL, SS:ESP are also popped
 * and control returns to the outer privilege level, after which DS/ES/FS/GS
 * are revalidated for the new CPL.  Faults return via the FAULT macros.
 * NOTE(review): guard conditions on elided lines; the descriptor-table
 * limit checks here use >= where JMP/CALL use > -- verify which is right. */
static void i386_protected_mode_retf(i386_state* cpustate, UINT8 count, UINT8 operand32)
UINT32 newCS, newEIP;
UINT8 CPL, RPL, DPL;
/* peek the return address off the stack without popping yet */
UINT32 ea = i386_translate(cpustate, SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32)?8:4);
newEIP = READ16(cpustate, ea) & 0xffff;
newCS = READ16(cpustate, ea+2) & 0xffff;
newEIP = READ32(cpustate, ea);
newCS = READ32(cpustate, ea+4) & 0xffff;
memset(&desc, 0, sizeof(desc));
desc.selector = newCS;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
CPL = cpustate->CPL; // current privilege level
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
/* RET may never transfer to a more privileged level */
logerror("RETF (%08x): Return segment RPL is less than CPL.\n",cpustate->pc);
FAULT(FAULT_GP,newCS & ~0x03)
/* same privilege level */
if((newCS & ~0x03) == 0)
logerror("RETF: Return segment is null.\n");
if((newCS & ~0x07) >= cpustate->ldtr.limit)
logerror("RETF: Return segment is past LDT limit.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((newCS & ~0x07) >= cpustate->gdtr.limit)
logerror("RETF: Return segment is past GDT limit.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((desc.flags & 0x0018) != 0x0018)
logerror("RETF: Return segment is not a code segment.\n");
FAULT(FAULT_GP,newCS & ~0x03)
/* conforming bit selects the privilege rule, as in JMP/CALL */
if(desc.flags & 0x0004)
logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n");
FAULT(FAULT_GP,newCS & ~0x03)
logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((desc.flags & 0x0080) == 0)
logerror("RETF (%08x): Code segment is not present.\n",cpustate->pc);
FAULT(FAULT_NP,newCS & ~0x03)
if(newEIP > desc.limit)
logerror("RETF: EIP is past code segment limit.\n");
/* verify the whole pop (return address + parameter bytes) stays in SS */
UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
if(i386_limit_check(cpustate,SS,offset,count+4) != 0)
logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
if(i386_limit_check(cpustate,SS,offset,count+8) != 0)
logerror("RETF: ESP is past stack segment limit.\n");
/* discard return address and parameters (16-bit: 4 bytes; 32-bit: 8 bytes) */
REG16(SP) += (4+count);
REG32(ESP) += (8+count);
UINT32 newSS, newESP; // when changing privilege
/* outer privilege level */
/* stack must also hold the outer SS:ESP image */
UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
if(i386_limit_check(cpustate,SS,offset,count+8) != 0)
logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
if(i386_limit_check(cpustate,SS,offset,count+16) != 0)
logerror("RETF: ESP is past stack segment limit.\n");
/* Check CS selector and descriptor */
if((newCS & ~0x03) == 0)
logerror("RETF: CS segment is null.\n");
if((newCS & ~0x07) >= cpustate->ldtr.limit)
logerror("RETF: CS segment selector is past LDT limit.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((newCS & ~0x07) >= cpustate->gdtr.limit)
logerror("RETF: CS segment selector is past GDT limit.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((desc.flags & 0x0018) != 0x0018)
logerror("RETF: CS segment is not a code segment.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if(desc.flags & 0x0004)
logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n");
FAULT(FAULT_GP,newCS & ~0x03)
logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n");
FAULT(FAULT_GP,newCS & ~0x03)
if((desc.flags & 0x0080) == 0)
logerror("RETF: CS segment is not present.\n");
FAULT(FAULT_NP,newCS & ~0x03)
if(newEIP > desc.limit)
logerror("RETF: EIP is past return CS segment limit.\n");
/* read the outer SS:ESP image pushed by the matching inter-privilege CALL */
newESP = READ16(cpustate, ea) & 0xffff;
newSS = READ16(cpustate, ea+2) & 0xffff;
newESP = READ32(cpustate, ea);
newSS = READ32(cpustate, ea+4) & 0xffff;
/* Check SS selector and descriptor */
desc.selector = newSS;
i386_load_protected_mode_segment(cpustate,&desc,NULL);
DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
if((newSS & ~0x07) == 0)
logerror("RETF: SS segment is null.\n");
if((newSS & ~0x07) > cpustate->ldtr.limit)
logerror("RETF (%08x): SS segment selector is past LDT limit.\n",cpustate->pc);
FAULT(FAULT_GP,newSS & ~0x03)
if((newSS & ~0x07) > cpustate->gdtr.limit)
logerror("RETF (%08x): SS segment selector is past GDT limit.\n",cpustate->pc);
FAULT(FAULT_GP,newSS & ~0x03)
if((newSS & 0x03) != RPL)
logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n");
FAULT(FAULT_GP,newSS & ~0x03)
if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0)
logerror("RETF: SS segment is not a writable data segment.\n");
FAULT(FAULT_GP,newSS & ~0x03)
if(((desc.flags >> 5) & 0x03) != RPL)
logerror("RETF: SS DPL is not equal to CS segment RPL.\n");
FAULT(FAULT_GP,newSS & ~0x03)
if((desc.flags & 0x0080) == 0)
logerror("RETF: SS segment is not present.\n");
FAULT(FAULT_GP,newSS & ~0x03)
/* CPL becomes the return selector's RPL */
cpustate->CPL = newCS & 0x03;
/* Load new SS:(E)SP */
REG16(SP) = (newESP+count) & 0xffff;
REG32(ESP) = newESP+count;
cpustate->sreg[SS].selector = newSS;
i386_load_segment_descriptor(cpustate, SS );
/* Check that DS, ES, FS and GS are valid for the new privilege level */
i386_check_sreg_validity(cpustate,DS);
i386_check_sreg_validity(cpustate,ES);
i386_check_sreg_validity(cpustate,FS);
i386_check_sreg_validity(cpustate,GS);
/* Load new CS:(E)IP */
cpustate->eip = newEIP & 0xffff;
cpustate->eip = newEIP;
cpustate->sreg[CS].selector = newCS;
i386_load_segment_descriptor(cpustate, CS );
CHANGE_PC(cpustate,cpustate->eip);
2283 static void i386_protected_mode_iret(i386_state* cpustate, int operand32)
2285 UINT32 newCS, newEIP;
2286 UINT32 newSS, newESP; // when changing privilege
2287 I386_SREG desc,stack;
2288 UINT8 CPL, RPL, DPL;
2290 UINT8 IOPL = cpustate->IOP1 | (cpustate->IOP2 << 1);
2292 CPL = cpustate->CPL;
2293 UINT32 ea = i386_translate(cpustate, SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32)?12:6);
2296 newEIP = READ16(cpustate, ea) & 0xffff;
2297 newCS = READ16(cpustate, ea+2) & 0xffff;
2298 newflags = READ16(cpustate, ea+4) & 0xffff;
2302 newEIP = READ32(cpustate, ea);
2303 newCS = READ32(cpustate, ea+4) & 0xffff;
2304 newflags = READ32(cpustate, ea+8);
2309 UINT32 oldflags = get_flags(cpustate);
2312 logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
2317 cpustate->eip = newEIP & 0xffff;
2318 cpustate->sreg[CS].selector = newCS & 0xffff;
2319 newflags &= ~(3<<12);
2320 newflags |= (((oldflags>>12)&3)<<12); // IOPL cannot be changed in V86 mode
2321 set_flags(cpustate,(newflags & 0xffff) | (oldflags & ~0xffff));
2326 cpustate->eip = newEIP;
2327 cpustate->sreg[CS].selector = newCS & 0xffff;
2328 newflags &= ~(3<<12);
2329 newflags |= 0x20000 | (((oldflags>>12)&3)<<12); // IOPL and VM cannot be changed in V86 mode
2330 set_flags(cpustate,newflags);
2334 else if(NESTED_TASK)
2336 UINT32 task = READ32(cpustate,cpustate->task.base);
2338 logerror("IRET (%08x): Nested task return.\n",cpustate->pc);
2339 /* Check back-link selector in TSS */
2342 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2343 FAULT(FAULT_TS,task & ~0x03)
2345 if((task & ~0x07) >= cpustate->gdtr.limit)
2347 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2348 FAULT(FAULT_TS,task & ~0x03)
2350 memset(&desc, 0, sizeof(desc));
2351 desc.selector = task;
2352 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2353 if((desc.flags & 0x001f) != 0x000b)
2355 logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",cpustate->pc);
2356 FAULT(FAULT_TS,task & ~0x03)
2358 if((desc.flags & 0x0080) == 0)
2360 logerror("IRET: Task return: Back-linked TSS is not present.\n");
2361 FAULT(FAULT_NP,task & ~0x03)
2363 if(desc.flags & 0x08)
2364 i386_task_switch(cpustate,desc.selector,0);
2366 i286_task_switch(cpustate,desc.selector,0);
2371 if(newflags & 0x00020000) // if returning to virtual 8086 mode
2373 // 16-bit iret can't reach here
2374 newESP = READ32(cpustate, ea+12);
2375 newSS = READ32(cpustate, ea+16) & 0xffff;
2376 /* Return to v86 mode */
2377 //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",cpustate->pc);
2380 UINT32 oldflags = get_flags(cpustate);
2381 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2383 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2385 set_flags(cpustate,newflags);
2386 cpustate->eip = POP32(cpustate) & 0xffff; // high 16 bits are ignored
2387 cpustate->sreg[CS].selector = POP32(cpustate) & 0xffff;
2388 POP32(cpustate); // already set flags
2389 newESP = POP32(cpustate);
2390 newSS = POP32(cpustate) & 0xffff;
2391 cpustate->sreg[ES].selector = POP32(cpustate) & 0xffff;
2392 cpustate->sreg[DS].selector = POP32(cpustate) & 0xffff;
2393 cpustate->sreg[FS].selector = POP32(cpustate) & 0xffff;
2394 cpustate->sreg[GS].selector = POP32(cpustate) & 0xffff;
2395 REG32(ESP) = newESP; // all 32 bits are loaded
2396 cpustate->sreg[SS].selector = newSS;
2397 i386_load_segment_descriptor(cpustate,ES);
2398 i386_load_segment_descriptor(cpustate,DS);
2399 i386_load_segment_descriptor(cpustate,FS);
2400 i386_load_segment_descriptor(cpustate,GS);
2401 i386_load_segment_descriptor(cpustate,SS);
2402 cpustate->CPL = 3; // Virtual 8086 tasks are always run at CPL 3
2408 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2409 if(i386_limit_check(cpustate,SS,offset,4) != 0)
2411 logerror("IRET: Data on stack is past SS limit.\n");
2417 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2418 if(i386_limit_check(cpustate,SS,offset,8) != 0)
2420 logerror("IRET: Data on stack is past SS limit.\n");
2427 logerror("IRET (%08x): Return CS RPL is less than CPL.\n",cpustate->pc);
2428 FAULT(FAULT_GP,newCS & ~0x03)
2432 /* return to same privilege level */
2435 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2436 if(i386_limit_check(cpustate,SS,offset,6) != 0)
2438 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2444 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2445 if(i386_limit_check(cpustate,SS,offset,12) != 0)
2447 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2451 if((newCS & ~0x03) == 0)
2453 logerror("IRET: Return CS selector is null.\n");
2458 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2460 logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS);
2461 FAULT(FAULT_GP,newCS & ~0x03)
2466 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2468 logerror("IRET: Return CS selector is past GDT limit.\n");
2469 FAULT(FAULT_GP,newCS & ~0x03)
2472 memset(&desc, 0, sizeof(desc));
2473 desc.selector = newCS;
2474 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2475 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2477 if((desc.flags & 0x0018) != 0x0018)
2479 logerror("IRET (%08x): Return CS segment is not a code segment.\n",cpustate->pc);
2480 FAULT(FAULT_GP,newCS & ~0x07)
2482 if(desc.flags & 0x0004)
2486 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2487 FAULT(FAULT_GP,newCS & ~0x03)
2494 logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n");
2495 FAULT(FAULT_GP,newCS & ~0x03)
2498 if((desc.flags & 0x0080) == 0)
2500 logerror("IRET: (%08x) Return CS segment is not present.\n", cpustate->pc);
2501 FAULT(FAULT_NP,newCS & ~0x03)
2503 if(newEIP > desc.limit)
2505 logerror("IRET: Return EIP is past return CS limit.\n");
2511 UINT32 oldflags = get_flags(cpustate);
2512 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2514 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2519 cpustate->eip = newEIP;
2520 cpustate->sreg[CS].selector = newCS;
2521 set_flags(cpustate,newflags);
2526 cpustate->eip = newEIP;
2527 cpustate->sreg[CS].selector = newCS & 0xffff;
2528 set_flags(cpustate,newflags);
2534 /* return to outer privilege level */
2535 memset(&desc, 0, sizeof(desc));
2536 desc.selector = newCS;
2537 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2538 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2542 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2543 if(i386_limit_check(cpustate,SS,offset,10) != 0)
2545 logerror("IRET: SP is past SS limit.\n");
2551 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2552 if(i386_limit_check(cpustate,SS,offset,20) != 0)
2554 logerror("IRET: ESP is past SS limit.\n");
2558 /* Check CS selector and descriptor */
2559 if((newCS & ~0x03) == 0)
2561 logerror("IRET: Return CS selector is null.\n");
2566 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2568 logerror("IRET: Return CS selector is past LDT limit.\n");
2569 FAULT(FAULT_GP,newCS & ~0x03);
2574 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2576 logerror("IRET: Return CS selector is past GDT limit.\n");
2577 FAULT(FAULT_GP,newCS & ~0x03);
2580 if((desc.flags & 0x0018) != 0x0018)
2582 logerror("IRET: Return CS segment is not a code segment.\n");
2583 FAULT(FAULT_GP,newCS & ~0x03)
2585 if(desc.flags & 0x0004)
2589 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2590 FAULT(FAULT_GP,newCS & ~0x03)
2597 logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n");
2598 FAULT(FAULT_GP,newCS & ~0x03)
2601 if((desc.flags & 0x0080) == 0)
2603 logerror("IRET: Return CS segment is not present.\n");
2604 FAULT(FAULT_NP,newCS & ~0x03)
2607 /* Check SS selector and descriptor */
2610 newESP = READ16(cpustate, ea+6) & 0xffff;
2611 newSS = READ16(cpustate, ea+8) & 0xffff;
2615 newESP = READ32(cpustate, ea+12);
2616 newSS = READ32(cpustate, ea+16) & 0xffff;
2618 memset(&stack, 0, sizeof(stack));
2619 stack.selector = newSS;
2620 i386_load_protected_mode_segment(cpustate,&stack,NULL);
2621 DPL = (stack.flags >> 5) & 0x03;
2622 if((newSS & ~0x03) == 0)
2624 logerror("IRET: Return SS selector is null.\n");
2629 if((newSS & ~0x07) >= cpustate->ldtr.limit)
2631 logerror("IRET: Return SS selector is past LDT limit.\n");
2632 FAULT(FAULT_GP,newSS & ~0x03);
2637 if((newSS & ~0x07) >= cpustate->gdtr.limit)
2639 logerror("IRET: Return SS selector is past GDT limit.\n");
2640 FAULT(FAULT_GP,newSS & ~0x03);
2643 if((newSS & 0x03) != RPL)
2645 logerror("IRET: Return SS RPL is not equal to return CS RPL.\n");
2646 FAULT(FAULT_GP,newSS & ~0x03)
2648 if((stack.flags & 0x0018) != 0x0010)
2650 logerror("IRET: Return SS segment is not a data segment.\n");
2651 FAULT(FAULT_GP,newSS & ~0x03)
2653 if((stack.flags & 0x0002) == 0)
2655 logerror("IRET: Return SS segment is not writable.\n");
2656 FAULT(FAULT_GP,newSS & ~0x03)
2660 logerror("IRET: Return SS DPL does not equal SS RPL.\n");
2661 FAULT(FAULT_GP,newSS & ~0x03)
2663 if((stack.flags & 0x0080) == 0)
2665 logerror("IRET: Return SS segment is not present.\n");
2666 FAULT(FAULT_NP,newSS & ~0x03)
2668 if(newEIP > desc.limit)
2670 logerror("IRET: EIP is past return CS limit.\n");
2674 // if(operand32 == 0)
2677 // REG32(ESP) += 20;
2679 // IOPL can only change if CPL is zero
2682 UINT32 oldflags = get_flags(cpustate);
2683 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2685 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2690 cpustate->eip = newEIP & 0xffff;
2691 cpustate->sreg[CS].selector = newCS;
2692 set_flags(cpustate,newflags);
2693 REG16(SP) = newESP & 0xffff;
2694 cpustate->sreg[SS].selector = newSS;
2698 cpustate->eip = newEIP;
2699 cpustate->sreg[CS].selector = newCS & 0xffff;
2700 set_flags(cpustate,newflags);
2701 REG32(ESP) = newESP;
2702 cpustate->sreg[SS].selector = newSS & 0xffff;
2704 cpustate->CPL = newCS & 0x03;
2705 i386_load_segment_descriptor(cpustate,SS);
2707 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2708 i386_check_sreg_validity(cpustate,DS);
2709 i386_check_sreg_validity(cpustate,ES);
2710 i386_check_sreg_validity(cpustate,FS);
2711 i386_check_sreg_validity(cpustate,GS);
2716 i386_load_segment_descriptor(cpustate,CS);
2717 CHANGE_PC(cpustate,cpustate->eip);
// Per-CPU-model instruction timing tables, indexed by CYCLES_* opcode id:
// *_rm holds real-mode cycle counts, *_pm protected-mode cycle counts.
// Filled in by build_cycle_table() from the master x86_cycle_table.
2722 static UINT8 cycle_table_rm[X86_NUM_CPUS][CYCLES_NUM_OPCODES];
2723 static UINT8 cycle_table_pm[X86_NUM_CPUS][CYCLES_NUM_OPCODES];
// Deduct a fixed, table-independent cycle count from the remaining budget.
2725 #define CYCLES_NUM(x) (cpustate->cycles -= (x))
// Charge the per-opcode cycle cost for opcode class 'x'.
// NOTE(review): the mode test selecting between the two subtractions is
// elided in this listing — presumably protected mode uses the _pm table
// and real mode the _rm table; confirm against the full source.
2727 INLINE void CYCLES(i386_state *cpustate,int x)
2731 cpustate->cycles -= cpustate->cycle_table_pm[x];
2735 cpustate->cycles -= cpustate->cycle_table_rm[x];
// Charge cycles for an instruction whose cost depends on its operand
// location: 'r' is the cost id for the register form, 'm' for the memory
// form; 'modrm' presumably decides which applies (mod==3 => register).
// NOTE(review): the modrm test and mode branches are elided here.
2739 INLINE void CYCLES_RM(i386_state *cpustate,int modrm, int r, int m)
2745 cpustate->cycles -= cpustate->cycle_table_pm[r];
2749 cpustate->cycles -= cpustate->cycle_table_rm[r];
// memory-operand cost path
2756 cpustate->cycles -= cpustate->cycle_table_pm[m];
2760 cpustate->cycles -= cpustate->cycle_table_rm[m];
// Populate cycle_table_rm/_pm for every emulated CPU model from the
// master x86_cycle_table: each entry maps an opcode id to a
// {real-mode, protected-mode} cycle pair per CPU.
2765 static void build_cycle_table()
2768 for (j=0; j < X86_NUM_CPUS; j++)
// tables are now statically allocated; the old heap allocation is kept
// as commented-out history
2770 // cycle_table_rm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2771 // cycle_table_pm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2773 for (i=0; i < sizeof(x86_cycle_table)/sizeof(X86_CYCLE_TABLE); i++)
2775 int opcode = x86_cycle_table[i].op;
2776 cycle_table_rm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][0];
2777 cycle_table_pm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][1];
// Log an invalid (undefined) opcode.  The default build logs one line;
// a DEBUG_MISSING_OPCODE build instead dumps every fetched byte of the
// instruction together with the address it was fetched from.
2782 static void report_invalid_opcode(i386_state *cpustate)
2784 #ifndef DEBUG_MISSING_OPCODE
// pc - 1: pc has already advanced past the single opcode byte
2785 logerror("i386: Invalid opcode %02X at %08X %s\n", cpustate->opcode, cpustate->pc - 1, cpustate->lock ? "with lock" : "");
// DEBUG_MISSING_OPCODE branch: dump the full opcode byte sequence
2787 logerror("i386: Invalid opcode");
2788 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2789 logerror(" %02X", cpustate->opcode_bytes[a]);
2790 logerror(" at %08X\n", cpustate->opcode_pc);
2794 static void report_invalid_modrm(i386_state *cpustate, const char* opcode, UINT8 modrm)
2796 #ifndef DEBUG_MISSING_OPCODE
2797 logerror("i386: Invalid %s modrm %01X at %08X\n", opcode, modrm, cpustate->pc - 2);
2799 logerror("i386: Invalid %s modrm %01X", opcode, modrm);
2800 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2801 logerror(" %02X", cpustate->opcode_bytes[a]);
2802 logerror(" at %08X\n", cpustate->opcode_pc);
2804 i386_trap(cpustate, 6, 0, 0);
2807 /* Forward declarations */
2808 static void I386OP(decode_opcode)(i386_state *cpustate);
2809 static void I386OP(decode_two_byte)(i386_state *cpustate);
2810 static void I386OP(decode_three_byte38)(i386_state *cpustate);
2811 static void I386OP(decode_three_byte3a)(i386_state *cpustate);
2812 static void I386OP(decode_three_byte66)(i386_state *cpustate);
2813 static void I386OP(decode_three_bytef2)(i386_state *cpustate);
2814 static void I386OP(decode_three_bytef3)(i386_state *cpustate);
2815 static void I386OP(decode_four_byte3866)(i386_state *cpustate);
2816 static void I386OP(decode_four_byte3a66)(i386_state *cpustate);
2817 static void I386OP(decode_four_byte38f2)(i386_state *cpustate);
2818 static void I386OP(decode_four_byte3af2)(i386_state *cpustate);
2819 static void I386OP(decode_four_byte38f3)(i386_state *cpustate);
2823 #include "i386ops.c"
2824 #include "i386op16.c"
2825 #include "i386op32.c"
2826 #include "i486ops.c"
2827 #include "pentops.c"
2829 #include "i386ops.h"
// Fetch the next opcode byte and dispatch it through the one-byte
// handler table, honoring the current operand-size attribute.
2831 static void I386OP(decode_opcode)(i386_state *cpustate)
2833 cpustate->opcode = FETCH(cpustate);
// a LOCK prefix is only legal on opcodes flagged lockable; else #UD
2835 if(cpustate->lock && !cpustate->lock_table[0][cpustate->opcode])
2836 return I386OP(invalid)(cpustate);
2838 if( cpustate->operand_size )
2839 cpustate->opcode_table1_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2841 cpustate->opcode_table1_16[cpustate->opcode](cpustate);
2844 /* Two-byte opcode 0f xx */
// Fetch the second opcode byte and dispatch through the 0f-prefix
// handler table (16- or 32-bit variant by operand size).
2845 static void I386OP(decode_two_byte)(i386_state *cpustate)
2847 cpustate->opcode = FETCH(cpustate);
// lock_table[1] covers the two-byte opcode space
2849 if(cpustate->lock && !cpustate->lock_table[1][cpustate->opcode])
2850 return I386OP(invalid)(cpustate);
2852 if( cpustate->operand_size )
2853 cpustate->opcode_table2_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2855 cpustate->opcode_table2_16[cpustate->opcode](cpustate);
2858 /* Three-byte opcode 0f 38 xx */
// Fetch the third byte and dispatch through the 0f 38 handler table.
2859 static void I386OP(decode_three_byte38)(i386_state *cpustate)
2861 cpustate->opcode = FETCH(cpustate);
2863 if (cpustate->operand_size)
2864 cpustate->opcode_table338_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2866 cpustate->opcode_table338_16[cpustate->opcode](cpustate);
2869 /* Three-byte opcode 0f 3a xx */
// Fetch the third byte and dispatch through the 0f 3a handler table.
2870 static void I386OP(decode_three_byte3a)(i386_state *cpustate)
2872 cpustate->opcode = FETCH(cpustate);
2874 if (cpustate->operand_size)
2875 cpustate->opcode_table33a_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2877 cpustate->opcode_table33a_16[cpustate->opcode](cpustate);
2880 /* Three-byte opcode prefix 66 0f xx */
// Dispatch a 66-prefixed two-byte opcode through the 66 0f table.
2881 static void I386OP(decode_three_byte66)(i386_state *cpustate)
2883 cpustate->opcode = FETCH(cpustate);
2884 if( cpustate->operand_size )
2885 cpustate->opcode_table366_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2887 cpustate->opcode_table366_16[cpustate->opcode](cpustate);
2890 /* Three-byte opcode prefix f2 0f xx */
// Dispatch an f2-prefixed two-byte opcode through the f2 0f table.
2891 static void I386OP(decode_three_bytef2)(i386_state *cpustate)
2893 cpustate->opcode = FETCH(cpustate);
2894 if( cpustate->operand_size )
2895 cpustate->opcode_table3f2_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2897 cpustate->opcode_table3f2_16[cpustate->opcode](cpustate);
2900 /* Three-byte opcode prefix f3 0f */
// Dispatch an f3-prefixed two-byte opcode through the f3 0f table.
2901 static void I386OP(decode_three_bytef3)(i386_state *cpustate)
2903 cpustate->opcode = FETCH(cpustate);
2904 if( cpustate->operand_size )
2905 cpustate->opcode_table3f3_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2907 cpustate->opcode_table3f3_16[cpustate->opcode](cpustate);
2910 /* Four-byte opcode prefix 66 0f 38 xx */
// Dispatch a 66-prefixed 0f 38 opcode through the 66 0f 38 table.
2911 static void I386OP(decode_four_byte3866)(i386_state *cpustate)
2913 cpustate->opcode = FETCH(cpustate);
2914 if (cpustate->operand_size)
2915 cpustate->opcode_table46638_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2917 cpustate->opcode_table46638_16[cpustate->opcode](cpustate);
2920 /* Four-byte opcode prefix 66 0f 3a xx */
// Dispatch a 66-prefixed 0f 3a opcode through the 66 0f 3a table.
2921 static void I386OP(decode_four_byte3a66)(i386_state *cpustate)
2923 cpustate->opcode = FETCH(cpustate);
2924 if (cpustate->operand_size)
2925 cpustate->opcode_table4663a_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2927 cpustate->opcode_table4663a_16[cpustate->opcode](cpustate);
2930 /* Four-byte opcode prefix f2 0f 38 xx */
// Dispatch an f2-prefixed 0f 38 opcode through the f2 0f 38 table.
2931 static void I386OP(decode_four_byte38f2)(i386_state *cpustate)
2933 cpustate->opcode = FETCH(cpustate);
2934 if (cpustate->operand_size)
2935 cpustate->opcode_table4f238_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2937 cpustate->opcode_table4f238_16[cpustate->opcode](cpustate);
2940 /* Four-byte opcode prefix f2 0f 3a xx */
// Dispatch an f2-prefixed 0f 3a opcode through the f2 0f 3a table.
2941 static void I386OP(decode_four_byte3af2)(i386_state *cpustate)
2943 cpustate->opcode = FETCH(cpustate);
2944 if (cpustate->operand_size)
2945 cpustate->opcode_table4f23a_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2947 cpustate->opcode_table4f23a_16[cpustate->opcode](cpustate);
2950 /* Four-byte opcode prefix f3 0f 38 xx */
// Dispatch an f3-prefixed 0f 38 opcode through the f3 0f 38 table.
2951 static void I386OP(decode_four_byte38f3)(i386_state *cpustate)
2953 cpustate->opcode = FETCH(cpustate);
2954 if (cpustate->operand_size)
2955 cpustate->opcode_table4f338_32[cpustate->opcode](cpustate);
// 16-bit operand-size path
2957 cpustate->opcode_table4f338_16[cpustate->opcode](cpustate);
2961 /*************************************************************************/
// Rebuild cached segment descriptor state for all six segment registers
// (indices 0..5) after a saved-state load, then resynchronize the PC.
2963 static void i386_postload(i386_state *cpustate)
2966 for (i = 0; i < 6; i++)
2967 i386_load_segment_descriptor(cpustate,i);
2968 CHANGE_PC(cpustate,cpustate->eip);
// Shared constructor for all CPU models: allocates the zeroed CPU state,
// builds the global parity and modrm lookup tables, allocates the TLB
// (size differs per model) and returns the new state block.
2971 static i386_state *i386_common_init(int tlbsize)
// register-id lookup used to decode modrm reg/rm fields per width
2974 static const int regs8[8] = {AL,CL,DL,BL,AH,CH,DH,BH};
2975 static const int regs16[8] = {AX,CX,DX,BX,SP,BP,SI,DI};
2976 static const int regs32[8] = {EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI};
2977 i386_state *cpustate = (i386_state *)calloc(1, sizeof(i386_state));
2979 assert((sizeof(XMM_REG)/sizeof(double)) == 2);
2981 build_cycle_table();
// parity table: entry is 1 when the byte has an even number of set bits
// (the bit-counting inner loop is elided from this listing)
2983 for( i=0; i < 256; i++ ) {
2985 for( j=0; j < 8; j++ ) {
2989 i386_parity_table[i] = ~(c & 0x1) & 0x1;
// precomputed modrm decode: reg/rm field -> register id for each width
2992 for( i=0; i < 256; i++ ) {
2993 i386_MODRM_table[i].reg.b = regs8[(i >> 3) & 0x7];
2994 i386_MODRM_table[i].reg.w = regs16[(i >> 3) & 0x7];
2995 i386_MODRM_table[i].reg.d = regs32[(i >> 3) & 0x7];
2997 i386_MODRM_table[i].rm.b = regs8[i & 0x7];
2998 i386_MODRM_table[i].rm.w = regs16[i & 0x7];
2999 i386_MODRM_table[i].rm.d = regs32[i & 0x7];
3002 cpustate->vtlb = vtlb_alloc(cpustate, AS_PROGRAM, 0, tlbsize);
3003 cpustate->smi = false;
3004 cpustate->lock = false;
// SMI callback resolution from the old device interface, kept as history
3006 // i386_interface *intf = (i386_interface *) device->static_config();
3008 // if (intf != NULL)
3009 // cpustate->smiact.resolve(intf->smiact, *device);
3011 // memset(&cpustate->smiact, 0, sizeof(cpustate->smiact));
3013 zero_state(cpustate);
// CPU_INIT(i386) body (the signature line is elided from this listing):
// 32-entry TLB, bare i386 opcode set, i386 timing tables.
3020 i386_state *cpustate = i386_common_init(32);
3021 build_opcode_table(cpustate, OP_I386);
3022 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I386];
3023 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I386];
3027 static void build_opcode_table(i386_state *cpustate, UINT32 features)
3030 for (i=0; i < 256; i++)
3032 cpustate->opcode_table1_16[i] = I386OP(invalid);
3033 cpustate->opcode_table1_32[i] = I386OP(invalid);
3034 cpustate->opcode_table2_16[i] = I386OP(invalid);
3035 cpustate->opcode_table2_32[i] = I386OP(invalid);
3036 cpustate->opcode_table366_16[i] = I386OP(invalid);
3037 cpustate->opcode_table366_32[i] = I386OP(invalid);
3038 cpustate->opcode_table3f2_16[i] = I386OP(invalid);
3039 cpustate->opcode_table3f2_32[i] = I386OP(invalid);
3040 cpustate->opcode_table3f3_16[i] = I386OP(invalid);
3041 cpustate->opcode_table3f3_32[i] = I386OP(invalid);
3042 cpustate->lock_table[0][i] = false;
3043 cpustate->lock_table[1][i] = false;
3046 for (i=0; i < sizeof(x86_opcode_table)/sizeof(X86_OPCODE); i++)
3048 const X86_OPCODE *op = &x86_opcode_table[i];
3050 if ((op->flags & features))
3052 if (op->flags & OP_2BYTE)
3054 cpustate->opcode_table2_32[op->opcode] = op->handler32;
3055 cpustate->opcode_table2_16[op->opcode] = op->handler16;
3056 cpustate->opcode_table366_32[op->opcode] = op->handler32;
3057 cpustate->opcode_table366_16[op->opcode] = op->handler16;
3058 cpustate->lock_table[1][op->opcode] = op->lockable;
3060 else if (op->flags & OP_3BYTE66)
3062 cpustate->opcode_table366_32[op->opcode] = op->handler32;
3063 cpustate->opcode_table366_16[op->opcode] = op->handler16;
3065 else if (op->flags & OP_3BYTEF2)
3067 cpustate->opcode_table3f2_32[op->opcode] = op->handler32;
3068 cpustate->opcode_table3f2_16[op->opcode] = op->handler16;
3070 else if (op->flags & OP_3BYTEF3)
3072 cpustate->opcode_table3f3_32[op->opcode] = op->handler32;
3073 cpustate->opcode_table3f3_16[op->opcode] = op->handler16;
3075 else if (op->flags & OP_3BYTE38)
3077 cpustate->opcode_table338_32[op->opcode] = op->handler32;
3078 cpustate->opcode_table338_16[op->opcode] = op->handler16;
3080 else if (op->flags & OP_3BYTE3A)
3082 cpustate->opcode_table33a_32[op->opcode] = op->handler32;
3083 cpustate->opcode_table33a_16[op->opcode] = op->handler16;
3085 else if (op->flags & OP_4BYTE3866)
3087 cpustate->opcode_table46638_32[op->opcode] = op->handler32;
3088 cpustate->opcode_table46638_16[op->opcode] = op->handler16;
3090 else if (op->flags & OP_4BYTE3A66)
3092 cpustate->opcode_table4663a_32[op->opcode] = op->handler32;
3093 cpustate->opcode_table4663a_16[op->opcode] = op->handler16;
3095 else if (op->flags & OP_4BYTE38F2)
3097 cpustate->opcode_table4f238_32[op->opcode] = op->handler32;
3098 cpustate->opcode_table4f238_16[op->opcode] = op->handler16;
3100 else if (op->flags & OP_4BYTE3AF2)
3102 cpustate->opcode_table4f23a_32[op->opcode] = op->handler32;
3103 cpustate->opcode_table4f23a_16[op->opcode] = op->handler16;
3105 else if (op->flags & OP_4BYTE38F3)
3107 cpustate->opcode_table4f338_32[op->opcode] = op->handler32;
3108 cpustate->opcode_table4f338_16[op->opcode] = op->handler16;
3112 cpustate->opcode_table1_32[op->opcode] = op->handler32;
3113 cpustate->opcode_table1_16[op->opcode] = op->handler16;
3114 cpustate->lock_table[0][op->opcode] = op->lockable;
// Reset every emulated CPU field to a known zero/false state.  Called
// from i386_common_init() and from each model's CPU_RESET before the
// model-specific reset values are applied.
// NOTE(review): a run of field assignments (original lines ~3129-3147)
// is elided from this listing.
3120 static void zero_state(i386_state *cpustate)
3122 memset( &cpustate->reg, 0, sizeof(cpustate->reg) );
3123 memset( cpustate->sreg, 0, sizeof(cpustate->sreg) );
3126 cpustate->prev_eip = 0;
3127 cpustate->eflags = 0;
3128 cpustate->eflags_mask = 0;
3148 cpustate->performed_intersegment_jump = 0;
3149 cpustate->delayed_interrupt_enable = 0;
// system registers and descriptor-table caches
3150 memset( cpustate->cr, 0, sizeof(cpustate->cr) );
3151 memset( cpustate->dr, 0, sizeof(cpustate->dr) );
3152 memset( cpustate->tr, 0, sizeof(cpustate->tr) );
3153 memset( &cpustate->gdtr, 0, sizeof(cpustate->gdtr) );
3154 memset( &cpustate->idtr, 0, sizeof(cpustate->idtr) );
3155 memset( &cpustate->task, 0, sizeof(cpustate->task) );
3156 memset( &cpustate->ldtr, 0, sizeof(cpustate->ldtr) );
3158 cpustate->halted = 0;
3159 cpustate->busreq = 0;
3160 cpustate->shutdown = 0;
// per-instruction decode state
3161 cpustate->operand_size = 0;
3162 cpustate->xmm_operand_size = 0;
3163 cpustate->address_size = 0;
3164 cpustate->operand_prefix = 0;
3165 cpustate->address_prefix = 0;
3166 cpustate->segment_prefix = 0;
3167 cpustate->segment_override = 0;
// cycle counters are deliberately preserved across reset
3168 // cpustate->cycles = 0;
3169 // cpustate->base_cycles = 0;
3170 cpustate->opcode = 0;
3171 cpustate->irq_state = 0;
3172 cpustate->a20_mask = 0;
3173 cpustate->cpuid_max_input_value_eax = 0;
3174 cpustate->cpuid_id0 = 0;
3175 cpustate->cpuid_id1 = 0;
3176 cpustate->cpuid_id2 = 0;
3177 cpustate->cpu_version = 0;
3178 cpustate->feature_flags = 0;
3180 cpustate->perfctr[0] = cpustate->perfctr[1] = 0;
// x87 FPU state
3181 memset( cpustate->x87_reg, 0, sizeof(cpustate->x87_reg) );
3182 cpustate->x87_cw = 0;
3183 cpustate->x87_sw = 0;
3184 cpustate->x87_tw = 0;
3185 cpustate->x87_data_ptr = 0;
3186 cpustate->x87_inst_ptr = 0;
3187 cpustate->x87_opcode = 0;
// SSE state
3188 memset( cpustate->sse_reg, 0, sizeof(cpustate->sse_reg) );
3189 cpustate->mxcsr = 0;
// system-management mode / NMI latches
3190 cpustate->smm = false;
3191 cpustate->smi = false;
3192 cpustate->smi_latched = false;
3193 cpustate->nmi_masked = false;
3194 cpustate->nmi_latched = false;
3195 cpustate->smbase = 0;
3196 #ifdef DEBUG_MISSING_OPCODE
3197 memset( cpustate->opcode_bytes, 0, sizeof(cpustate->opcode_bytes) );
3198 cpustate->opcode_pc = 0;
3199 cpustate->opcode_bytes_length = 0;
// i386 hardware reset: zero all state, flush the TLB, then set the
// architectural reset values (execution starts at CS=F000, EIP=FFF0
// with CS.base=FFFF0000, i.e. physical FFFFFFF0).
3203 static CPU_RESET( i386 )
3205 zero_state(cpustate);
3206 vtlb_flush_dynamic(cpustate->vtlb);
3208 cpustate->sreg[CS].selector = 0xf000;
3209 cpustate->sreg[CS].base = 0xffff0000;
3210 cpustate->sreg[CS].limit = 0xffff;
3211 cpustate->sreg[CS].flags = 0x93;
3212 cpustate->sreg[CS].valid = true;
// all data segments: base 0, 64KB limit, present/writable, 16-bit
3214 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3215 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3216 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3217 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
// real-mode IVT at physical 0, 1KB
3219 cpustate->idtr.base = 0;
3220 cpustate->idtr.limit = 0x3ff;
3221 cpustate->smm = false;
3222 cpustate->smi_latched = false;
3223 cpustate->nmi_masked = false;
3224 cpustate->nmi_latched = false;
// A20 gate open after reset
3226 cpustate->a20_mask = ~0;
3228 cpustate->cr[0] = 0x7fffffe0; // reserved bits set to 1
3229 cpustate->eflags = 0;
3230 cpustate->eflags_mask = 0x00037fd7;
3231 cpustate->eip = 0xfff0;
// reset DX signature (no CPUID on the 386)
3235 // [ 3:0] Stepping ID
3236 // Family 3 (386), Model 0 (DX), Stepping 8 (D1)
3238 REG32(EDX) = (3 << 8) | (0 << 4) | (8);
3242 CHANGE_PC(cpustate,cpustate->eip);
// Enter System Management Mode (Pentium layout): save the current CPU
// state into the SMRAM state-save area at smbase+0xFE00, then switch to
// the SMM execution environment (big-real-mode-like segments, CS base =
// smbase, EIP = 0x8000) with NMI masked.
3245 static void pentium_smi(i386_state *cpustate)
3247 UINT32 smram_state = cpustate->smbase + 0xfe00;
3248 UINT32 old_cr0 = cpustate->cr[0];
3249 UINT32 old_flags = get_flags(cpustate);
// clear PG/TS/EM/MP-style bits and flags before saving environment
3254 cpustate->cr[0] &= ~(0x8000000d);
3255 set_flags(cpustate, 2);
3256 // if(!cpustate->smiact.isnull())
3257 // cpustate->smiact(true);
3258 cpustate->smm = true;
3259 cpustate->smi_latched = false;
// --- write descriptor-cache state into the SMRAM save map ---
3262 WRITE32(cpustate, cpustate->cr[4], smram_state+SMRAM_IP5_CR4);
3263 WRITE32(cpustate, cpustate->sreg[ES].limit, smram_state+SMRAM_IP5_ESLIM);
3264 WRITE32(cpustate, cpustate->sreg[ES].base, smram_state+SMRAM_IP5_ESBASE);
3265 WRITE32(cpustate, cpustate->sreg[ES].flags, smram_state+SMRAM_IP5_ESACC);
3266 WRITE32(cpustate, cpustate->sreg[CS].limit, smram_state+SMRAM_IP5_CSLIM);
3267 WRITE32(cpustate, cpustate->sreg[CS].base, smram_state+SMRAM_IP5_CSBASE);
3268 WRITE32(cpustate, cpustate->sreg[CS].flags, smram_state+SMRAM_IP5_CSACC);
3269 WRITE32(cpustate, cpustate->sreg[SS].limit, smram_state+SMRAM_IP5_SSLIM);
3270 WRITE32(cpustate, cpustate->sreg[SS].base, smram_state+SMRAM_IP5_SSBASE);
3271 WRITE32(cpustate, cpustate->sreg[SS].flags, smram_state+SMRAM_IP5_SSACC);
3272 WRITE32(cpustate, cpustate->sreg[DS].limit, smram_state+SMRAM_IP5_DSLIM);
3273 WRITE32(cpustate, cpustate->sreg[DS].base, smram_state+SMRAM_IP5_DSBASE);
3274 WRITE32(cpustate, cpustate->sreg[DS].flags, smram_state+SMRAM_IP5_DSACC);
3275 WRITE32(cpustate, cpustate->sreg[FS].limit, smram_state+SMRAM_IP5_FSLIM);
3276 WRITE32(cpustate, cpustate->sreg[FS].base, smram_state+SMRAM_IP5_FSBASE);
3277 WRITE32(cpustate, cpustate->sreg[FS].flags, smram_state+SMRAM_IP5_FSACC);
3278 WRITE32(cpustate, cpustate->sreg[GS].limit, smram_state+SMRAM_IP5_GSLIM);
3279 WRITE32(cpustate, cpustate->sreg[GS].base, smram_state+SMRAM_IP5_GSBASE);
3280 WRITE32(cpustate, cpustate->sreg[GS].flags, smram_state+SMRAM_IP5_GSACC);
3281 WRITE32(cpustate, cpustate->ldtr.flags, smram_state+SMRAM_IP5_LDTACC);
3282 WRITE32(cpustate, cpustate->ldtr.limit, smram_state+SMRAM_IP5_LDTLIM);
3283 WRITE32(cpustate, cpustate->ldtr.base, smram_state+SMRAM_IP5_LDTBASE);
3284 WRITE32(cpustate, cpustate->gdtr.limit, smram_state+SMRAM_IP5_GDTLIM);
3285 WRITE32(cpustate, cpustate->gdtr.base, smram_state+SMRAM_IP5_GDTBASE);
3286 WRITE32(cpustate, cpustate->idtr.limit, smram_state+SMRAM_IP5_IDTLIM);
3287 WRITE32(cpustate, cpustate->idtr.base, smram_state+SMRAM_IP5_IDTBASE);
3288 WRITE32(cpustate, cpustate->task.limit, smram_state+SMRAM_IP5_TRLIM);
3289 WRITE32(cpustate, cpustate->task.base, smram_state+SMRAM_IP5_TRBASE);
3290 WRITE32(cpustate, cpustate->task.flags, smram_state+SMRAM_IP5_TRACC);
// --- save segment selectors ---
3292 WRITE32(cpustate, cpustate->sreg[ES].selector, smram_state+SMRAM_ES);
3293 WRITE32(cpustate, cpustate->sreg[CS].selector, smram_state+SMRAM_CS);
3294 WRITE32(cpustate, cpustate->sreg[SS].selector, smram_state+SMRAM_SS);
3295 WRITE32(cpustate, cpustate->sreg[DS].selector, smram_state+SMRAM_DS);
3296 WRITE32(cpustate, cpustate->sreg[FS].selector, smram_state+SMRAM_FS);
3297 WRITE32(cpustate, cpustate->sreg[GS].selector, smram_state+SMRAM_GS);
3298 WRITE32(cpustate, cpustate->ldtr.segment, smram_state+SMRAM_LDTR);
3299 WRITE32(cpustate, cpustate->task.segment, smram_state+SMRAM_TR);
// --- save debug registers, GPRs, EIP/EFLAGS and control registers ---
3301 WRITE32(cpustate, cpustate->dr[7], smram_state+SMRAM_DR7);
3302 WRITE32(cpustate, cpustate->dr[6], smram_state+SMRAM_DR6);
3303 WRITE32(cpustate, REG32(EAX), smram_state+SMRAM_EAX);
3304 WRITE32(cpustate, REG32(ECX), smram_state+SMRAM_ECX);
3305 WRITE32(cpustate, REG32(EDX), smram_state+SMRAM_EDX);
3306 WRITE32(cpustate, REG32(EBX), smram_state+SMRAM_EBX);
3307 WRITE32(cpustate, REG32(ESP), smram_state+SMRAM_ESP);
3308 WRITE32(cpustate, REG32(EBP), smram_state+SMRAM_EBP);
3309 WRITE32(cpustate, REG32(ESI), smram_state+SMRAM_ESI);
3310 WRITE32(cpustate, REG32(EDI), smram_state+SMRAM_EDI);
3311 WRITE32(cpustate, cpustate->eip, smram_state+SMRAM_EIP);
3312 WRITE32(cpustate, old_flags, smram_state+SMRAM_EFLAGS);
3313 WRITE32(cpustate, cpustate->cr[3], smram_state+SMRAM_CR3);
3314 WRITE32(cpustate, old_cr0, smram_state+SMRAM_CR0);
// --- set up the SMM execution environment ---
3316 cpustate->sreg[DS].selector = cpustate->sreg[ES].selector = cpustate->sreg[FS].selector = cpustate->sreg[GS].selector = cpustate->sreg[SS].selector = 0;
3317 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3318 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffffffff;
3319 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x8093;
3320 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
3321 cpustate->sreg[CS].selector = 0x3000; // pentium only, ppro sel = smbase >> 4
3322 cpustate->sreg[CS].base = cpustate->smbase;
3323 cpustate->sreg[CS].limit = 0xffffffff;
3324 cpustate->sreg[CS].flags = 0x8093;
3325 cpustate->sreg[CS].valid = true;
3326 cpustate->cr[4] = 0;
3327 cpustate->dr[7] = 0x400;
3328 cpustate->eip = 0x8000;
// NMI stays masked until RSM
3330 cpustate->nmi_masked = true;
3331 CHANGE_PC(cpustate,cpustate->eip);
// External interrupt line handler.  A non-clear level wakes a halted
// CPU; NMI is either latched (if masked, e.g. inside SMM) or taken
// immediately as trap 2; maskable IRQs just record the line level.
// Cycles consumed by the trap here are moved into extra_cycles so the
// main execute loop accounts for them.
3334 static void i386_set_irq_line(i386_state *cpustate,int irqline, int state)
3336 int first_cycles = cpustate->cycles;
3338 if (state != CLEAR_LINE && cpustate->halted)
3340 cpustate->halted = 0;
3343 if ( irqline == INPUT_LINE_NMI )
3345 /* NMI (I do not think that this is 100% right) */
3346 if(cpustate->nmi_masked)
3348 cpustate->nmi_latched = true;
3352 i386_trap(cpustate,2, 1, 0);
// maskable IRQ: remember the line state for i386_check_irq_line()
3356 cpustate->irq_state = state;
3358 cpustate->extra_cycles += first_cycles - cpustate->cycles;
3359 cpustate->cycles = first_cycles;
// A20 gate control: when asserted all address bits pass; when deasserted
// bit 20 is forced to 0 (8086 1MB wrap-around compatibility).  The TLB
// is flushed since cached translations embed the old mask.
3362 static void i386_set_a20_line(i386_state *cpustate,int state)
3366 cpustate->a20_mask = ~0;
// A20 disabled: mask out address bit 20
3370 cpustate->a20_mask = ~(1 << 20);
3372 // TODO: how does A20M and the tlb interact
3373 vtlb_flush_dynamic(cpustate->vtlb);
// Main execution entry point: runs instructions until the granted cycle
// budget is exhausted (or the CPU is halted / bus-requested) and returns
// the number of cycles actually consumed.  There are two copies of the
// fetch/decode loop body: one instrumented for the attached debugger and
// one plain; they must be kept in sync.
3376 static CPU_EXECUTE( i386 )
3378 CHANGE_PC(cpustate,cpustate->eip);
// Halted or bus-requested: burn the requested time without executing.
3380 if (cpustate->halted || cpustate->busreq)
3382 #ifdef SINGLE_MODE_DMA
3383 if(cpustate->dma != NULL) {
3384 cpustate->dma->do_dma();
3388 int passed_cycles = max(1, cpustate->extra_cycles);
3389 // this is main cpu, cpustate->cycles is not used
3390 /*cpustate->cycles = */cpustate->extra_cycles = 0;
3391 cpustate->tsc += passed_cycles;
3393 cpustate->total_cycles += passed_cycles;
3395 return passed_cycles;
3397 cpustate->cycles += cycles;
3398 cpustate->base_cycles = cpustate->cycles;
3400 /* adjust for any interrupts that came in */
3401 cpustate->cycles -= cpustate->extra_cycles;
3402 cpustate->extra_cycles = 0;
3404 /* if busreq is raised, spin cpu while remained clock */
3405 if (cpustate->cycles > 0) {
3406 cpustate->cycles = 0;
3408 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3409 cpustate->tsc += passed_cycles;
3411 cpustate->total_cycles += passed_cycles;
3413 return passed_cycles;
// Normal run: establish the cycle budget for this timeslice.
3418 cpustate->cycles = 1;
3420 cpustate->cycles += cycles;
3422 cpustate->base_cycles = cpustate->cycles;
3424 /* adjust for any interrupts that came in */
3426 cpustate->total_cycles += cpustate->extra_cycles;
3428 cpustate->cycles -= cpustate->extra_cycles;
3429 cpustate->extra_cycles = 0;
// Fetch/decode loop; exits when the budget runs out or busreq rises.
3431 while( cpustate->cycles > 0 && !cpustate->busreq )
// --- debugger-instrumented path ---
3434 bool now_debugging = cpustate->debugger->now_debugging;
3436 cpustate->debugger->check_break_points(cpustate->pc);
3437 if(cpustate->debugger->now_suspended) {
3438 cpustate->emu->mute_sound();
3439 cpustate->debugger->now_waiting = true;
// spin (sleeping) until the debugger resumes execution
3440 while(cpustate->debugger->now_debugging && cpustate->debugger->now_suspended) {
3441 cpustate->emu->sleep(10);
3443 cpustate->debugger->now_waiting = false;
// route memory/io accesses through the debugger while stepping
3445 if(cpustate->debugger->now_debugging) {
3446 cpustate->program = cpustate->io = cpustate->debugger;
3448 now_debugging = false;
3450 int first_cycles = cpustate->cycles;
3451 i386_check_irq_line(cpustate);
// per-instruction decode state from the CS D-bit (default sizes)
3452 cpustate->operand_size = cpustate->sreg[CS].d;
3453 cpustate->xmm_operand_size = 0;
3454 cpustate->address_size = cpustate->sreg[CS].d;
3455 cpustate->operand_prefix = 0;
3456 cpustate->address_prefix = 0;
// remember TF so a single-step trap fires after this instruction only
3459 int old_tf = cpustate->TF;
3461 cpustate->debugger->add_cpu_trace(cpustate->pc);
3462 cpustate->segment_prefix = 0;
3463 cpustate->prev_eip = cpustate->eip;
3464 cpustate->prev_pc = cpustate->pc;
// STI enables interrupts only after the following instruction
3466 if(cpustate->delayed_interrupt_enable != 0)
3469 cpustate->delayed_interrupt_enable = 0;
3471 #ifdef DEBUG_MISSING_OPCODE
3472 cpustate->opcode_bytes_length = 0;
3473 cpustate->opcode_pc = cpustate->pc;
3477 I386OP(decode_opcode)(cpustate);
// single-step (#DB) when TF was set before and survived the instruction
3478 if(cpustate->TF && old_tf)
3480 cpustate->prev_eip = cpustate->eip;
3482 i386_trap(cpustate,1,0,0);
// LOCK prefix only applies to the immediately following instruction
3484 if(cpustate->lock && (cpustate->opcode != 0xf0))
3485 cpustate->lock = false;
// exception propagated out of the decoder: low half = vector, high = error code
3490 i386_trap_with_error(cpustate,e&0xffffffff,0,0,e>>32);
3492 #ifdef SINGLE_MODE_DMA
3493 if(cpustate->dma != NULL) {
3494 cpustate->dma->do_dma();
3497 /* adjust for any interrupts that came in */
3498 cpustate->cycles -= cpustate->extra_cycles;
3499 cpustate->extra_cycles = 0;
3500 cpustate->total_cycles += first_cycles - cpustate->cycles;
// after stepping, re-suspend unless the debugger said "go"
3503 if(!cpustate->debugger->now_going) {
3504 cpustate->debugger->now_suspended = true;
3506 cpustate->program = cpustate->program_stored;
3507 cpustate->io = cpustate->io_stored;
// --- plain (non-debugger) path: mirror of the loop body above ---
3510 int first_cycles = cpustate->cycles;
3512 i386_check_irq_line(cpustate);
3513 cpustate->operand_size = cpustate->sreg[CS].d;
3514 cpustate->xmm_operand_size = 0;
3515 cpustate->address_size = cpustate->sreg[CS].d;
3516 cpustate->operand_prefix = 0;
3517 cpustate->address_prefix = 0;
3520 int old_tf = cpustate->TF;
3523 cpustate->debugger->add_cpu_trace(cpustate->pc);
3525 cpustate->segment_prefix = 0;
3526 cpustate->prev_eip = cpustate->eip;
3527 cpustate->prev_pc = cpustate->pc;
3529 if(cpustate->delayed_interrupt_enable != 0)
3532 cpustate->delayed_interrupt_enable = 0;
3534 #ifdef DEBUG_MISSING_OPCODE
3535 cpustate->opcode_bytes_length = 0;
3536 cpustate->opcode_pc = cpustate->pc;
3540 I386OP(decode_opcode)(cpustate);
3541 if(cpustate->TF && old_tf)
3543 cpustate->prev_eip = cpustate->eip;
3545 i386_trap(cpustate,1,0,0);
3547 if(cpustate->lock && (cpustate->opcode != 0xf0))
3548 cpustate->lock = false;
3553 i386_trap_with_error(cpustate,e&0xffffffff,0,0,e>>32);
3555 #ifdef SINGLE_MODE_DMA
3556 if(cpustate->dma != NULL) {
3557 cpustate->dma->do_dma();
3560 /* adjust for any interrupts that came in */
3561 cpustate->cycles -= cpustate->extra_cycles;
3562 cpustate->extra_cycles = 0;
3564 cpustate->total_cycles += first_cycles - cpustate->cycles;
3569 /* if busreq is raised, spin cpu while remained clock */
3570 if (cpustate->cycles > 0 && cpustate->busreq) {
3572 cpustate->total_cycles += cpustate->cycles;
3574 cpustate->cycles = 0;
3576 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3577 cpustate->tsc += passed_cycles;
3578 return passed_cycles;
3581 /*************************************************************************/
// Debugger/address-translation hook: convert a virtual address to a
// physical one for the program space (paging via i386_translate_address),
// then apply the A20 mask to the result.
3583 static CPU_TRANSLATE( i386 )
3585 i386_state *cpustate = (i386_state *)cpudevice;
3587 if(space == AS_PROGRAM)
3588 ret = i386_translate_address(cpustate, intention, address, NULL);
3589 *address &= cpustate->a20_mask;
3593 /*****************************************************************************/
// i486 init: i386 base plus FPU and 486-specific opcodes, 32-entry TLB,
// 486 timing tables.
3597 static CPU_INIT( i486 )
3599 i386_state *cpustate = i386_common_init(32);
3600 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486);
3601 build_x87_opcode_table(cpustate);
3602 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I486];
3603 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I486];
// i486 hardware reset: same reset vector as the 386 (F000:FFF0 via
// CS.base=FFFF0000) plus x87 reset and a 486 DX signature in EDX.
3607 static CPU_RESET( i486 )
3609 zero_state(cpustate);
3610 vtlb_flush_dynamic(cpustate->vtlb);
3612 cpustate->sreg[CS].selector = 0xf000;
3613 cpustate->sreg[CS].base = 0xffff0000;
3614 cpustate->sreg[CS].limit = 0xffff;
3615 cpustate->sreg[CS].flags = 0x0093;
3617 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3618 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3619 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3621 cpustate->idtr.base = 0;
3622 cpustate->idtr.limit = 0x3ff;
3624 cpustate->a20_mask = ~0;
// ET=1 (internal FPU); unlike the 386 reset, no reserved-bit pattern
3626 cpustate->cr[0] = 0x00000010;
3627 cpustate->eflags = 0;
3628 cpustate->eflags_mask = 0x00077fd7;
3629 cpustate->eip = 0xfff0;
3630 cpustate->smm = false;
3631 cpustate->smi_latched = false;
3632 cpustate->nmi_masked = false;
3633 cpustate->nmi_latched = false;
3635 x87_reset(cpustate);
3639 // [ 3:0] Stepping ID
3640 // Family 4 (486), Model 0/1 (DX), Stepping 3
3642 REG32(EDX) = (4 << 8) | (0 << 4) | (3);
3644 CHANGE_PC(cpustate,cpustate->eip);
3647 /*****************************************************************************/
3651 static CPU_INIT( pentium )
3653 // 64 dtlb small, 8 dtlb large, 32 itlb
3654 i386_state *cpustate = i386_common_init(96);
3655 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM);
3656 build_x87_opcode_table(cpustate);
3657 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];
3658 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];
3662 static CPU_RESET( pentium )
3664 zero_state(cpustate);
3665 vtlb_flush_dynamic(cpustate->vtlb);
3667 cpustate->sreg[CS].selector = 0xf000;
3668 cpustate->sreg[CS].base = 0xffff0000;
3669 cpustate->sreg[CS].limit = 0xffff;
3670 cpustate->sreg[CS].flags = 0x0093;
3672 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3673 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3674 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3676 cpustate->idtr.base = 0;
3677 cpustate->idtr.limit = 0x3ff;
3679 cpustate->a20_mask = ~0;
3681 cpustate->cr[0] = 0x00000010;
3682 cpustate->eflags = 0x00200000;
3683 cpustate->eflags_mask = 0x003f7fd7;
3684 cpustate->eip = 0xfff0;
3685 cpustate->mxcsr = 0x1f80;
3686 cpustate->smm = false;
3687 cpustate->smi_latched = false;
3688 cpustate->smbase = 0x30000;
3689 cpustate->nmi_masked = false;
3690 cpustate->nmi_latched = false;
3692 x87_reset(cpustate);
3696 // [ 3:0] Stepping ID
3697 // Family 5 (Pentium), Model 2 (75 - 200MHz), Stepping 5
3699 REG32(EDX) = (5 << 8) | (2 << 4) | (5);
3701 cpustate->cpuid_id0 = 0x756e6547; // Genu
3702 cpustate->cpuid_id1 = 0x49656e69; // ineI
3703 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3705 cpustate->cpuid_max_input_value_eax = 0x01;
3706 cpustate->cpu_version = REG32(EDX);
3708 // [ 0:0] FPU on chip
3709 // [ 2:2] I/O breakpoints
3710 // [ 4:4] Time Stamp Counter
3711 // [ 5:5] Pentium CPU style model specific registers
3712 // [ 7:7] Machine Check Exception
3713 // [ 8:8] CMPXCHG8B instruction
3714 cpustate->feature_flags = 0x000001bf;
3716 CHANGE_PC(cpustate,cpustate->eip);
3719 /*****************************************************************************/
3723 static CPU_INIT( mediagx )
3725 // probably 32 unified
3726 i386_state *cpustate = i386_common_init(32);
3727 build_x87_opcode_table(cpustate);
3728 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_CYRIX);
3729 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_MEDIAGX];
3730 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_MEDIAGX];
3734 static CPU_RESET( mediagx )
3736 zero_state(cpustate);
3737 vtlb_flush_dynamic(cpustate->vtlb);
3739 cpustate->sreg[CS].selector = 0xf000;
3740 cpustate->sreg[CS].base = 0xffff0000;
3741 cpustate->sreg[CS].limit = 0xffff;
3742 cpustate->sreg[CS].flags = 0x0093;
3744 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3745 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3746 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3748 cpustate->idtr.base = 0;
3749 cpustate->idtr.limit = 0x3ff;
3751 cpustate->a20_mask = ~0;
3753 cpustate->cr[0] = 0x00000010;
3754 cpustate->eflags = 0x00200000;
3755 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3756 cpustate->eip = 0xfff0;
3757 cpustate->smm = false;
3758 cpustate->smi_latched = false;
3759 cpustate->nmi_masked = false;
3760 cpustate->nmi_latched = false;
3762 x87_reset(cpustate);
3766 // [ 3:0] Stepping ID
3767 // Family 4, Model 4 (MediaGX)
3769 REG32(EDX) = (4 << 8) | (4 << 4) | (1); /* TODO: is this correct? */
3771 cpustate->cpuid_id0 = 0x69727943; // Cyri
3772 cpustate->cpuid_id1 = 0x736e4978; // xIns
3773 cpustate->cpuid_id2 = 0x6d616574; // tead
3775 cpustate->cpuid_max_input_value_eax = 0x01;
3776 cpustate->cpu_version = REG32(EDX);
3778 // [ 0:0] FPU on chip
3779 cpustate->feature_flags = 0x00000001;
3781 CHANGE_PC(cpustate,cpustate->eip);
3784 /*****************************************************************************/
3785 /* Intel Pentium Pro */
3787 static CPU_INIT( pentium_pro )
3789 // 64 dtlb small, 32 itlb
3790 i386_state *cpustate = i386_common_init(96);
3791 build_x87_opcode_table(cpustate);
3792 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO);
3793 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3794 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3798 static CPU_RESET( pentium_pro )
3800 zero_state(cpustate);
3801 vtlb_flush_dynamic(cpustate->vtlb);
3803 cpustate->sreg[CS].selector = 0xf000;
3804 cpustate->sreg[CS].base = 0xffff0000;
3805 cpustate->sreg[CS].limit = 0xffff;
3806 cpustate->sreg[CS].flags = 0x0093;
3808 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3809 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3810 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3812 cpustate->idtr.base = 0;
3813 cpustate->idtr.limit = 0x3ff;
3815 cpustate->a20_mask = ~0;
3817 cpustate->cr[0] = 0x60000010;
3818 cpustate->eflags = 0x00200000;
3819 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3820 cpustate->eip = 0xfff0;
3821 cpustate->mxcsr = 0x1f80;
3822 cpustate->smm = false;
3823 cpustate->smi_latched = false;
3824 cpustate->smbase = 0x30000;
3825 cpustate->nmi_masked = false;
3826 cpustate->nmi_latched = false;
3828 x87_reset(cpustate);
3832 // [ 3:0] Stepping ID
3833 // Family 6, Model 1 (Pentium Pro)
3835 REG32(EDX) = (6 << 8) | (1 << 4) | (1); /* TODO: is this correct? */
3837 cpustate->cpuid_id0 = 0x756e6547; // Genu
3838 cpustate->cpuid_id1 = 0x49656e69; // ineI
3839 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3841 cpustate->cpuid_max_input_value_eax = 0x02;
3842 cpustate->cpu_version = REG32(EDX);
3844 // [ 0:0] FPU on chip
3845 // [ 2:2] I/O breakpoints
3846 // [ 4:4] Time Stamp Counter
3847 // [ 5:5] Pentium CPU style model specific registers
3848 // [ 7:7] Machine Check Exception
3849 // [ 8:8] CMPXCHG8B instruction
3850 // [15:15] CMOV and FCMOV
3852 cpustate->feature_flags = 0x000081bf;
3854 CHANGE_PC(cpustate,cpustate->eip);
3857 /*****************************************************************************/
3858 /* Intel Pentium MMX */
3860 static CPU_INIT( pentium_mmx )
3862 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3863 i386_state *cpustate = i386_common_init(96);
3864 build_x87_opcode_table(cpustate);
3865 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_MMX);
3866 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3867 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3871 static CPU_RESET( pentium_mmx )
3873 zero_state(cpustate);
3874 vtlb_flush_dynamic(cpustate->vtlb);
3876 cpustate->sreg[CS].selector = 0xf000;
3877 cpustate->sreg[CS].base = 0xffff0000;
3878 cpustate->sreg[CS].limit = 0xffff;
3879 cpustate->sreg[CS].flags = 0x0093;
3881 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3882 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3883 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3885 cpustate->idtr.base = 0;
3886 cpustate->idtr.limit = 0x3ff;
3888 cpustate->a20_mask = ~0;
3890 cpustate->cr[0] = 0x60000010;
3891 cpustate->eflags = 0x00200000;
3892 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3893 cpustate->eip = 0xfff0;
3894 cpustate->mxcsr = 0x1f80;
3895 cpustate->smm = false;
3896 cpustate->smi_latched = false;
3897 cpustate->smbase = 0x30000;
3898 cpustate->nmi_masked = false;
3899 cpustate->nmi_latched = false;
3901 x87_reset(cpustate);
3905 // [ 3:0] Stepping ID
3906 // Family 5, Model 4 (P55C)
3908 REG32(EDX) = (5 << 8) | (4 << 4) | (1);
3910 cpustate->cpuid_id0 = 0x756e6547; // Genu
3911 cpustate->cpuid_id1 = 0x49656e69; // ineI
3912 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3914 cpustate->cpuid_max_input_value_eax = 0x01;
3915 cpustate->cpu_version = REG32(EDX);
3917 // [ 0:0] FPU on chip
3918 // [ 2:2] I/O breakpoints
3919 // [ 4:4] Time Stamp Counter
3920 // [ 5:5] Pentium CPU style model specific registers
3921 // [ 7:7] Machine Check Exception
3922 // [ 8:8] CMPXCHG8B instruction
3923 // [23:23] MMX instructions
3924 cpustate->feature_flags = 0x008001bf;
3926 CHANGE_PC(cpustate,cpustate->eip);
3929 /*****************************************************************************/
3930 /* Intel Pentium II */
3932 static CPU_INIT( pentium2 )
3934 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3935 i386_state *cpustate = i386_common_init(96);
3936 build_x87_opcode_table(cpustate);
3937 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX);
3938 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3939 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3943 static CPU_RESET( pentium2 )
3945 zero_state(cpustate);
3946 vtlb_flush_dynamic(cpustate->vtlb);
3948 cpustate->sreg[CS].selector = 0xf000;
3949 cpustate->sreg[CS].base = 0xffff0000;
3950 cpustate->sreg[CS].limit = 0xffff;
3951 cpustate->sreg[CS].flags = 0x0093;
3953 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3954 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3955 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3957 cpustate->idtr.base = 0;
3958 cpustate->idtr.limit = 0x3ff;
3960 cpustate->a20_mask = ~0;
3962 cpustate->cr[0] = 0x60000010;
3963 cpustate->eflags = 0x00200000;
3964 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3965 cpustate->eip = 0xfff0;
3966 cpustate->mxcsr = 0x1f80;
3967 cpustate->smm = false;
3968 cpustate->smi_latched = false;
3969 cpustate->smbase = 0x30000;
3970 cpustate->nmi_masked = false;
3971 cpustate->nmi_latched = false;
3973 x87_reset(cpustate);
3977 // [ 3:0] Stepping ID
3978 // Family 6, Model 3 (Pentium II / Klamath)
3980 REG32(EDX) = (6 << 8) | (3 << 4) | (1); /* TODO: is this correct? */
3982 cpustate->cpuid_id0 = 0x756e6547; // Genu
3983 cpustate->cpuid_id1 = 0x49656e69; // ineI
3984 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3986 cpustate->cpuid_max_input_value_eax = 0x02;
3987 cpustate->cpu_version = REG32(EDX);
3989 // [ 0:0] FPU on chip
3990 cpustate->feature_flags = 0x008081bf; // TODO: enable relevant flags here
3992 CHANGE_PC(cpustate,cpustate->eip);
3995 /*****************************************************************************/
3996 /* Intel Pentium III */
3998 static CPU_INIT( pentium3 )
4000 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
4001 i386_state *cpustate = i386_common_init(96);
4002 build_x87_opcode_table(cpustate);
4003 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE);
4004 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4005 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4009 static CPU_RESET( pentium3 )
4011 zero_state(cpustate);
4012 vtlb_flush_dynamic(cpustate->vtlb);
4014 cpustate->sreg[CS].selector = 0xf000;
4015 cpustate->sreg[CS].base = 0xffff0000;
4016 cpustate->sreg[CS].limit = 0xffff;
4017 cpustate->sreg[CS].flags = 0x0093;
4019 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
4020 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
4021 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
4023 cpustate->idtr.base = 0;
4024 cpustate->idtr.limit = 0x3ff;
4026 cpustate->a20_mask = ~0;
4028 cpustate->cr[0] = 0x60000010;
4029 cpustate->eflags = 0x00200000;
4030 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
4031 cpustate->eip = 0xfff0;
4032 cpustate->mxcsr = 0x1f80;
4033 cpustate->smm = false;
4034 cpustate->smi_latched = false;
4035 cpustate->smbase = 0x30000;
4036 cpustate->nmi_masked = false;
4037 cpustate->nmi_latched = false;
4039 x87_reset(cpustate);
4043 // [ 3:0] Stepping ID
4044 // Family 6, Model 8 (Pentium III / Coppermine)
4046 REG32(EDX) = (6 << 8) | (8 << 4) | (10);
4048 cpustate->cpuid_id0 = 0x756e6547; // Genu
4049 cpustate->cpuid_id1 = 0x49656e69; // ineI
4050 cpustate->cpuid_id2 = 0x6c65746e; // ntel
4052 cpustate->cpuid_max_input_value_eax = 0x03;
4053 cpustate->cpu_version = REG32(EDX);
4055 // [ 0:0] FPU on chip
4056 // [ 4:4] Time Stamp Counter
4057 // [ D:D] PTE Global Bit
4058 cpustate->feature_flags = 0x00002011; // TODO: enable relevant flags here
4060 CHANGE_PC(cpustate,cpustate->eip);
4063 /*****************************************************************************/
4064 /* Intel Pentium 4 */
4066 static CPU_INIT( pentium4 )
4068 // 128 dtlb, 64 itlb
4069 i386_state *cpustate = i386_common_init(196);
4070 build_x87_opcode_table(cpustate);
4071 build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE | OP_SSE2);
4072 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4073 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4077 static CPU_RESET( pentium4 )
4079 zero_state(cpustate);
4080 vtlb_flush_dynamic(cpustate->vtlb);
4082 cpustate->sreg[CS].selector = 0xf000;
4083 cpustate->sreg[CS].base = 0xffff0000;
4084 cpustate->sreg[CS].limit = 0xffff;
4085 cpustate->sreg[CS].flags = 0x0093;
4087 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
4088 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
4089 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
4091 cpustate->idtr.base = 0;
4092 cpustate->idtr.limit = 0x3ff;
4094 cpustate->a20_mask = ~0;
4096 cpustate->cr[0] = 0x60000010;
4097 cpustate->eflags = 0x00200000;
4098 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
4099 cpustate->eip = 0xfff0;
4100 cpustate->mxcsr = 0x1f80;
4101 cpustate->smm = false;
4102 cpustate->smi_latched = false;
4103 cpustate->smbase = 0x30000;
4104 cpustate->nmi_masked = false;
4105 cpustate->nmi_latched = false;
4107 x87_reset(cpustate);
4109 // [27:20] Extended family
4110 // [19:16] Extended model
4114 // [ 3: 0] Stepping ID
4115 // Family 15, Model 0 (Pentium 4 / Willamette)
4117 REG32(EDX) = (0 << 20) | (0xf << 8) | (0 << 4) | (1);
4119 cpustate->cpuid_id0 = 0x756e6547; // Genu
4120 cpustate->cpuid_id1 = 0x49656e69; // ineI
4121 cpustate->cpuid_id2 = 0x6c65746e; // ntel
4123 cpustate->cpuid_max_input_value_eax = 0x02;
4124 cpustate->cpu_version = REG32(EDX);
4126 // [ 0:0] FPU on chip
4127 cpustate->feature_flags = 0x00000001; // TODO: enable relevant flags here
4129 CHANGE_PC(cpustate,cpustate->eip);