1 // license:BSD-3-Clause
2 // copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett
20 #include "./i386_opdef.h"
21 #include "./i386ops.h"
23 /* seems to be defined on mingw-gcc */
/* Raise exception `fault` with error code `error`: mark the event as
   CPU-internal (ext = 1), dispatch it, and return from the caller at once. */
#define FAULT(fault,error) {cpustate->ext = 1; i386_trap_with_error(fault,0,0,error); return;}
/* Same as FAULT, but propagates trap_level+1 so nested faults can be
   escalated (double/triple-fault detection inside i386_trap). */
#define FAULT_EXP(fault,error) {cpustate->ext = 1; i386_trap_with_error(fault,0,trap_level+1,error); return;}
29 /*************************************************************************/
/* Fetch the descriptor selected by seg->selector from the GDT or LDT and
   cache its decoded fields into *seg (flags, base, limit, default size).
   If desc is non-NULL the raw 64-bit descriptor is stored there too.
   NOTE(review): declarations/braces and some early-out lines are elided in
   this view; comments annotate only the visible statements. */
UINT32 I386_OPS_BASE::i386_load_protected_mode_segment( I386_SREG *seg, UINT64 *desc )
    /* TI bit (bit 2) of the selector picks the LDT; otherwise the GDT */
    if ( seg->selector & 0x4 )
        base = cpustate->ldtr.base;
        limit = cpustate->ldtr.limit;
        /* (else branch: GDT) */
        base = cpustate->gdtr.base;
        limit = cpustate->gdtr.limit;
    /* Byte offset of the 8-byte descriptor: index bits only (RPL/TI masked) */
    entry = seg->selector & ~0x7;
    /* Descriptor must lie entirely within the table limit */
    if ((limit == 0) || ((UINT32)(entry + 7) > limit))
    /* Read the two descriptor dwords */
    v1 = READ32PL0(base + entry );
    v2 = READ32PL0(base + entry + 4 );
    /* flags: access byte (type/DPL/P) plus the G/D/AVL nibble from byte 6 */
    seg->flags = (v2 >> 8) & 0xf0ff;
    /* base: bits 31-24 and 23-16 from the high dword, 15-0 from the low one */
    seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff);
    seg->limit = (v2 & 0xf0000) | (v1 & 0xffff);
    /* Granularity bit set: limit counts 4KB pages, so scale to bytes */
    if (seg->flags & 0x8000)
        seg->limit = (seg->limit << 12) | 0xfff;
    /* D/B bit: 32-bit default operand/stack size */
    seg->d = (seg->flags & 0x4000) ? 1 : 0;
    *desc = ((UINT64)v2<<32)|v1;
/* Fetch the gate descriptor selected by gate->segment from the GDT/LDT and
   decode it into *gate (target selector/offset, access byte, dword count,
   present flag, DPL).
   NOTE(review): declarations/braces are elided in this view. */
void I386_OPS_BASE::i386_load_call_gate(I386_CALL_GATE *gate)
    /* TI bit picks LDT vs GDT */
    if ( gate->segment & 0x4 )
        base = cpustate->ldtr.base;
        limit = cpustate->ldtr.limit;
        /* (else branch: GDT) */
        base = cpustate->gdtr.base;
        limit = cpustate->gdtr.limit;
    /* 8-byte descriptor slot; RPL/TI bits masked off */
    entry = gate->segment & ~0x7;
    if ((limit == 0) || ((UINT32)(entry + 7) > limit))
    v1 = READ32PL0(base + entry );
    v2 = READ32PL0(base + entry + 4 );
    /* Note that for task gates, offset and dword_count are not used */
    gate->selector = (v1 >> 16) & 0xffff;
    gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000);
    gate->ar = (v2 >> 8) & 0xff;       /* access-rights byte */
    gate->dword_count = v2 & 0x001f;   /* parameter count for call gates */
    gate->present = (gate->ar >> 7) & 0x01;
    gate->dpl = (gate->ar >> 5) & 0x03;
/* Set the "accessed" bit (bit 0 of the access-rights byte) of the descriptor
   named by `selector`, writing straight through the physical address space. */
void I386_OPS_BASE::i386_set_descriptor_accessed( UINT16 selector)
    // assume the selector is valid, we don't need to check it again
    /* TI bit picks LDT vs GDT */
    if ( selector & 0x4 )
        base = cpustate->ldtr.base;
        /* (else branch: GDT) */
        base = cpustate->gdtr.base;
    /* +5 = offset of the access-rights byte inside the 8-byte descriptor */
    addr = base + (selector & ~7) + 5;
    /* Translate once, then do a raw read-modify-write of that byte */
    i386_translate_address(TRANSLATE_READ, &addr, NULL);
    rights = cpustate->program->read_data8(addr);
    // Should a fault be thrown if the table is read only?
    cpustate->program->write_data8(addr, rights | 1);
127 void I386_OPS_BASE::i386_load_segment_descriptor( int segment )
133 i386_load_protected_mode_segment(&cpustate->sreg[segment], NULL );
135 i386_set_descriptor_accessed(cpustate, cpustate->sreg[segment].selector);
136 cpustate->sreg[segment].flags |= 0x0001;
141 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
142 cpustate->sreg[segment].limit = 0xffff;
143 cpustate->sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3;
144 cpustate->sreg[segment].d = 0;
145 cpustate->sreg[segment].valid = true;
147 // if (segment == CS && cpustate->sreg[segment].flags != old_flags)
148 // debugger_privilege_hook();
152 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
153 cpustate->sreg[segment].d = 0;
154 cpustate->sreg[segment].valid = true;
158 if( !cpustate->performed_intersegment_jump )
159 cpustate->sreg[segment].base |= 0xfff00000;
160 if(cpustate->cpu_version < 0x5000)
161 cpustate->sreg[segment].flags = 0x93;
/* Retrieves the stack selector located in the current TSS */
UINT32 I386_OPS_BASE::i386_get_stack_segment(UINT8 privilege)
    /* flags bit 3 set -> 386 (32-bit) TSS: SS0/SS1/SS2 live at +8 in 8-byte
       strides; otherwise 286 (16-bit) TSS: +4 in 4-byte strides */
    if(cpustate->task.flags & 8)
        ret = READ32PL0((cpustate->task.base+8) + (8*privilege));
        /* (else branch: 286 TSS) */
        ret = READ16PL0((cpustate->task.base+4) + (4*privilege));
/* Retrieves the stack pointer located in the current TSS */
UINT32 I386_OPS_BASE::i386_get_stack_ptr(UINT8 privilege)
    /* flags bit 3 set -> 386 (32-bit) TSS: ESP0/1/2 at +4 in 8-byte strides;
       otherwise 286 TSS: SP0/1/2 at +2 in 4-byte strides */
    if(cpustate->task.flags & 8)
        ret = READ32PL0((cpustate->task.base+4) + (8*privilege));
        /* (else branch: 286 TSS) */
        ret = READ16PL0((cpustate->task.base+2) + (4*privilege));
/* Assemble the EFLAGS image from the individually cached flag fields.
   Bits the current CPU model cannot write (outside eflags_mask) are taken
   from the stored cpustate->eflags instead. Bit positions follow the
   architectural EFLAGS layout. */
UINT32 I386_OPS_BASE::get_flags()
    f |= cpustate->PF << 2;
    f |= cpustate->AF << 4;
    f |= cpustate->ZF << 6;
    f |= cpustate->SF << 7;
    f |= cpustate->TF << 8;
    f |= cpustate->IF << 9;
    f |= cpustate->DF << 10;
    f |= cpustate->OF << 11;
    f |= cpustate->IOP1 << 12;   /* IOPL low bit */
    f |= cpustate->IOP2 << 13;   /* IOPL high bit */
    f |= cpustate->NT << 14;
    f |= cpustate->RF << 16;
    f |= cpustate->VM << 17;
    f |= cpustate->AC << 18;
    f |= cpustate->VIF << 19;
    f |= cpustate->VIP << 20;
    f |= cpustate->ID << 21;
    /* Merge model-writable bits with the preserved reserved/unsupported bits */
    return (cpustate->eflags & ~cpustate->eflags_mask) | (f & cpustate->eflags_mask);
220 void I386_OPS_BASE::set_flags( UINT32 f )
222 f &= cpustate->eflags_mask;;
223 cpustate->CF = (f & 0x1) ? 1 : 0;
224 cpustate->PF = (f & 0x4) ? 1 : 0;
225 cpustate->AF = (f & 0x10) ? 1 : 0;
226 cpustate->ZF = (f & 0x40) ? 1 : 0;
227 cpustate->SF = (f & 0x80) ? 1 : 0;
228 cpustate->TF = (f & 0x100) ? 1 : 0;
229 cpustate->IF = (f & 0x200) ? 1 : 0;
230 cpustate->DF = (f & 0x400) ? 1 : 0;
231 cpustate->OF = (f & 0x800) ? 1 : 0;
232 cpustate->IOP1 = (f & 0x1000) ? 1 : 0;
233 cpustate->IOP2 = (f & 0x2000) ? 1 : 0;
234 cpustate->NT = (f & 0x4000) ? 1 : 0;
235 cpustate->RF = (f & 0x10000) ? 1 : 0;
236 cpustate->VM = (f & 0x20000) ? 1 : 0;
237 cpustate->AC = (f & 0x40000) ? 1 : 0;
238 cpustate->VIF = (f & 0x80000) ? 1 : 0;
239 cpustate->VIP = (f & 0x100000) ? 1 : 0;
240 cpustate->ID = (f & 0x200000) ? 1 : 0;
241 cpustate->eflags = f;
/* Decode a 32-bit SIB byte (fetched from the instruction stream; the fetch
   itself is elided in this view) into an effective address and the default
   segment it implies. `mod` comes from the preceding ModRM byte.
   NOTE(review): the switch statements and displacement fetches are elided. */
void I386_OPS_BASE::sib_byte(UINT8 mod, UINT32* out_ea, UINT8* out_segment)
    UINT8 scale, i, base;
    scale = (sib >> 6) & 0x3;   /* index multiplier: 1/2/4/8 */
    i = (sib >> 3) & 0x7;       /* index register number */
    /* Base register selection (base == 5 with mod 0/1/2 handled elsewhere) */
    case 0: ea = REG32(EAX); segment = DS; break;
    case 1: ea = REG32(ECX); segment = DS; break;
    case 2: ea = REG32(EDX); segment = DS; break;
    case 3: ea = REG32(EBX); segment = DS; break;
    case 4: ea = REG32(ESP); segment = SS; break;   /* ESP base implies SS */
    } else if( mod == 1 ) {
    } else if( mod == 2 ) {
    case 6: ea = REG32(ESI); segment = DS; break;
    case 7: ea = REG32(EDI); segment = DS; break;
    /* Add scaled index; index 4 (would be ESP) means "no index" and is
       absent from the visible cases */
    case 0: ea += REG32(EAX) * (1 << scale); break;
    case 1: ea += REG32(ECX) * (1 << scale); break;
    case 2: ea += REG32(EDX) * (1 << scale); break;
    case 3: ea += REG32(EBX) * (1 << scale); break;
    case 5: ea += REG32(EBP) * (1 << scale); break;
    case 6: ea += REG32(ESI) * (1 << scale); break;
    case 7: ea += REG32(EDI) * (1 << scale); break;
    *out_segment = segment;
/* Decode a memory-form ModRM byte into an effective address and its default
   segment, honouring the current address size and any segment-override
   prefix. mod == 3 (register form) is rejected via fatalerror.
   NOTE(review): switch statements, displacement handling and several braces
   are elided in this view; comments annotate the visible statements only. */
void I386_OPS_BASE::modrm_to_EA(UINT8 mod_rm, UINT32* out_ea, UINT8* out_segment)
    UINT8 mod = (mod_rm >> 6) & 0x3;
    UINT8 rm = mod_rm & 0x7;
    /* Register operands have no EA: caller error */
    fatalerror("i386: Called modrm_to_EA with modrm value %02X!\n",mod_rm);
    /* 32-bit addressing */
    if( cpustate->address_size ) {
        case 0: ea = REG32(EAX); segment = DS; break;
        case 1: ea = REG32(ECX); segment = DS; break;
        case 2: ea = REG32(EDX); segment = DS; break;
        case 3: ea = REG32(EBX); segment = DS; break;
        case 4: sib_byte(mod, &ea, &segment ); break;   /* SIB follows */
        /* rm == 5: disp32 with mod 0, else EBP-based (SS default) */
        ea = FETCH32(); segment = DS;
        ea = REG32(EBP); segment = SS;
        case 6: ea = REG32(ESI); segment = DS; break;
        case 7: ea = REG32(EDI); segment = DS; break;
    } else if( mod == 2 ) {
    /* A segment-override prefix beats the default segment */
    if( cpustate->segment_prefix )
        segment = cpustate->segment_override;
    *out_segment = segment;
    /* 16-bit addressing: classic base+index combinations */
    case 0: ea = REG16(BX) + REG16(SI); segment = DS; break;
    case 1: ea = REG16(BX) + REG16(DI); segment = DS; break;
    case 2: ea = REG16(BP) + REG16(SI); segment = SS; break;
    case 3: ea = REG16(BP) + REG16(DI); segment = SS; break;
    case 4: ea = REG16(SI); segment = DS; break;
    case 5: ea = REG16(DI); segment = DS; break;
    /* rm == 6: disp16 with mod 0, else BP-based (SS default) */
    ea = FETCH16(); segment = DS;
    ea = REG16(BP); segment = SS;
    case 7: ea = REG16(BX); segment = DS; break;
    } else if( mod == 2 ) {
    if( cpustate->segment_prefix )
        segment = cpustate->segment_override;
    /* 16-bit EAs wrap at 64KB */
    *out_ea = ea & 0xffff;
    *out_segment = segment;
/* Decode modrm into a raw (segment-relative, untranslated) effective address.
   Optionally reports the default/overridden segment through *seg. */
UINT32 I386_OPS_BASE::GetNonTranslatedEA(UINT8 modrm,UINT8 *seg)
    modrm_to_EA(modrm, &ea, &segment );
    if(seg) *seg = segment;
/* Decode modrm and translate the effective address into a linear address,
   applying segment base/limit checks for an access of `size` bytes with
   intent `rwn` (read/write/none). */
UINT32 I386_OPS_BASE::GetEA(UINT8 modrm, int rwn, UINT32 size)
    modrm_to_EA(modrm, &ea, &segment );
    return i386_translate(segment, ea, rwn, size );
/* Check segment register for validity when changing privilege level after an RETF */
void I386_OPS_BASE::i386_check_sreg_validity(int reg)
    UINT16 selector = cpustate->sreg[reg].selector;
    UINT8 CPL = cpustate->CPL;
    /* Load the descriptor so its DPL/type can be examined */
    memset(&desc, 0, sizeof(desc));
    desc.selector = selector;
    i386_load_protected_mode_segment(&desc,NULL);
    DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
    RPL = selector & 0x03;
    /* Must be within the relevant descriptor table limits */
    if((selector & ~0x07) > cpustate->ldtr.limit)
    if((selector & ~0x07) > cpustate->gdtr.limit)
    /* Must be either a data or readable code segment */
    if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010)
    /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */
    if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010)
        if((DPL < CPL) || (DPL < RPL))
    /* if segment is invalid, then segment register is nulled */
    cpustate->sreg[reg].selector = 0;
    i386_load_segment_descriptor(reg);
/* Check an access of `size` bytes at `offset` against the cached limit of
   segment register `seg`. Returns nonzero on a limit violation (return
   statements elided in this view); real/V86 mode accesses are not checked. */
int I386_OPS_BASE::i386_limit_check( int seg, UINT32 offset, UINT32 size)
    if(PROTECTED_MODE && !V8086_MODE)
        if((cpustate->sreg[seg].flags & 0x0018) == 0x0010 && cpustate->sreg[seg].flags & 0x0004) // if expand-down data segment
            // compare if greater than 0xffffffff when we're passed the access size
            /* Expand-down: valid range is (limit, 0xffff or 0xffffffff] */
            if((offset <= cpustate->sreg[seg].limit) || ((cpustate->sreg[seg].d)?0:((offset + size - 1) > 0xffff)))
                logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x (expand-down)\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
        /* Normal (expand-up) segment: last accessed byte must be <= limit */
        if((offset + size - 1) > cpustate->sreg[seg].limit)
            logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
/* Load segment register `reg` with `selector`, performing the protected-mode
   permission checks required by MOV sreg. On success *fault is cleared and
   the descriptor cache is reloaded; on failure a #GP/#SS/#NP fault is raised
   (the FAULT macros return from this function).
   NOTE(review): several braces/else lines and the SS-specific/null-check
   conditionals are elided in this view. */
void I386_OPS_BASE::i386_sreg_load( UINT16 selector, UINT8 reg, bool *fault)
    // Checks done when MOV changes a segment register in protected mode
    RPL = selector & 0x0003;
    /* Real/V86 mode: no checks, just reload the cache */
    if(!PROTECTED_MODE || V8086_MODE)
        cpustate->sreg[reg].selector = selector;
        i386_load_segment_descriptor(reg);
        if(fault) *fault = false;
    /* Assume failure until all checks pass */
    if(fault) *fault = true;
    /* --- SS path: stack segment has the strictest rules --- */
    memset(&stack, 0, sizeof(stack));
    stack.selector = selector;
    i386_load_protected_mode_segment(&stack,NULL);
    DPL = (stack.flags >> 5) & 0x03;
    /* SS may never be null */
    if((selector & ~0x0003) == 0)
        logerror("SReg Load (%08x): Selector is null.\n",cpustate->pc);
    if(selector & 0x0004) // LDT
        if((selector & ~0x0007) > cpustate->ldtr.limit)
            logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
            FAULT(FAULT_GP,selector & ~0x03)
        /* (else branch: GDT) */
        if((selector & ~0x0007) > cpustate->gdtr.limit)
            logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
            FAULT(FAULT_GP,selector & ~0x03)
    /* SS requires RPL == CPL (check condition elided) */
    logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",cpustate->pc);
    FAULT(FAULT_GP,selector & ~0x03)
    /* SS must be a writable data segment */
    if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
        logerror("SReg Load (%08x): Segment is not a writable data segment.\n",cpustate->pc);
        FAULT(FAULT_GP,selector & ~0x03)
    /* SS requires DPL == CPL (check condition elided) */
    logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",cpustate->pc);
    FAULT(FAULT_GP,selector & ~0x03)
    /* Not-present stack segment raises #SS, not #NP */
    if(!(stack.flags & 0x0080))
        logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
        FAULT(FAULT_SS,selector & ~0x03)
    /* --- Data segment registers --- */
    if(reg == DS || reg == ES || reg == FS || reg == GS)
        /* A null selector is legal here; it simply marks the register unusable */
        if((selector & ~0x0003) == 0)
            cpustate->sreg[reg].selector = selector;
            i386_load_segment_descriptor(reg );
            if(fault) *fault = false;
        memset(&desc, 0, sizeof(desc));
        desc.selector = selector;
        i386_load_protected_mode_segment(&desc,NULL);
        DPL = (desc.flags >> 5) & 0x03;
        if(selector & 0x0004) // LDT
            if((selector & ~0x0007) > cpustate->ldtr.limit)
                logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
                FAULT(FAULT_GP,selector & ~0x03)
            /* (else branch: GDT) */
            if((selector & ~0x0007) > cpustate->gdtr.limit)
                logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
                FAULT(FAULT_GP,selector & ~0x03)
        /* Must be a data segment or a readable code segment */
        if((desc.flags & 0x0018) != 0x10)
            if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10))
                logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",cpustate->pc);
                FAULT(FAULT_GP,selector & ~0x03)
        if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18)))
            // if data or non-conforming code segment
            /* privilege check: both RPL and CPL must be <= DPL */
            if((RPL > DPL) || (CPL > DPL))
                logerror("SReg Load (%08x): Selector RPL or CPL is not less or equal to segment DPL.\n",cpustate->pc);
                FAULT(FAULT_GP,selector & ~0x03)
        /* Not-present data segment raises #NP */
        if(!(desc.flags & 0x0080))
            logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
            FAULT(FAULT_NP,selector & ~0x03)
    /* All checks passed: commit the new selector */
    cpustate->sreg[reg].selector = selector;
    i386_load_segment_descriptor(reg );
    if(fault) *fault = false;
593 void I386_OPS_BASE::i386_trap(int irq, int irq_gate, int trap_level)
595 /* I386 Interrupts/Traps/Faults:
597 * 0x00 Divide by zero
598 * 0x01 Debug exception
602 * 0x05 Array bounds check
603 * 0x06 Illegal Opcode
604 * 0x07 FPU not available
606 * 0x09 Coprocessor segment overrun
607 * 0x0a Invalid task state
608 * 0x0b Segment not present
609 * 0x0c Stack exception
610 * 0x0d General Protection Fault
613 * 0x10 Coprocessor error
616 UINT32 offset, oldflags = get_flags();
618 int entry = irq * (PROTECTED_MODE ? 8 : 4);
620 cpustate->lock = false;
622 if( !(PROTECTED_MODE) )
625 PUSH16(oldflags & 0xffff );
626 PUSH16(cpustate->sreg[CS].selector );
627 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
628 PUSH16(cpustate->eip );
630 PUSH16(cpustate->prev_eip );
632 cpustate->sreg[CS].selector = READ16(cpustate->idtr.base + entry + 2 );
633 cpustate->eip = READ16(cpustate->idtr.base + entry );
643 UINT8 CPL = cpustate->CPL, DPL = 0; //, RPL = 0;
646 v1 = READ32PL0(cpustate->idtr.base + entry );
647 v2 = READ32PL0(cpustate->idtr.base + entry + 4 );
648 offset = (v2 & 0xffff0000) | (v1 & 0xffff);
649 segment = (v1 >> 16) & 0xffff;
650 type = (v2>>8) & 0x1F;
651 flags = (v2>>8) & 0xf0ff;
655 logerror("IRQ: Double fault.\n");
656 FAULT_EXP(FAULT_DF,0);
660 logerror("IRQ: Triple fault. CPU reset.\n");
661 CPU_RESET_CALL(i386); //!
662 cpustate->shutdown = 1;
666 /* segment privilege checks */
667 if(entry >= cpustate->idtr.limit)
669 logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",cpustate->pc,entry);
670 FAULT_EXP(FAULT_GP,entry+2)
672 /* segment must be interrupt gate, trap gate, or task gate */
673 if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f)
675 logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,cpustate->pc,segment);
676 FAULT_EXP(FAULT_GP,entry+2)
679 if(cpustate->ext == 0) // if software interrupt (caused by INT/INTO/INT3)
681 if(((flags >> 5) & 0x03) < CPL)
683 logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",cpustate->pc);
684 FAULT_EXP(FAULT_GP,entry+2)
688 if((!cpustate->IOP1 || !cpustate->IOP2) && (cpustate->opcode != 0xcc))
690 logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
697 if((flags & 0x0080) == 0)
699 logerror("IRQ: Vector segment is not present.\n");
700 FAULT_EXP(FAULT_NP,entry+2)
706 memset(&desc, 0, sizeof(desc));
707 desc.selector = segment;
708 i386_load_protected_mode_segment(&desc,NULL);
711 logerror("IRQ: Task gate: TSS is not in the GDT.\n");
712 FAULT_EXP(FAULT_TS,segment & ~0x03);
716 if(segment > cpustate->gdtr.limit)
718 logerror("IRQ: Task gate: TSS is past GDT limit.\n");
719 FAULT_EXP(FAULT_TS,segment & ~0x03);
722 if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01)
724 logerror("IRQ: Task gate: TSS is not an available TSS.\n");
725 FAULT_EXP(FAULT_TS,segment & ~0x03);
727 if((desc.flags & 0x0080) == 0)
729 logerror("IRQ: Task gate: TSS is not present.\n");
730 FAULT_EXP(FAULT_NP,segment & ~0x03);
732 if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1))
733 cpustate->eip = cpustate->prev_eip;
734 if(desc.flags & 0x08)
735 i386_task_switch(desc.selector,1);
737 i286_task_switch(desc.selector,1);
742 /* Interrupt or Trap gate */
743 memset(&desc, 0, sizeof(desc));
744 desc.selector = segment;
745 i386_load_protected_mode_segment(&desc,NULL);
746 CPL = cpustate->CPL; // current privilege level
747 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
748 // RPL = segment & 0x03; // requested privilege level
750 if((segment & ~0x03) == 0)
752 logerror("IRQ: Gate segment is null.\n");
753 FAULT_EXP(FAULT_GP,cpustate->ext)
757 if((segment & ~0x07) > cpustate->ldtr.limit)
759 logerror("IRQ: Gate segment is past LDT limit.\n");
760 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
765 if((segment & ~0x07) > cpustate->gdtr.limit)
767 logerror("IRQ: Gate segment is past GDT limit.\n");
768 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
771 if((desc.flags & 0x0018) != 0x18)
773 logerror("IRQ: Gate descriptor is not a code segment.\n");
774 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
776 if((desc.flags & 0x0080) == 0)
778 logerror("IRQ: Gate segment is not present.\n");
779 FAULT_EXP(FAULT_NP,(segment & 0x03)+cpustate->ext)
781 if((desc.flags & 0x0004) == 0 && (DPL < CPL))
783 /* IRQ to inner privilege */
785 UINT32 newESP,oldSS,oldESP;
787 if(V8086_MODE && DPL)
789 logerror("IRQ: Gate to CPL>0 from VM86 mode.\n");
790 FAULT_EXP(FAULT_GP,segment & ~0x03);
792 /* Check new stack segment in TSS */
793 memset(&stack, 0, sizeof(stack));
794 stack.selector = i386_get_stack_segment(DPL);
795 i386_load_protected_mode_segment(&stack,NULL);
796 oldSS = cpustate->sreg[SS].selector;
801 if((stack.selector & ~0x03) == 0)
803 logerror("IRQ: New stack selector is null.\n");
804 FAULT_EXP(FAULT_GP,cpustate->ext)
806 if(stack.selector & 0x04)
808 if((stack.selector & ~0x07) > cpustate->ldtr.base)
810 logerror("IRQ: New stack selector is past LDT limit.\n");
811 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
816 if((stack.selector & ~0x07) > cpustate->gdtr.base)
818 logerror("IRQ: New stack selector is past GDT limit.\n");
819 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
822 if((stack.selector & 0x03) != DPL)
824 logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n");
825 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
827 if(((stack.flags >> 5) & 0x03) != DPL)
829 logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n");
830 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
832 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
834 logerror("IRQ: New stack segment is not a writable data segment.\n");
835 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
837 if((stack.flags & 0x0080) == 0)
839 logerror("IRQ: New stack segment is not present.\n");
840 FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
842 newESP = i386_get_stack_ptr(DPL);
843 if(type & 0x08) // 32-bit gate
845 if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4)))
847 logerror("IRQ: New stack has no space for return addresses.\n");
848 FAULT_EXP(FAULT_SS,0)
854 if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4)))
856 logerror("IRQ: New stack has no space for return addresses.\n");
857 FAULT_EXP(FAULT_SS,0)
860 if(offset > desc.limit)
862 logerror("IRQ: New EIP is past code segment limit.\n");
863 FAULT_EXP(FAULT_GP,0)
865 /* change CPL before accessing the stack */
867 /* check for page fault at new stack TODO: check if stack frame crosses page boundary */
868 WRITE_TEST(stack.base+newESP-1);
869 /* Load new stack segment descriptor */
870 cpustate->sreg[SS].selector = stack.selector;
871 i386_load_protected_mode_segment(&cpustate->sreg[SS],NULL);
872 i386_set_descriptor_accessed(stack.selector);
876 //logerror("IRQ (%08x): Interrupt during V8086 task\n",cpustate->pc);
879 PUSH32SEG(cpustate->sreg[GS].selector & 0xffff);
880 PUSH32SEG(cpustate->sreg[FS].selector & 0xffff);
881 PUSH32SEG(cpustate->sreg[DS].selector & 0xffff);
882 PUSH32SEG(cpustate->sreg[ES].selector & 0xffff);
886 PUSH16(cpustate->sreg[GS].selector);
887 PUSH16(cpustate->sreg[FS].selector);
888 PUSH16(cpustate->sreg[DS].selector);
889 PUSH16(cpustate->sreg[ES].selector);
891 cpustate->sreg[GS].selector = 0;
892 cpustate->sreg[FS].selector = 0;
893 cpustate->sreg[DS].selector = 0;
894 cpustate->sreg[ES].selector = 0;
896 i386_load_segment_descriptor(GS);
897 i386_load_segment_descriptor(FS);
898 i386_load_segment_descriptor(DS);
899 i386_load_segment_descriptor(ES);
918 if((desc.flags & 0x0004) || (DPL == CPL))
920 /* IRQ to same privilege */
921 if(V8086_MODE && !cpustate->ext)
923 logerror("IRQ: Gate to same privilege from VM86 mode.\n");
924 FAULT_EXP(FAULT_GP,segment & ~0x03);
926 if(type == 0x0e || type == 0x0f) // 32-bit gate
930 // TODO: Add check for error code (2 extra bytes)
931 if((int)(REG32(ESP)) < stack_limit)
933 logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit);
934 FAULT_EXP(FAULT_SS,0)
936 if(offset > desc.limit)
938 logerror("IRQ: Gate segment offset is past segment limit.\n");
939 FAULT_EXP(FAULT_GP,0)
945 logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n");
946 FAULT_EXP(FAULT_GP,segment)
950 UINT32 tempSP = REG32(ESP);
953 // this is ugly but the alternative is worse
954 if(type != 0x0e && type != 0x0f) // if not 386 interrupt or trap gate
956 PUSH16(oldflags & 0xffff );
957 PUSH16(cpustate->sreg[CS].selector );
958 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
959 PUSH16(cpustate->eip );
961 PUSH16(cpustate->prev_eip );
965 PUSH32(oldflags & 0x00ffffff );
966 PUSH32SEG(cpustate->sreg[CS].selector );
967 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
968 PUSH32(cpustate->eip );
970 PUSH32(cpustate->prev_eip );
979 segment = (segment & ~0x03) | cpustate->CPL;
980 cpustate->sreg[CS].selector = segment;
981 cpustate->eip = offset;
983 if(type == 0x0e || type == 0x06)
989 i386_load_segment_descriptor(CS);
990 CHANGE_PC(cpustate->eip);
/* Dispatch a trap and, for the exception vectors that architecturally carry
   an error code (#DF,#TS,#NP,#SS,#GP,#PF), push `error` on the handler's
   stack afterwards (push statements elided in this view). The gate type is
   re-read to decide the push width. */
void I386_OPS_BASE::i386_trap_with_error(int irq, int irq_gate, int trap_level, UINT32 error)
    i386_trap(irq,irq_gate,trap_level);
    if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14)
        // for these exceptions, an error code is pushed onto the stack by the processor.
        // no error code is pushed for software interrupts, either.
        UINT32 entry = irq * 8;
        /* Gate type from the IDT entry's high dword */
        v2 = READ32PL0(cpustate->idtr.base + entry + 4 );
        type = (v2>>8) & 0x1F;
        /* Task gate: follow the selector into the GDT and use the TSS
           descriptor's type instead */
        v2 = READ32PL0(cpustate->idtr.base + entry);
        v2 = READ32PL0(cpustate->gdtr.base + ((v2 >> 16) & 0xfff8) + 4);
        type = (v2>>8) & 0x1F;
/* Perform a 286-style (16-bit TSS) hardware task switch to `selector`.
   When `nested` is set the outgoing task's selector is written into the new
   TSS back-link and NT is raised (flag-setting lines elided in this view).
   NOTE(review): braces/else lines and some declarations are elided. */
void I386_OPS_BASE::i286_task_switch( UINT16 selector, UINT8 nested)
    UINT8 ar_byte; // access rights byte
    /* TODO: Task State Segment privilege checks */
    /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
    if(cpustate->task.segment & 0x0004)
        ar_byte = READ8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
        WRITE8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
        /* (else branch: descriptor lives in the GDT) */
        ar_byte = READ8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
        WRITE8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
    /* Save the state of the current task in the current TSS (TR register base) */
    tss = cpustate->task.base;
    WRITE16(tss+0x0e,cpustate->eip & 0x0000ffff);
    WRITE16(tss+0x10,get_flags() & 0x0000ffff);
    WRITE16(tss+0x12,REG16(AX));
    WRITE16(tss+0x14,REG16(CX));
    WRITE16(tss+0x16,REG16(DX));
    WRITE16(tss+0x18,REG16(BX));
    WRITE16(tss+0x1a,REG16(SP));
    WRITE16(tss+0x1c,REG16(BP));
    WRITE16(tss+0x1e,REG16(SI));
    WRITE16(tss+0x20,REG16(DI));
    WRITE16(tss+0x22,cpustate->sreg[ES].selector);
    WRITE16(tss+0x24,cpustate->sreg[CS].selector);
    WRITE16(tss+0x26,cpustate->sreg[SS].selector);
    WRITE16(tss+0x28,cpustate->sreg[DS].selector);
    old_task = cpustate->task.segment;
    /* Load task register with the selector of the incoming task */
    cpustate->task.segment = selector;
    memset(&seg, 0, sizeof(seg));
    seg.selector = cpustate->task.segment;
    i386_load_protected_mode_segment(&seg,NULL);
    cpustate->task.limit = seg.limit;
    cpustate->task.base = seg.base;
    cpustate->task.flags = seg.flags;
    /* Set TS bit in CR0 */
    cpustate->cr[0] |= 0x08;
    /* Load incoming task state from the new task's TSS */
    tss = cpustate->task.base;
    cpustate->ldtr.segment = READ16(tss+0x2a) & 0xffff;
    seg.selector = cpustate->ldtr.segment;
    i386_load_protected_mode_segment(&seg,NULL);
    cpustate->ldtr.limit = seg.limit;
    cpustate->ldtr.base = seg.base;
    cpustate->ldtr.flags = seg.flags;
    cpustate->eip = READ16(tss+0x0e);
    set_flags(READ16(tss+0x10));
    REG16(AX) = READ16(tss+0x12);
    REG16(CX) = READ16(tss+0x14);
    REG16(DX) = READ16(tss+0x16);
    REG16(BX) = READ16(tss+0x18);
    REG16(SP) = READ16(tss+0x1a);
    REG16(BP) = READ16(tss+0x1c);
    REG16(SI) = READ16(tss+0x1e);
    REG16(DI) = READ16(tss+0x20);
    cpustate->sreg[ES].selector = READ16(tss+0x22) & 0xffff;
    i386_load_segment_descriptor(ES);
    cpustate->sreg[CS].selector = READ16(tss+0x24) & 0xffff;
    i386_load_segment_descriptor(CS);
    cpustate->sreg[SS].selector = READ16(tss+0x26) & 0xffff;
    i386_load_segment_descriptor(SS);
    cpustate->sreg[DS].selector = READ16(tss+0x28) & 0xffff;
    i386_load_segment_descriptor(DS);
    /* Set the busy bit in the new task's descriptor */
    if(selector & 0x0004)
        ar_byte = READ8(cpustate->ldtr.base + (selector & ~0x0007) + 5);
        WRITE8(cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
        /* (else branch: GDT) */
        ar_byte = READ8(cpustate->gdtr.base + (selector & ~0x0007) + 5);
        WRITE8(cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
    /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
       and set the NT flag in the EFLAGS register */
    WRITE16(tss+0,old_task);
    CHANGE_PC(cpustate->eip);
    /* CPL comes from the incoming stack segment's DPL */
    cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
//  printf("286 Task Switch from selector %04x to %04x\n",old_task,selector);
/* Perform a 386-style (32-bit TSS) hardware task switch to `selector`,
   including CR3 (page directory) reload. When `nested` is set the outgoing
   task is back-linked into the new TSS and NT is raised (flag-setting lines
   elided in this view).
   NOTE(review): braces/else lines and some declarations are elided. */
void I386_OPS_BASE::i386_task_switch( UINT16 selector, UINT8 nested)
    UINT8 ar_byte; // access rights byte
    UINT32 oldcr3 = cpustate->cr[3];
    /* TODO: Task State Segment privilege checks */
    /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
    if(cpustate->task.segment & 0x0004)
        ar_byte = READ8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
        WRITE8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
        /* (else branch: descriptor lives in the GDT) */
        ar_byte = READ8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
        WRITE8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
    /* Save the state of the current task in the current TSS (TR register base) */
    tss = cpustate->task.base;
    WRITE32(tss+0x1c,cpustate->cr[3]); // correct?
    WRITE32(tss+0x20,cpustate->eip);
    WRITE32(tss+0x24,get_flags());
    WRITE32(tss+0x28,REG32(EAX));
    WRITE32(tss+0x2c,REG32(ECX));
    WRITE32(tss+0x30,REG32(EDX));
    WRITE32(tss+0x34,REG32(EBX));
    WRITE32(tss+0x38,REG32(ESP));
    WRITE32(tss+0x3c,REG32(EBP));
    WRITE32(tss+0x40,REG32(ESI));
    WRITE32(tss+0x44,REG32(EDI));
    WRITE32(tss+0x48,cpustate->sreg[ES].selector);
    WRITE32(tss+0x4c,cpustate->sreg[CS].selector);
    WRITE32(tss+0x50,cpustate->sreg[SS].selector);
    WRITE32(tss+0x54,cpustate->sreg[DS].selector);
    WRITE32(tss+0x58,cpustate->sreg[FS].selector);
    WRITE32(tss+0x5c,cpustate->sreg[GS].selector);
    old_task = cpustate->task.segment;
    /* Load task register with the selector of the incoming task */
    cpustate->task.segment = selector;
    memset(&seg, 0, sizeof(seg));
    seg.selector = cpustate->task.segment;
    i386_load_protected_mode_segment(&seg,NULL);
    cpustate->task.limit = seg.limit;
    cpustate->task.base = seg.base;
    cpustate->task.flags = seg.flags;
    /* Set TS bit in CR0 */
    cpustate->cr[0] |= 0x08;
    /* Load incoming task state from the new task's TSS */
    tss = cpustate->task.base;
    cpustate->ldtr.segment = READ32(tss+0x60) & 0xffff;
    seg.selector = cpustate->ldtr.segment;
    i386_load_protected_mode_segment(&seg,NULL);
    cpustate->ldtr.limit = seg.limit;
    cpustate->ldtr.base = seg.base;
    cpustate->ldtr.flags = seg.flags;
    cpustate->eip = READ32(tss+0x20);
    set_flags(READ32(tss+0x24));
    REG32(EAX) = READ32(tss+0x28);
    REG32(ECX) = READ32(tss+0x2c);
    REG32(EDX) = READ32(tss+0x30);
    REG32(EBX) = READ32(tss+0x34);
    REG32(ESP) = READ32(tss+0x38);
    REG32(EBP) = READ32(tss+0x3c);
    REG32(ESI) = READ32(tss+0x40);
    REG32(EDI) = READ32(tss+0x44);
    cpustate->sreg[ES].selector = READ32(tss+0x48) & 0xffff;
    i386_load_segment_descriptor(ES);
    cpustate->sreg[CS].selector = READ32(tss+0x4c) & 0xffff;
    i386_load_segment_descriptor(CS);
    cpustate->sreg[SS].selector = READ32(tss+0x50) & 0xffff;
    i386_load_segment_descriptor(SS);
    cpustate->sreg[DS].selector = READ32(tss+0x54) & 0xffff;
    i386_load_segment_descriptor(DS);
    cpustate->sreg[FS].selector = READ32(tss+0x58) & 0xffff;
    i386_load_segment_descriptor(FS);
    cpustate->sreg[GS].selector = READ32(tss+0x5c) & 0xffff;
    i386_load_segment_descriptor(GS);
    /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
       and set the NT flag in the EFLAGS register before setting cr3 as the old tss address might be gone */
    WRITE32(tss+0,old_task);
    cpustate->cr[3] = READ32(tss+0x1c);  // CR3 (PDBR)
    /* New page directory invalidates cached translations */
    if(oldcr3 != cpustate->cr[3])
        vtlb_flush_dynamic(cpustate->vtlb);
    /* Set the busy bit in the new task's descriptor */
    if(selector & 0x0004)
        ar_byte = READ8(cpustate->ldtr.base + (selector & ~0x0007) + 5);
        WRITE8(cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
        /* (else branch: GDT) */
        ar_byte = READ8(cpustate->gdtr.base + (selector & ~0x0007) + 5);
        WRITE8(cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
    CHANGE_PC(cpustate->eip);
    /* CPL comes from the incoming stack segment's DPL */
    cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
//  printf("386 Task Switch from selector %04x to %04x\n",old_task,selector);
/* Poll pending external events: service SMI first (handler elided in this
   view), then take a maskable interrupt if one is pending and IF is set. */
void I386_OPS_BASE::i386_check_irq_line()
    /* SMI has priority and is only taken when not already in SMM */
    if(!cpustate->smm && cpustate->smi)
    /* Check if the interrupts are enabled */
    if ( (cpustate->irq_state) && cpustate->IF )
        /* Interrupt acknowledge overhead */
        cpustate->cycles -= 2;
        /* Vector comes from the PIC's INTA cycle; treated as an external
           gate-style entry (irq_gate = 1) */
        i386_trap(cpustate->pic->get_intr_ack(), 1, 0);
        cpustate->irq_state = 0;
/* i386_protected_mode_jump: performs a far JMP under protected mode.
   seg:off is the target; operand32 selects 16/32-bit operand size; indirect
   flags a memory-indirect form. Depending on the descriptor type the target
   may be a plain code segment, a call gate (redirects to the gate's
   selector:offset) or a TSS/task gate (triggers a task switch). Any failed
   privilege or limit check raises a fault via the FAULT macro and returns. */
1266 void I386_OPS_BASE::i386_protected_mode_jump( UINT16 seg, UINT32 off, int indirect, int operand32)
1269 I386_CALL_GATE call_gate;
1272 UINT16 segment = seg;
1273 UINT32 offset = off;
1275 /* Check selector is not null */
1276 if((segment & ~0x03) == 0)
1278 logerror("JMP: Segment is null.\n");
1281 /* Selector is within descriptor table limit */
1282 if((segment & 0x04) == 0)
1284 /* check GDT limit */
1285 if((segment & ~0x07) > (cpustate->gdtr.limit))
1287 logerror("JMP: Segment is past GDT limit.\n");
1288 FAULT(FAULT_GP,segment & 0xfffc)
1293 /* check LDT limit */
1294 if((segment & ~0x07) > (cpustate->ldtr.limit))
1296 logerror("JMP: Segment is past LDT limit.\n");
1297 FAULT(FAULT_GP,segment & 0xfffc)
1300 /* Determine segment type */
1301 memset(&desc, 0, sizeof(desc));
1302 desc.selector = segment;
1303 i386_load_protected_mode_segment(&desc,NULL);
1304 CPL = cpustate->CPL; // current privilege level
1305 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1306 RPL = segment & 0x03; // requested privilege level
/* flags bits 3-4 = 11b: code segment -> ordinary far jump checks */
1307 if((desc.flags & 0x0018) == 0x0018)
1310 if((desc.flags & 0x0004) == 0)
1312 /* non-conforming */
1315 logerror("JMP: RPL %i is less than CPL %i\n",RPL,CPL);
1316 FAULT(FAULT_GP,segment & 0xfffc)
1320 logerror("JMP: DPL %i is not equal CPL %i\n",DPL,CPL);
1321 FAULT(FAULT_GP,segment & 0xfffc)
/* conforming code segment: DPL must be <= CPL */
1329 logerror("JMP: DPL %i is less than CPL %i\n",DPL,CPL);
1330 FAULT(FAULT_GP,segment & 0xfffc)
1334 if((desc.flags & 0x0080) == 0)
1336 logerror("JMP: Segment is not present\n");
1337 FAULT(FAULT_NP,segment & 0xfffc)
1339 if(offset > desc.limit)
1341 logerror("JMP: Offset is past segment limit\n");
/* system segment (S=0): gate or TSS descriptor handling below */
1347 if((desc.flags & 0x0010) != 0)
1349 logerror("JMP: Segment is a data segment\n");
1350 FAULT(FAULT_GP,segment & 0xfffc) // #GP (cannot execute code in a data segment)
1354 switch(desc.flags & 0x000f)
1356 case 0x01: // 286 Available TSS
1357 case 0x09: // 386 Available TSS
1358 logerror("JMP: Available 386 TSS at %08x\n",cpustate->pc);
1359 memset(&desc, 0, sizeof(desc));
1360 desc.selector = segment;
1361 i386_load_protected_mode_segment(&desc,NULL);
1362 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1365 logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL);
1366 FAULT(FAULT_GP,segment & 0xfffc)
1370 logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL);
1371 FAULT(FAULT_GP,segment & 0xfffc)
1373 if((desc.flags & 0x0080) == 0)
1375 logerror("JMP: TSS: Segment is not present\n");
1376 FAULT(FAULT_GP,segment & 0xfffc)
/* bit 3 of the type distinguishes a 386 TSS (set) from a 286 TSS (clear) */
1378 if(desc.flags & 0x0008)
1379 i386_task_switch(desc.selector,0);
1381 i286_task_switch(desc.selector,0);
1383 case 0x04: // 286 Call Gate
1384 case 0x0c: // 386 Call Gate
1385 //logerror("JMP: Call gate at %08x\n",cpustate->pc);
1387 memset(&call_gate, 0, sizeof(call_gate));
1388 call_gate.segment = segment;
1389 i386_load_call_gate(&call_gate);
1390 DPL = call_gate.dpl;
1393 logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL);
1394 FAULT(FAULT_GP,segment & 0xfffc)
1398 logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL);
1399 FAULT(FAULT_GP,segment & 0xfffc)
1401 if((desc.flags & 0x0080) == 0)
1403 logerror("JMP: Call Gate: Segment is not present\n");
1404 FAULT(FAULT_NP,segment & 0xfffc)
1406 /* Now we examine the segment that the call gate refers to */
1407 if(call_gate.selector == 0)
1409 logerror("JMP: Call Gate: Gate selector is null\n");
1412 if(call_gate.selector & 0x04)
1414 if((call_gate.selector & ~0x07) > cpustate->ldtr.limit)
1416 logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n");
1417 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1422 if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1424 logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n");
1425 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1428 desc.selector = call_gate.selector;
1429 i386_load_protected_mode_segment(&desc,NULL);
1430 DPL = (desc.flags >> 5) & 0x03;
1431 if((desc.flags & 0x0018) != 0x18)
1433 logerror("JMP: Call Gate: Gate does not point to a code segment\n");
1434 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1436 if((desc.flags & 0x0004) == 0)
1440 logerror("JMP: Call Gate: Gate DPL does not equal CPL\n");
1441 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1448 logerror("JMP: Call Gate: Gate DPL is greater than CPL\n");
1449 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1452 if((desc.flags & 0x0080) == 0)
1454 logerror("JMP: Call Gate: Gate Segment is not present\n");
1455 FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1457 if(call_gate.offset > desc.limit)
1459 logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n");
1460 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* redirect the jump to the gate's target; fall through to the common CS load */
1462 segment = call_gate.selector;
1463 offset = call_gate.offset;
1465 case 0x05: // Task Gate
1466 logerror("JMP: Task gate at %08x\n",cpustate->pc);
1467 memset(&call_gate, 0, sizeof(call_gate));
1468 call_gate.segment = segment;
1469 i386_load_call_gate(&call_gate);
1470 DPL = call_gate.dpl;
1473 logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
1474 FAULT(FAULT_GP,segment & 0xfffc)
1478 logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
1479 FAULT(FAULT_GP,segment & 0xfffc)
1481 if(call_gate.present == 0)
1483 logerror("JMP: Task Gate: Gate is not present.\n");
1484 FAULT(FAULT_GP,segment & 0xfffc)
1486 /* Check the TSS that the task gate points to */
1487 desc.selector = call_gate.selector;
1488 i386_load_protected_mode_segment(&desc,NULL);
1489 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1490 RPL = call_gate.selector & 0x03; // requested privilege level
1491 if(call_gate.selector & 0x04)
1493 logerror("JMP: Task Gate TSS: TSS must be global.\n");
1494 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1498 if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1500 logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n");
1501 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* NOTE(review): this condition looks inverted — AR types 0x09/0x01 ARE the
   available 386/286 TSS types, yet the code faults when it sees them.
   Expected would be faulting when the type is NOT one of these. Verify
   against upstream before changing. */
1504 if((call_gate.ar & 0x000f) == 0x0009 || (call_gate.ar & 0x000f) == 0x0001)
1506 logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n");
1507 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1509 if(call_gate.present == 0)
1511 logerror("JMP: Task Gate TSS: TSS is not present.\n");
1512 FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1514 if(call_gate.ar & 0x08)
1515 i386_task_switch(call_gate.selector,0);
1517 i286_task_switch(call_gate.selector,0);
1519 default: // invalid segment type
1520 logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f);
1521 FAULT(FAULT_GP,segment & 0xfffc)
/* commit the jump: RPL is forced to the current CPL before loading CS */
1527 segment = (segment & ~0x03) | cpustate->CPL;
1529 cpustate->eip = offset & 0x0000ffff;
1531 cpustate->eip = offset;
1532 cpustate->sreg[CS].selector = segment;
1533 cpustate->performed_intersegment_jump = 1;
1534 i386_load_segment_descriptor(CS);
1535 CHANGE_PC(cpustate->eip);
/* i386_protected_mode_call: performs a far CALL under protected mode.
   seg:off is the target; operand32 selects 16/32-bit operand size; indirect
   flags a memory-indirect form. Targets may be a code segment (push return
   address and transfer), a call gate (possibly switching to an inner-
   privilege stack and copying dword_count parameters across), or a TSS/task
   gate (task switch with nesting). Failed checks raise faults via FAULT. */
1538 void I386_OPS_BASE::i386_protected_mode_call( UINT16 seg, UINT32 off, int indirect, int operand32)
1541 I386_CALL_GATE gate;
1543 UINT8 CPL, DPL, RPL;
1544 UINT16 selector = seg;
1545 UINT32 offset = off;
1548 if((selector & ~0x03) == 0)
1550 logerror("CALL (%08x): Selector is null.\n",cpustate->pc);
1551 FAULT(FAULT_GP,0) // #GP(0)
1555 if((selector & ~0x07) > cpustate->ldtr.limit)
1557 logerror("CALL: Selector is past LDT limit.\n");
1558 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1563 if((selector & ~0x07) > cpustate->gdtr.limit)
1565 logerror("CALL: Selector is past GDT limit.\n");
1566 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1570 /* Determine segment type */
1571 memset(&desc, 0, sizeof(desc));
1572 desc.selector = selector;
1573 i386_load_protected_mode_segment(&desc,NULL);
1574 CPL = cpustate->CPL; // current privilege level
1575 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1576 RPL = selector & 0x03; // requested privilege level
1577 if((desc.flags & 0x0018) == 0x18) // is a code segment
1579 if(desc.flags & 0x0004)
/* conforming code segment: DPL must not exceed CPL */
1584 logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL);
1585 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1590 /* non-conforming */
1593 logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL);
1594 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1598 logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL);
1599 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1603 if((desc.flags & 0x0080) == 0)
1605 logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1606 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* verify the stack can hold CS:(E)IP before pushing anything */
1608 if (operand32 != 0) // if 32-bit
1610 UINT32 offset = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
1611 if(i386_limit_check(SS, offset, 8))
1613 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1614 FAULT(FAULT_SS,0) // #SS(0)
1619 UINT32 offset = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
1620 if(i386_limit_check(SS, offset, 4))
1622 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1623 FAULT(FAULT_SS,0) // #SS(0)
1626 if(offset > desc.limit)
1628 logerror("CALL: EIP is past segment limit.\n");
1629 FAULT(FAULT_GP,0) // #GP(0)
1634 /* special segment type */
1635 if(desc.flags & 0x0010)
1637 logerror("CALL: Segment is a data segment.\n");
1638 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1642 switch(desc.flags & 0x000f)
1644 case 0x01: // Available 286 TSS
1645 case 0x09: // Available 386 TSS
1646 logerror("CALL: Available TSS at %08x\n",cpustate->pc);
1649 logerror("CALL: TSS: DPL is less than CPL.\n");
1650 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1654 logerror("CALL: TSS: DPL is less than RPL.\n");
1655 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
/* type bit 1 = busy flag for TSS descriptors */
1657 if(desc.flags & 0x0002)
1659 logerror("CALL: TSS: TSS is busy.\n");
1660 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1662 if((desc.flags & 0x0080) == 0)
1664 logerror("CALL: TSS: Segment %02x is not present.\n",selector);
1665 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
1667 if(desc.flags & 0x08)
1668 i386_task_switch(desc.selector,1);
1670 i286_task_switch(desc.selector,1);
1672 case 0x04: // 286 call gate
1673 case 0x0c: // 386 call gate
1674 if((desc.flags & 0x000f) == 0x04)
1678 memset(&gate, 0, sizeof(gate));
1679 gate.segment = selector;
1680 i386_load_call_gate(&gate);
1682 //logerror("CALL: Call gate at %08x (%i parameters)\n",cpustate->pc,gate.dword_count);
1685 logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL);
1686 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1690 logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL);
1691 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1693 if(gate.present == 0)
1695 logerror("CALL: Call gate is not present.\n");
1696 FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector)
1698 desc.selector = gate.selector;
1699 if((gate.selector & ~0x03) == 0)
1701 logerror("CALL: Call gate: Segment is null.\n");
1702 FAULT(FAULT_GP,0) // #GP(0)
1704 if(desc.selector & 0x04)
1706 if((desc.selector & ~0x07) > cpustate->ldtr.limit)
1708 logerror("CALL: Call gate: Segment is past LDT limit\n");
1709 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1714 if((desc.selector & ~0x07) > cpustate->gdtr.limit)
1716 logerror("CALL: Call gate: Segment is past GDT limit\n");
1717 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1720 i386_load_protected_mode_segment(&desc,NULL);
1721 if((desc.flags & 0x0018) != 0x18)
1723 logerror("CALL: Call gate: Segment is not a code segment.\n");
1724 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1726 DPL = ((desc.flags >> 5) & 0x03);
1729 logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL);
1730 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1732 if((desc.flags & 0x0080) == 0)
1734 logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1735 FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector)
/* non-conforming target at higher privilege: switch to inner stack */
1737 if(DPL < CPL && (desc.flags & 0x0004) == 0)
1741 UINT32 oldSS,oldESP;
1742 /* more privilege */
1743 /* Check new SS segment for privilege level from TSS */
1744 memset(&stack, 0, sizeof(stack));
1745 stack.selector = i386_get_stack_segment(DPL);
1746 i386_load_protected_mode_segment(&stack,NULL);
1747 if((stack.selector & ~0x03) == 0)
1749 logerror("CALL: Call gate: TSS selector is null\n");
1750 FAULT(FAULT_TS,0) // #TS(0)
1752 if(stack.selector & 0x04)
1754 if((stack.selector & ~0x07) > cpustate->ldtr.limit)
1756 logerror("CALL: Call gate: TSS selector is past LDT limit\n");
1757 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1762 if((stack.selector & ~0x07) > cpustate->gdtr.limit)
1764 logerror("CALL: Call gate: TSS selector is past GDT limit\n");
1765 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1768 if((stack.selector & 0x03) != DPL)
1770 logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL);
1771 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1773 if(((stack.flags >> 5) & 0x03) != DPL)
1775 logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL);
1776 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
/* NOTE(review): "writable data segment" should require type==data (0x10)
   AND writable bit (0x0002) set; this condition faults when the segment is
   non-data AND writable, which looks wrong — confirm against upstream. */
1778 if((stack.flags & 0x0018) != 0x10 && (stack.flags & 0x0002))
1780 logerror("CALL: Call gate: Stack segment is not a writable data segment\n");
1781 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1783 if((stack.flags & 0x0080) == 0)
1785 logerror("CALL: Call gate: Stack segment is not present\n");
1786 FAULT(FAULT_SS,stack.selector) // #SS(SS selector)
1788 UINT32 newESP = i386_get_stack_ptr(DPL);
/* inner stack must fit SS:ESP + CS:EIP (16 bytes in 32-bit mode) plus params */
1795 if(newESP < (UINT32)((gate.dword_count & 0x1f) + 16))
1797 logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n");
1798 FAULT(FAULT_SS,0) // #SS(0)
1800 if(gate.offset > desc.limit)
1802 logerror("CALL: Call gate: EIP is past segment limit.\n");
1803 FAULT(FAULT_GP,0) // #GP(0)
1808 if(newESP < (UINT32)((gate.dword_count & 0x1f) + 8))
1810 logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n");
1811 FAULT(FAULT_SS,0) // #SS(0)
1813 if((gate.offset & 0xffff) > desc.limit)
1815 logerror("CALL: Call gate: IP is past segment limit.\n");
1816 FAULT(FAULT_GP,0) // #GP(0)
1819 selector = gate.selector;
1820 offset = gate.offset;
/* CPL is raised to the inner level before the stack switch */
1822 cpustate->CPL = (stack.flags >> 5) & 0x03;
1823 /* check for page fault at new stack */
1824 WRITE_TEST(stack.base+newESP-1);
1825 /* switch to new stack */
1826 oldSS = cpustate->sreg[SS].selector;
1827 cpustate->sreg[SS].selector = i386_get_stack_segment(cpustate->CPL);
1830 oldESP = REG32(ESP);
1836 i386_load_segment_descriptor(SS );
1837 REG32(ESP) = newESP;
1847 PUSH16(oldESP & 0xffff);
/* temp maps the OLD stack segment so parameters can be read from it */
1850 memset(&temp, 0, sizeof(temp));
1851 temp.selector = oldSS;
1852 i386_load_protected_mode_segment(&temp,NULL);
1853 /* copy parameters from old stack to new stack */
1854 for(x=(gate.dword_count & 0x1f)-1;x>=0;x--)
1856 UINT32 addr = oldESP + (operand32?(x*4):(x*2));
1857 addr = temp.base + (temp.d?addr:(addr&0xffff));
1859 PUSH32(READ32(addr));
1861 PUSH16(READ16(addr));
1867 /* same privilege */
1868 if (operand32 != 0) // if 32-bit
1870 UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
1871 if(i386_limit_check(SS, stkoff, 8))
1873 logerror("CALL: Stack has no room for return address.\n");
1874 FAULT(FAULT_SS,0) // #SS(0)
1876 selector = gate.selector;
1877 offset = gate.offset;
1881 UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
1882 if(i386_limit_check(SS, stkoff, 4))
1884 logerror("CALL: Stack has no room for return address.\n");
1885 FAULT(FAULT_SS,0) // #SS(0)
1887 selector = gate.selector;
1888 offset = gate.offset & 0xffff;
1890 if(offset > desc.limit)
1892 logerror("CALL: EIP is past segment limit.\n");
1893 FAULT(FAULT_GP,0) // #GP(0)
1898 case 0x05: // task gate
1899 logerror("CALL: Task gate at %08x\n",cpustate->pc);
1900 memset(&gate, 0, sizeof(gate));
1901 gate.segment = selector;
1902 i386_load_call_gate(&gate);
1906 logerror("CALL: Task Gate: Gate DPL is less than CPL.\n");
1907 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1911 logerror("CALL: Task Gate: Gate DPL is less than RPL.\n");
1912 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1914 if((gate.ar & 0x0080) == 0)
1916 logerror("CALL: Task Gate: Gate is not present.\n");
1917 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
1919 /* Check the TSS that the task gate points to */
1920 desc.selector = gate.selector;
1921 i386_load_protected_mode_segment(&desc,NULL);
1922 if(gate.selector & 0x04)
1924 logerror("CALL: Task Gate: TSS is not global.\n");
1925 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1929 if((gate.selector & ~0x07) > cpustate->gdtr.limit)
1931 logerror("CALL: Task Gate: TSS is past GDT limit.\n");
1932 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1935 if(desc.flags & 0x0002)
1937 logerror("CALL: Task Gate: TSS is busy.\n");
1938 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1940 if((desc.flags & 0x0080) == 0)
1942 logerror("CALL: Task Gate: TSS is not present.\n");
1943 FAULT(FAULT_NP,gate.selector & ~0x03) // #NP(selector)
1945 if(desc.flags & 0x08)
1946 i386_task_switch(desc.selector,1); // with nesting
1948 i286_task_switch(desc.selector,1);
1951 logerror("CALL: Invalid special segment type (%i) to jump to.\n",desc.flags & 0x000f);
1952 FAULT(FAULT_GP,selector & ~0x07) // #GP(selector)
/* commit the transfer: force RPL to CPL, push return CS:(E)IP, load new CS */
1958 selector = (selector & ~0x03) | cpustate->CPL;
/* snapshot ESP so it can be restored if a push below faults */
1960 UINT32 tempSP = REG32(ESP);
1963 // this is ugly but the alternative is worse
1966 /* 16-bit operand size */
1967 PUSH16(cpustate->sreg[CS].selector );
1968 PUSH16(cpustate->eip & 0x0000ffff );
1969 cpustate->sreg[CS].selector = selector;
1970 cpustate->performed_intersegment_jump = 1;
1971 cpustate->eip = offset;
1972 i386_load_segment_descriptor(CS);
1976 /* 32-bit operand size */
1977 PUSH32SEG(cpustate->sreg[CS].selector );
1978 PUSH32(cpustate->eip );
1979 cpustate->sreg[CS].selector = selector;
1980 cpustate->performed_intersegment_jump = 1;
1981 cpustate->eip = offset;
1982 i386_load_segment_descriptor(CS );
1987 REG32(ESP) = tempSP;
1991 CHANGE_PC(cpustate->eip);
/* i386_protected_mode_retf: far RET under protected mode.
   count is the immediate byte-count to release from the stack after the
   return; operand32 selects 16/32-bit operand size. Pops CS:(E)IP, validates
   the return code segment, and — when returning to an outer privilege level
   (RPL > CPL) — also pops and validates SS:(E)SP and revalidates the data
   segment registers. Failed checks raise faults via the FAULT macro.
   NOTE(review): the descriptor-table limit checks here use `>=` whereas the
   equivalent checks in JMP/CALL use `>` — confirm which is intended. */
1994 void I386_OPS_BASE::i386_protected_mode_retf(UINT8 count, UINT8 operand32)
1996 UINT32 newCS, newEIP;
1998 UINT8 CPL, RPL, DPL;
/* read the return CS:(E)IP from the top of the current stack */
2000 UINT32 ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32!=0) ? 8 : 4);
2004 newEIP = READ16(ea) & 0xffff;
2005 newCS = READ16(ea+2) & 0xffff;
2009 newEIP = READ32(ea);
2010 newCS = READ32(ea+4) & 0xffff;
2013 memset(&desc, 0, sizeof(desc));
2014 desc.selector = newCS;
2015 i386_load_protected_mode_segment(&desc,NULL);
2016 CPL = cpustate->CPL; // current privilege level
2017 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
/* a far return may never move to a more privileged level */
2022 logerror("RETF (%08x): Return segment RPL is less than CPL.\n",cpustate->pc);
2023 FAULT(FAULT_GP,newCS & ~0x03)
2028 /* same privilege level */
2029 if((newCS & ~0x03) == 0)
2031 logerror("RETF: Return segment is null.\n");
2036 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2038 logerror("RETF: Return segment is past LDT limit.\n");
2039 FAULT(FAULT_GP,newCS & ~0x03)
2044 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2046 logerror("RETF: Return segment is past GDT limit.\n");
2047 FAULT(FAULT_GP,newCS & ~0x03)
2050 if((desc.flags & 0x0018) != 0x0018)
2052 logerror("RETF: Return segment is not a code segment.\n");
2053 FAULT(FAULT_GP,newCS & ~0x03)
2055 if(desc.flags & 0x0004)
2059 logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n");
2060 FAULT(FAULT_GP,newCS & ~0x03)
2067 logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n");
2068 FAULT(FAULT_GP,newCS & ~0x03)
2071 if((desc.flags & 0x0080) == 0)
2073 logerror("RETF (%08x): Code segment is not present.\n",cpustate->pc);
2074 FAULT(FAULT_NP,newCS & ~0x03)
2076 if(newEIP > desc.limit)
2078 logerror("RETF: EIP is past code segment limit.\n");
/* ensure CS:(E)IP plus the immediate count are within the stack limit */
2083 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2084 if(i386_limit_check(SS, offset, count + 4) != 0)
2086 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2092 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2093 if(i386_limit_check(SS, offset, count + 8) != 0)
2095 logerror("RETF: ESP is past stack segment limit.\n");
/* release the return address and the caller's parameter bytes */
2100 REG16(SP) += (4+count);
2102 REG32(ESP) += (8+count);
2106 UINT32 newSS, newESP; // when changing privilege
2107 /* outer privilege level */
2110 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2111 if(i386_limit_check(SS, offset, count + 8) != 0)
2113 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2119 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2120 if(i386_limit_check(SS, offset, count + 16) != 0)
2122 logerror("RETF: ESP is past stack segment limit.\n");
2126 /* Check CS selector and descriptor */
2127 if((newCS & ~0x03) == 0)
2129 logerror("RETF: CS segment is null.\n");
2134 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2136 logerror("RETF: CS segment selector is past LDT limit.\n");
2137 FAULT(FAULT_GP,newCS & ~0x03)
2142 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2144 logerror("RETF: CS segment selector is past GDT limit.\n");
2145 FAULT(FAULT_GP,newCS & ~0x03)
2148 if((desc.flags & 0x0018) != 0x0018)
2150 logerror("RETF: CS segment is not a code segment.\n");
2151 FAULT(FAULT_GP,newCS & ~0x03)
2153 if(desc.flags & 0x0004)
2157 logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n");
2158 FAULT(FAULT_GP,newCS & ~0x03)
2165 logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n");
2166 FAULT(FAULT_GP,newCS & ~0x03)
2169 if((desc.flags & 0x0080) == 0)
2171 logerror("RETF: CS segment is not present.\n");
2172 FAULT(FAULT_NP,newCS & ~0x03)
2174 if(newEIP > desc.limit)
2176 logerror("RETF: EIP is past return CS segment limit.\n");
/* pop the outer-level SS:(E)SP that was saved by the inner-level CALL */
2183 newESP = READ16(ea) & 0xffff;
2184 newSS = READ16(ea+2) & 0xffff;
2189 newESP = READ32(ea);
2190 newSS = READ32(ea+4) & 0xffff;
2193 /* Check SS selector and descriptor */
2194 desc.selector = newSS;
2195 i386_load_protected_mode_segment(&desc,NULL);
2196 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2197 if((newSS & ~0x07) == 0)
2199 logerror("RETF: SS segment is null.\n");
2204 if((newSS & ~0x07) > cpustate->ldtr.limit)
2206 logerror("RETF (%08x): SS segment selector is past LDT limit.\n",cpustate->pc);
2207 FAULT(FAULT_GP,newSS & ~0x03)
2212 if((newSS & ~0x07) > cpustate->gdtr.limit)
2214 logerror("RETF (%08x): SS segment selector is past GDT limit.\n",cpustate->pc);
2215 FAULT(FAULT_GP,newSS & ~0x03)
2218 if((newSS & 0x03) != RPL)
2220 logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n");
2221 FAULT(FAULT_GP,newSS & ~0x03)
2223 if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0)
2225 logerror("RETF: SS segment is not a writable data segment.\n");
2226 FAULT(FAULT_GP,newSS & ~0x03)
2228 if(((desc.flags >> 5) & 0x03) != RPL)
2230 logerror("RETF: SS DPL is not equal to CS segment RPL.\n");
2231 FAULT(FAULT_GP,newSS & ~0x03)
2233 if((desc.flags & 0x0080) == 0)
2235 logerror("RETF: SS segment is not present.\n");
2236 FAULT(FAULT_GP,newSS & ~0x03)
/* drop to the outer privilege level before reloading the segments */
2238 cpustate->CPL = newCS & 0x03;
2240 /* Load new SS:(E)SP */
2242 REG16(SP) = (newESP+count) & 0xffff;
2244 REG32(ESP) = newESP+count;
2245 cpustate->sreg[SS].selector = newSS;
2246 i386_load_segment_descriptor(SS );
2248 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2249 i386_check_sreg_validity(DS);
2250 i386_check_sreg_validity(ES);
2251 i386_check_sreg_validity(FS);
2252 i386_check_sreg_validity(GS);
2255 /* Load new CS:(E)IP */
2257 cpustate->eip = newEIP & 0xffff;
2259 cpustate->eip = newEIP;
2260 cpustate->sreg[CS].selector = newCS;
2261 i386_load_segment_descriptor(CS );
2262 CHANGE_PC(cpustate->eip);
/* i386_protected_mode_iret: IRET under protected mode.
   operand32 selects 16/32-bit operand size. Handles, in order: return while
   in V86 mode (IOPL-gated), nested-task return via the TSS back-link,
   return TO V86 mode (VM bit set in the popped EFLAGS image), same-privilege
   return, and return to an outer privilege level (which also pops SS:(E)SP
   and revalidates the data segment registers). IOPL/IF update rules depend
   on CPL. Failed checks raise faults via the FAULT macro. */
2265 void I386_OPS_BASE::i386_protected_mode_iret(int operand32)
2267 UINT32 newCS, newEIP;
2268 UINT32 newSS, newESP; // when changing privilege
2269 I386_SREG desc,stack;
2270 UINT8 CPL, RPL, DPL;
/* reassemble the two EFLAGS IOPL bits into a 0-3 value */
2272 UINT8 IOPL = cpustate->IOP1 | (cpustate->IOP2 << 1);
2274 CPL = cpustate->CPL;
/* read EIP, CS and EFLAGS from the top of the stack */
2275 UINT32 ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32 != 0) ? 12 : 6);
2278 newEIP = READ16(ea) & 0xffff;
2279 newCS = READ16(ea+2) & 0xffff;
2280 newflags = READ16(ea+4) & 0xffff;
2284 newEIP = READ32(ea);
2285 newCS = READ32(ea+4) & 0xffff;
2286 newflags = READ32(ea+8);
/* currently executing IN V86 mode: IRET is privileged unless IOPL == 3 */
2291 UINT32 oldflags = get_flags();
2294 logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
2299 cpustate->eip = newEIP & 0xffff;
2300 cpustate->sreg[CS].selector = newCS & 0xffff;
2301 newflags &= ~(3<<12);
2302 newflags |= (((oldflags>>12)&3)<<12); // IOPL cannot be changed in V86 mode
2303 set_flags((newflags & 0xffff) | (oldflags & ~0xffff));
2308 cpustate->eip = newEIP;
2309 cpustate->sreg[CS].selector = newCS & 0xffff;
2310 newflags &= ~(3<<12);
2311 newflags |= 0x20000 | (((oldflags>>12)&3)<<12); // IOPL and VM cannot be changed in V86 mode
2312 set_flags(newflags);
2316 else if(NESTED_TASK)
/* NT set: return to the previous task via the back-link in the current TSS */
2318 UINT32 task = READ32(cpustate->task.base);
2320 logerror("IRET (%08x): Nested task return.\n",cpustate->pc);
2321 /* Check back-link selector in TSS */
2324 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2325 FAULT(FAULT_TS,task & ~0x03)
2327 if((task & ~0x07) >= cpustate->gdtr.limit)
2329 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2330 FAULT(FAULT_TS,task & ~0x03)
2332 memset(&desc, 0, sizeof(desc));
2333 desc.selector = task;
2334 i386_load_protected_mode_segment(&desc,NULL);
/* the back-linked descriptor must be a BUSY 386 TSS (type 0x0b) */
2335 if((desc.flags & 0x001f) != 0x000b)
2337 logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",cpustate->pc);
2338 FAULT(FAULT_TS,task & ~0x03)
2340 if((desc.flags & 0x0080) == 0)
2342 logerror("IRET: Task return: Back-linked TSS is not present.\n");
2343 FAULT(FAULT_NP,task & ~0x03)
2345 if(desc.flags & 0x08)
2346 i386_task_switch(desc.selector,0);
2348 i286_task_switch(desc.selector,0);
2353 if(newflags & 0x00020000) // if returning to virtual 8086 mode
2355 // 16-bit iret can't reach here
2356 newESP = READ32(ea+12);
2357 newSS = READ32(ea+16) & 0xffff;
2358 /* Return to v86 mode */
2359 //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",cpustate->pc);
2362 UINT32 oldflags = get_flags();
/* below-IOPL3 callers may not alter IOPL; CPL>0 callers may not alter IF */
2363 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2365 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2367 set_flags(newflags);
/* pop EIP/CS/EFLAGS, then the V86 frame: ESP, SS, ES, DS, FS, GS */
2368 cpustate->eip = POP32() & 0xffff; // high 16 bits are ignored
2369 cpustate->sreg[CS].selector = POP32() & 0xffff;
2370 POP32(); // already set flags
2372 newSS = POP32() & 0xffff;
2373 cpustate->sreg[ES].selector = POP32() & 0xffff;
2374 cpustate->sreg[DS].selector = POP32() & 0xffff;
2375 cpustate->sreg[FS].selector = POP32() & 0xffff;
2376 cpustate->sreg[GS].selector = POP32() & 0xffff;
2377 REG32(ESP) = newESP; // all 32 bits are loaded
2378 cpustate->sreg[SS].selector = newSS;
2379 i386_load_segment_descriptor(ES);
2380 i386_load_segment_descriptor(DS);
2381 i386_load_segment_descriptor(FS);
2382 i386_load_segment_descriptor(GS);
2383 i386_load_segment_descriptor(SS);
2384 cpustate->CPL = 3; // Virtual 8086 tasks are always run at CPL 3
/* protected-mode return: first make sure the popped frame was in-bounds */
2390 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2391 if(i386_limit_check(SS, offset, 4) != 0)
2393 logerror("IRET: Data on stack is past SS limit.\n");
2399 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2400 if(i386_limit_check(SS, offset, 8) != 0)
2402 logerror("IRET: Data on stack is past SS limit.\n");
/* IRET may never move to a more privileged level */
2409 logerror("IRET (%08x): Return CS RPL is less than CPL.\n",cpustate->pc);
2410 FAULT(FAULT_GP,newCS & ~0x03)
2414 /* return to same privilege level */
2417 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2418 if(i386_limit_check(SS, offset, 6) != 0)
2420 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2426 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2427 if(i386_limit_check(SS, offset, 12) != 0)
2429 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2433 if((newCS & ~0x03) == 0)
2435 logerror("IRET: Return CS selector is null.\n");
2440 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2442 logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS);
2443 FAULT(FAULT_GP,newCS & ~0x03)
2448 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2450 logerror("IRET: Return CS selector is past GDT limit.\n");
2451 FAULT(FAULT_GP,newCS & ~0x03)
2454 memset(&desc, 0, sizeof(desc));
2455 desc.selector = newCS;
2456 i386_load_protected_mode_segment(&desc,NULL);
2457 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2459 if((desc.flags & 0x0018) != 0x0018)
2461 logerror("IRET (%08x): Return CS segment is not a code segment.\n",cpustate->pc);
/* NOTE(review): error code uses ~0x07 here while sibling checks use ~0x03
   (clear RPL only); confirm which mask is intended */
2462 FAULT(FAULT_GP,newCS & ~0x07)
2464 if(desc.flags & 0x0004)
2468 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2469 FAULT(FAULT_GP,newCS & ~0x03)
2476 logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n");
2477 FAULT(FAULT_GP,newCS & ~0x03)
2480 if((desc.flags & 0x0080) == 0)
2482 logerror("IRET: (%08x) Return CS segment is not present.\n", cpustate->pc);
2483 FAULT(FAULT_NP,newCS & ~0x03)
2485 if(newEIP > desc.limit)
2487 logerror("IRET: Return EIP is past return CS limit.\n");
/* IOPL is preserved unless the caller is privileged enough to change it */
2493 UINT32 oldflags = get_flags();
2494 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2499 cpustate->eip = newEIP;
2500 cpustate->sreg[CS].selector = newCS;
2501 set_flags(newflags);
2506 cpustate->eip = newEIP;
2507 cpustate->sreg[CS].selector = newCS & 0xffff;
2508 set_flags(newflags);
2514 /* return to outer privilege level */
2515 memset(&desc, 0, sizeof(desc));
2516 desc.selector = newCS;
2517 i386_load_protected_mode_segment(&desc,NULL);
2518 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
/* the outer-level frame additionally holds SS:(E)SP — verify it fits */
2522 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2523 if(i386_limit_check(SS, offset, 10) != 0)
2525 logerror("IRET: SP is past SS limit.\n");
2531 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2532 if(i386_limit_check(SS, offset, 20) != 0)
2534 logerror("IRET: ESP is past SS limit.\n");
2538 /* Check CS selector and descriptor */
2539 if((newCS & ~0x03) == 0)
2541 logerror("IRET: Return CS selector is null.\n");
2546 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2548 logerror("IRET: Return CS selector is past LDT limit.\n");
2549 FAULT(FAULT_GP,newCS & ~0x03);
2554 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2556 logerror("IRET: Return CS selector is past GDT limit.\n");
2557 FAULT(FAULT_GP,newCS & ~0x03);
2560 if((desc.flags & 0x0018) != 0x0018)
2562 logerror("IRET: Return CS segment is not a code segment.\n");
2563 FAULT(FAULT_GP,newCS & ~0x03)
2565 if(desc.flags & 0x0004)
2569 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2570 FAULT(FAULT_GP,newCS & ~0x03)
2577 logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n");
2578 FAULT(FAULT_GP,newCS & ~0x03)
2581 if((desc.flags & 0x0080) == 0)
2583 logerror("IRET: Return CS segment is not present.\n");
2584 FAULT(FAULT_NP,newCS & ~0x03)
2587 /* Check SS selector and descriptor */
2590 newESP = READ16(ea+6) & 0xffff;
2591 newSS = READ16(ea+8) & 0xffff;
2595 newESP = READ32(ea+12);
2596 newSS = READ32(ea+16) & 0xffff;
2598 memset(&stack, 0, sizeof(stack));
2599 stack.selector = newSS;
2600 i386_load_protected_mode_segment(&stack,NULL);
2601 DPL = (stack.flags >> 5) & 0x03;
2602 if((newSS & ~0x03) == 0)
2604 logerror("IRET: Return SS selector is null.\n");
2609 if((newSS & ~0x07) >= cpustate->ldtr.limit)
2611 logerror("IRET: Return SS selector is past LDT limit.\n");
2612 FAULT(FAULT_GP,newSS & ~0x03);
2617 if((newSS & ~0x07) >= cpustate->gdtr.limit)
2619 logerror("IRET: Return SS selector is past GDT limit.\n");
2620 FAULT(FAULT_GP,newSS & ~0x03);
2623 if((newSS & 0x03) != RPL)
2625 logerror("IRET: Return SS RPL is not equal to return CS RPL.\n");
2626 FAULT(FAULT_GP,newSS & ~0x03)
2628 if((stack.flags & 0x0018) != 0x0010)
2630 logerror("IRET: Return SS segment is not a data segment.\n");
2631 FAULT(FAULT_GP,newSS & ~0x03)
2633 if((stack.flags & 0x0002) == 0)
2635 logerror("IRET: Return SS segment is not writable.\n");
2636 FAULT(FAULT_GP,newSS & ~0x03)
2640 logerror("IRET: Return SS DPL does not equal SS RPL.\n");
2641 FAULT(FAULT_GP,newSS & ~0x03)
2643 if((stack.flags & 0x0080) == 0)
2645 logerror("IRET: Return SS segment is not present.\n");
2646 FAULT(FAULT_NP,newSS & ~0x03)
2648 if(newEIP > desc.limit)
2650 logerror("IRET: EIP is past return CS limit.\n");
2654 //            if(operand32 == 0)
2657 //                REG32(ESP) += 20;
2659 // IOPL can only change if CPL is zero
2662 UINT32 oldflags = get_flags();
2663 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2665 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
/* commit the outer-level return: CS:(E)IP, EFLAGS, SS:(E)SP, then CPL */
2670 cpustate->eip = newEIP & 0xffff;
2671 cpustate->sreg[CS].selector = newCS;
2672 set_flags(newflags);
2673 REG16(SP) = newESP & 0xffff;
2674 cpustate->sreg[SS].selector = newSS;
2678 cpustate->eip = newEIP;
2679 cpustate->sreg[CS].selector = newCS & 0xffff;
2680 set_flags(newflags);
2681 REG32(ESP) = newESP;
2682 cpustate->sreg[SS].selector = newSS & 0xffff;
2684 cpustate->CPL = newCS & 0x03;
2685 i386_load_segment_descriptor(SS);
2687 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2688 i386_check_sreg_validity(DS);
2689 i386_check_sreg_validity(ES);
2690 i386_check_sreg_validity(FS);
2691 i386_check_sreg_validity(GS);
2696 i386_load_segment_descriptor(CS);
2697 CHANGE_PC(cpustate->eip);
// Populate the per-CPU-model cycle-count tables from the static
// x86_cycle_table: for each CPU model j and each listed opcode,
// cpu_cycles[j][0] is the real-mode count (cycle_table_rm) and
// cpu_cycles[j][1] the protected-mode count (cycle_table_pm).
2702 void I386_OPS_BASE::build_cycle_table()
2705 for (j=0; j < X86_NUM_CPUS; j++)
// Tables are statically allocated now; old malloc-based setup kept for reference.
2707 // cycle_table_rm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2708 // cycle_table_pm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2710 for (i=0; i < sizeof(x86_cycle_table)/sizeof(X86_CYCLE_TABLE); i++)
2712 int opcode = x86_cycle_table[i].op;
2713 cycle_table_rm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][0];
2714 cycle_table_pm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][1];
// Log a diagnostic for an invalid/unimplemented opcode.
// Without DEBUG_MISSING_OPCODE: one-line message with the opcode byte and PC.
// With DEBUG_MISSING_OPCODE: dumps the full recorded opcode byte sequence
// (opcode_bytes[0..opcode_bytes_length)) at opcode_pc.
2719 void I386_OPS_BASE::report_invalid_opcode()
2721 #ifndef DEBUG_MISSING_OPCODE
2722 logerror("i386: Invalid opcode %02X at %08X %s\n", cpustate->opcode, cpustate->pc - 1, cpustate->lock ? "with lock" : "");
2724 logerror("i386: Invalid opcode");
2725 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2726 logerror(" %02X", cpustate->opcode_bytes[a]);
2727 logerror(" at %08X\n", cpustate->opcode_pc);
// Log a diagnostic for an opcode with an invalid/unsupported modrm byte.
// @param opcode  textual opcode name used in the message
// @param modrm   the offending modrm byte
// Same two build variants as report_invalid_opcode(): short form by default,
// full byte dump when DEBUG_MISSING_OPCODE is defined.
2731 void I386_OPS_BASE::report_invalid_modrm( const char* opcode, UINT8 modrm)
2733 #ifndef DEBUG_MISSING_OPCODE
2734 logerror("i386: Invalid %s modrm %01X at %08X\n", opcode, modrm, cpustate->pc - 2);
2736 logerror("i386: Invalid %s modrm %01X", opcode, modrm);
2737 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2738 logerror(" %02X", cpustate->opcode_bytes[a]);
2739 logerror(" at %08X\n", cpustate->opcode_pc);
2744 /* Forward declarations */
// Fetch and dispatch a one-byte opcode.
// If a LOCK prefix is active but the opcode is not lockable (lock_table[0]),
// raise #UD via the invalid handler; otherwise dispatch through the 32-bit
// or 16-bit handler table depending on the current operand size.
2747 void I386_OPS_BASE::I386OP(decode_opcode)()
2749 cpustate->opcode = FETCH();
2751 if(cpustate->lock && !cpustate->lock_table[0][cpustate->opcode])
2752 return I386OP(invalid)();
2754 if( cpustate->operand_size )
2755 (this->*cpustate->opcode_table1_32[cpustate->opcode])();
2757 (this->*cpustate->opcode_table1_16[cpustate->opcode])();
2760 /* Two-byte opcode 0f xx */
// Fetch and dispatch the second byte of a 0f-prefixed opcode, with
// LOCK legality checked against lock_table[1] (the two-byte map).
2761 void I386_OPS_BASE::I386OP(decode_two_byte)()
2763 cpustate->opcode = FETCH();
2765 if(cpustate->lock && !cpustate->lock_table[1][cpustate->opcode])
2766 return I386OP(invalid)();
2768 if( cpustate->operand_size )
2769 (this->*cpustate->opcode_table2_32[cpustate->opcode])();
2771 (this->*cpustate->opcode_table2_16[cpustate->opcode])();
2774 /* Three-byte opcode 0f 38 xx */
// Dispatch the third byte of a 0f 38 opcode through table338 (no LOCK check).
2775 void I386_OPS_BASE::I386OP(decode_three_byte38)()
2777 cpustate->opcode = FETCH();
2779 if (cpustate->operand_size)
2780 (this->*cpustate->opcode_table338_32[cpustate->opcode])();
2782 (this->*cpustate->opcode_table338_16[cpustate->opcode])();
2785 /* Three-byte opcode 0f 3a xx */
// Dispatch the third byte of a 0f 3a opcode through table33a.
2786 void I386_OPS_BASE::I386OP(decode_three_byte3a)()
2788 cpustate->opcode = FETCH();
2790 if (cpustate->operand_size)
2791 (this->*cpustate->opcode_table33a_32[cpustate->opcode])();
2793 (this->*cpustate->opcode_table33a_16[cpustate->opcode])();
2796 /* Three-byte opcode prefix 66 0f xx */
// Dispatch a 66-prefixed 0f opcode through table366.
2797 void I386_OPS_BASE::I386OP(decode_three_byte66)()
2799 cpustate->opcode = FETCH();
2800 if( cpustate->operand_size )
2801 (this->*cpustate->opcode_table366_32[cpustate->opcode])();
2803 (this->*cpustate->opcode_table366_16[cpustate->opcode])();
2806 /* Three-byte opcode prefix f2 0f xx */
// Dispatch an f2-prefixed 0f opcode through table3f2.
2807 void I386_OPS_BASE::I386OP(decode_three_bytef2)()
2809 cpustate->opcode = FETCH();
2810 if( cpustate->operand_size )
2811 (this->*cpustate->opcode_table3f2_32[cpustate->opcode])();
2813 (this->*cpustate->opcode_table3f2_16[cpustate->opcode])();
2816 /* Three-byte opcode prefix f3 0f */
// Dispatch an f3-prefixed 0f opcode through table3f3.
2817 void I386_OPS_BASE::I386OP(decode_three_bytef3)()
2819 cpustate->opcode = FETCH();
2820 if( cpustate->operand_size )
2821 (this->*cpustate->opcode_table3f3_32[cpustate->opcode])();
2823 (this->*cpustate->opcode_table3f3_16[cpustate->opcode])();
2826 /* Four-byte opcode prefix 66 0f 38 xx */
// Dispatch a 66 0f 38 opcode through table46638.
2827 void I386_OPS_BASE::I386OP(decode_four_byte3866)()
2829 cpustate->opcode = FETCH();
2830 if (cpustate->operand_size)
2831 (this->*cpustate->opcode_table46638_32[cpustate->opcode])();
2833 (this->*cpustate->opcode_table46638_16[cpustate->opcode])();
2836 /* Four-byte opcode prefix 66 0f 3a xx */
// Dispatch a 66 0f 3a opcode through table4663a.
2837 void I386_OPS_BASE::I386OP(decode_four_byte3a66)()
2839 cpustate->opcode = FETCH();
2840 if (cpustate->operand_size)
2841 (this->*cpustate->opcode_table4663a_32[cpustate->opcode])();
2843 (this->*cpustate->opcode_table4663a_16[cpustate->opcode])();
2846 /* Four-byte opcode prefix f2 0f 38 xx */
// Dispatch an f2 0f 38 opcode through table4f238.
2847 void I386_OPS_BASE::I386OP(decode_four_byte38f2)()
2849 cpustate->opcode = FETCH();
2850 if (cpustate->operand_size)
2851 (this->*cpustate->opcode_table4f238_32[cpustate->opcode])();
2853 (this->*cpustate->opcode_table4f238_16[cpustate->opcode])();
2856 /* Four-byte opcode prefix f2 0f 3a xx */
// Dispatch an f2 0f 3a opcode through table4f23a.
2857 void I386_OPS_BASE::I386OP(decode_four_byte3af2)()
2859 cpustate->opcode = FETCH();
2860 if (cpustate->operand_size)
2861 (this->*cpustate->opcode_table4f23a_32[cpustate->opcode])();
2863 (this->*cpustate->opcode_table4f23a_16[cpustate->opcode])();
2866 /* Four-byte opcode prefix f3 0f 38 xx */
// Dispatch an f3 0f 38 opcode through table4f338.
2867 void I386_OPS_BASE::I386OP(decode_four_byte38f3)()
2869 cpustate->opcode = FETCH();
2870 if (cpustate->operand_size)
2871 (this->*cpustate->opcode_table4f338_32[cpustate->opcode])();
2873 (this->*cpustate->opcode_table4f338_16[cpustate->opcode])();
2877 /*************************************************************************/
// After restoring saved state: reload all six segment descriptor caches
// from their selectors and resynchronize the execution PC with EIP.
2879 void I386_OPS_BASE::i386_postload()
2882 for (i = 0; i < 6; i++)
2883 i386_load_segment_descriptor(i);
2884 CHANGE_PC(cpustate->eip);
2887 #include "./i386_ops_table.h"
// One-time core initialization shared by all CPU variants.
// @param tlbsize  number of dynamic TLB entries for the vtlb
// Builds the parity lookup table and the MODRM reg/rm decode table,
// allocates the vtlb, and wires up the externally supplied memory/IO/PIC
// device pointers (d_mem/d_io/d_pic).
2889 i386_state *I386_OPS_BASE::i386_common_init(int tlbsize)
2892 static const int regs8[8] = {AL,CL,DL,BL,AH,CH,DH,BH};
2893 static const int regs16[8] = {AX,CX,DX,BX,SP,BP,SI,DI};
2894 static const int regs32[8] = {EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI};
// cpustate points at the statically embedded state, not heap memory.
2895 cpustate = &__cpustate;
2896 //cpustate = (i386_state *)malloc(sizeof(i386_state));
2897 //x86_cycle_table = _x86_cycle_table_real;
2898 //x86_opcode_table = _x86_opcode_table_fake;
2900 assert((sizeof(XMM_REG)/sizeof(double)) == 2);
2902 build_cycle_table();
// Parity table: 1 when the byte i has an even number of set bits (PF semantics).
2904 for( i=0; i < 256; i++ ) {
2906 for( j=0; j < 8; j++ ) {
2910 i386_parity_table[i] = ~(c & 0x1) & 0x1;
// MODRM decode table: bits 5-3 select the reg field, bits 2-0 the rm field.
2913 for( i=0; i < 256; i++ ) {
2914 i386_MODRM_table[i].reg.b = regs8[(i >> 3) & 0x7];
2915 i386_MODRM_table[i].reg.w = regs16[(i >> 3) & 0x7];
2916 i386_MODRM_table[i].reg.d = regs32[(i >> 3) & 0x7];
2918 i386_MODRM_table[i].rm.b = regs8[i & 0x7];
2919 i386_MODRM_table[i].rm.w = regs16[i & 0x7];
2920 i386_MODRM_table[i].rm.d = regs32[i & 0x7];
2923 cpustate->vtlb = vtlb_alloc((void *)cpustate, AS_PROGRAM, 0, tlbsize);
2924 cpustate->smi = false;
2925 cpustate->lock = false;
// SMI-acknowledge callback resolution from the original MAME device model
// is disabled in this port; kept for reference.
2927 // i386_interface *intf = (i386_interface *) device->static_config();
2929 // if (intf != NULL)
2930 // cpustate->smiact.resolve(intf->smiact, *device);
2932 // memset(&cpustate->smiact, 0, sizeof(cpustate->smiact));
2935 cpustate->program = d_mem;
2936 cpustate->io = d_io;
2937 cpustate->pic = d_pic;
// Release the vtlb allocated in i386_common_init() and null the pointer
// so later code can test cpustate->vtlb != NULL safely.
2941 void I386_OPS_BASE::i386_vtlb_free(void)
2943 vtlb_free(cpustate->vtlb);
2944 cpustate->vtlb = NULL;
// No-op: cpustate is statically embedded (see i386_common_init), so there
// is nothing to free; the old heap-based free is kept commented out.
2947 void I386_OPS_BASE::i386_free_state(void)
2949 //if(cpustate != NULL) free(cpustate);
// Variant initializer for a plain 80386: 32-entry TLB, base i386 opcode
// set only, and 386 cycle tables.
2952 void *I386_OPS_BASE::cpu_init_i386(void)
2954 i386_common_init(32);
2955 build_opcode_table(OP_I386);
2956 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I386];
2957 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I386];
// Save/load one segment register (selector + cached descriptor fields)
// through the FILEIO state serializer.
2962 void I386_OPS_BASE::process_state_SREG(I386_SREG* val, FILEIO* state_fio)
2964 state_fio->StateValue(val->selector);
2965 state_fio->StateValue(val->flags);
2966 state_fio->StateValue(val->base);
2967 state_fio->StateValue(val->limit);
2968 state_fio->StateValue(val->d);
2969 state_fio->StateValue(val->valid);
// Save/load a system table register (GDTR/IDTR): base and limit only.
2972 void I386_OPS_BASE::process_state_SYS_TABLE(I386_SYS_TABLE* val, FILEIO* state_fio)
2974 state_fio->StateValue(val->base);
2975 state_fio->StateValue(val->limit);
// Save/load a segment descriptor register (TR/LDTR): selector, access
// flags, base and limit.
2978 void I386_OPS_BASE::process_state_SEG_DESC(I386_SEG_DESC* val, FILEIO* state_fio)
2980 state_fio->StateValue(val->segment);
2981 state_fio->StateValue(val->flags);
2982 state_fio->StateValue(val->base);
2983 state_fio->StateValue(val->limit);
// Save/load the general-purpose register file. The d/w/b arrays are the
// overlapping dword/word/byte views of the same registers; all three are
// serialized for simplicity.
2986 void I386_OPS_BASE::process_state_GPR(I386_GPR* val, FILEIO* state_fio)
2988 state_fio->StateArray(val->d, sizeof(val->d), 1);
2989 state_fio->StateArray(val->w, sizeof(val->w), 1);
2990 state_fio->StateArray(val->b, sizeof(val->b), 1);
// Save/load one 80-bit x87 register as its high/low component pair.
2993 void I386_OPS_BASE::process_state_floatx80(floatx80* val, FILEIO* state_fio)
2995 state_fio->StateValue(val->high);
2996 state_fio->StateValue(val->low);
// Save/load one XMM register. Every union view of the 128-bit value is
// serialized in turn (byte/word/dword/qword, signed variants, floats).
2999 void I386_OPS_BASE::process_state_XMM_REG(XMM_REG* val, FILEIO* state_fio)
3001 state_fio->StateArray(val->b, sizeof(val->b), 1);
3002 state_fio->StateArray(val->w, sizeof(val->w), 1);
3003 state_fio->StateArray(val->d, sizeof(val->d), 1);
3004 state_fio->StateArray(val->q, sizeof(val->q), 1);
3005 state_fio->StateArray(val->c, sizeof(val->c), 1);
3006 state_fio->StateArray(val->s, sizeof(val->s), 1);
3007 state_fio->StateArray(val->i, sizeof(val->i), 1);
3008 state_fio->StateArray(val->l, sizeof(val->l), 1);
3009 state_fio->StateArray(val->f, sizeof(val->f), 1);
3010 state_fio->StateArray(val->f64, sizeof(val->f64), 1);
// Save/load the mutable parts of the vtlb. Structural fields (space,
// sizes, page shift, address width) are assumed to be reconstructed at
// init time and are intentionally not serialized — only dynindex and the
// live/fixedpages/table arrays, and each array only if it was allocated.
3013 void I386_OPS_BASE::process_state_vtlb(vtlb_state* val, FILEIO* state_fio)
3015 // state_fio->StateValue(val->space);
3016 // state_fio->StateValue(val->dynamic);
3017 // state_fio->StateValue(val->fixed);
3018 state_fio->StateValue(val->dynindex);
3019 // state_fio->StateValue(val->pageshift);
3020 // state_fio->StateValue(val->addrwidth);
3021 if(val->live != NULL) {
3022 state_fio->StateArray(val->live, val->fixed + val->dynamic, 1);
3024 if(val->fixedpages != NULL) {
3025 state_fio->StateArray(val->fixedpages, val->fixed, 1);
3027 if(val->table != NULL) {
3028 state_fio->StateArray(val->table, (size_t) 1 << (val->addrwidth - val->pageshift), 1);
// Serialize or deserialize the complete CPU state.
// @param state_fio  serializer (same call sequence for save and load)
// @param loading    true when restoring state
// Field order here IS the on-disk format: do not reorder calls without
// bumping the state version. NOTE(review): endianness handling is still
// an open ToDo per the comment below.
3035 // ToDo: Write endian
3036 vtlb_state *vtlb = cpustate->vtlb;
3037 void *cpudevice = NULL;
3038 offs_t *live = NULL;
3040 int *fixedpages = NULL;
3041 int fixedpages_size = 0;
3042 vtlb_entry *table = NULL;
3044 // vtlb_entry *save = NULL;
3045 // int save_size = 0;
// General registers and the six segment registers.
3048 process_state_GPR(&cpustate->reg, state_fio);
3049 for(int i = 0; i < (int)array_length(cpustate->sreg); i++) {
3050 process_state_SREG(&cpustate->sreg[i], state_fio);
// Program counters and the unpacked EFLAGS bits.
3052 state_fio->StateValue(cpustate->eip);
3053 state_fio->StateValue(cpustate->pc);
3054 state_fio->StateValue(cpustate->prev_eip);
3055 state_fio->StateValue(cpustate->prev_pc);
3056 state_fio->StateValue(cpustate->eflags);
3057 state_fio->StateValue(cpustate->eflags_mask);
3058 state_fio->StateValue(cpustate->CF);
3059 state_fio->StateValue(cpustate->DF);
3060 state_fio->StateValue(cpustate->SF);
3061 state_fio->StateValue(cpustate->OF);
3062 state_fio->StateValue(cpustate->ZF);
3063 state_fio->StateValue(cpustate->PF);
3064 state_fio->StateValue(cpustate->AF);
3065 state_fio->StateValue(cpustate->IF);
3066 state_fio->StateValue(cpustate->TF);
3067 state_fio->StateValue(cpustate->IOP1);
3068 state_fio->StateValue(cpustate->IOP2);
3069 state_fio->StateValue(cpustate->NT);
3070 state_fio->StateValue(cpustate->RF);
3071 state_fio->StateValue(cpustate->VM);
3072 state_fio->StateValue(cpustate->AC);
3073 state_fio->StateValue(cpustate->VIF);
3074 state_fio->StateValue(cpustate->VIP);
3075 state_fio->StateValue(cpustate->ID);
3076 state_fio->StateValue(cpustate->CPL);
3077 state_fio->StateValue(cpustate->performed_intersegment_jump);
3078 state_fio->StateValue(cpustate->delayed_interrupt_enable);
// Control/debug/test registers and the descriptor-table registers.
3079 state_fio->StateArray(cpustate->cr, sizeof(cpustate->cr), 1);
3080 state_fio->StateArray(cpustate->dr, sizeof(cpustate->dr), 1);
3081 state_fio->StateArray(cpustate->tr, sizeof(cpustate->tr), 1);
3082 process_state_SYS_TABLE(&cpustate->gdtr, state_fio);
3083 process_state_SYS_TABLE(&cpustate->idtr, state_fio);
3084 process_state_SEG_DESC(&cpustate->task, state_fio);
3085 process_state_SEG_DESC(&cpustate->ldtr, state_fio);
3086 state_fio->StateValue(cpustate->ext);
3087 state_fio->StateValue(cpustate->halted);
3088 state_fio->StateValue(cpustate->busreq);
3089 state_fio->StateValue(cpustate->shutdown);
// Per-instruction decode state and prefixes.
3090 state_fio->StateValue(cpustate->operand_size);
3091 state_fio->StateValue(cpustate->xmm_operand_size);
3092 state_fio->StateValue(cpustate->address_size);
3093 state_fio->StateValue(cpustate->operand_prefix);
3094 state_fio->StateValue(cpustate->address_prefix);
3095 state_fio->StateValue(cpustate->segment_prefix);
3096 state_fio->StateValue(cpustate->segment_override);
// Cycle accounting, interrupts, CPUID identity and counters.
3098 state_fio->StateValue(cpustate->total_cycles);
3100 state_fio->StateValue(cpustate->cycles);
3101 state_fio->StateValue(cpustate->extra_cycles);
3102 state_fio->StateValue(cpustate->base_cycles);
3103 state_fio->StateValue(cpustate->opcode);
3104 state_fio->StateValue(cpustate->irq_state);
3105 state_fio->StateValue(cpustate->a20_mask);
3106 state_fio->StateValue(cpustate->cpuid_max_input_value_eax);
3107 state_fio->StateValue(cpustate->cpuid_id0);
3108 state_fio->StateValue(cpustate->cpuid_id1);
3109 state_fio->StateValue(cpustate->cpuid_id2);
3110 state_fio->StateValue(cpustate->cpu_version);
3111 state_fio->StateValue(cpustate->feature_flags);
3112 state_fio->StateValue(cpustate->tsc);
3113 state_fio->StateArray(cpustate->perfctr, sizeof(cpustate->perfctr), 1);
// x87 FPU and SSE state.
3114 for(int i = 0; i < array_length(cpustate->x87_reg); i++) {
3115 process_state_floatx80(&cpustate->x87_reg[i], state_fio);
3117 state_fio->StateValue(cpustate->x87_cw);
3118 state_fio->StateValue(cpustate->x87_sw);
3119 state_fio->StateValue(cpustate->x87_tw);
3120 state_fio->StateValue(cpustate->x87_data_ptr);
3121 state_fio->StateValue(cpustate->x87_inst_ptr);
3122 state_fio->StateValue(cpustate->x87_opcode);
3123 for(int i = 0; i < array_length(cpustate->sse_reg); i++) {
3124 process_state_XMM_REG(&cpustate->sse_reg[i], state_fio);
3126 state_fio->StateValue(cpustate->mxcsr);
3127 state_fio->StateArray(&cpustate->lock_table[0][0], sizeof(cpustate->lock_table), 1);
// vtlb contents (only if allocated) and SMM/NMI latches.
3129 if(cpustate->vtlb != NULL) {
3130 process_state_vtlb(cpustate->vtlb, state_fio);
3132 state_fio->StateValue(cpustate->smm);
3133 state_fio->StateValue(cpustate->smi);
3134 state_fio->StateValue(cpustate->smi_latched);
3135 state_fio->StateValue(cpustate->nmi_masked);
3136 state_fio->StateValue(cpustate->nmi_latched);
3137 state_fio->StateValue(cpustate->smbase);
3138 // state_fio->StateValue(cpustate->smiact);
3139 state_fio->StateValue(cpustate->lock);
3140 #ifdef DEBUG_MISSING_OPCODE
3141 state_fio->StateArray(cpustate->opcode_bytes, sizeof(cpustate->opcode_bytes), 1);
3142 state_fio->StateValue(cpustate->opcode_pc);
3143 state_fio->StateValue(cpustate->opcode_bytes_length);
// Build the opcode dispatch tables for the selected CPU feature set.
// @param features  OR of OP_* capability flags; only table entries whose
//                  flags intersect `features` are installed.
// All tables are first filled with the invalid-opcode handler, then each
// matching entry from x86_opcode_table is routed to the table selected by
// its prefix-class flag. Entries with no prefix flag go into the one-byte
// tables (final else). lock_table records which opcodes accept LOCK.
3149 void I386_OPS_BASE::build_opcode_table(UINT32 features)
3152 i386_state *_cpustate = cpustate;
3153 for (i=0; i < 256; i++)
3155 _cpustate->opcode_table1_16[i] = &I386_OPS_BASE::I386OP(invalid);
3156 _cpustate->opcode_table1_32[i] = &I386_OPS_BASE::I386OP(invalid);
3157 _cpustate->opcode_table2_16[i] = &I386_OPS_BASE::I386OP(invalid);
3158 _cpustate->opcode_table2_32[i] = &I386_OPS_BASE::I386OP(invalid);
3159 _cpustate->opcode_table366_16[i] = &I386_OPS_BASE::I386OP(invalid);
3160 _cpustate->opcode_table366_32[i] = &I386_OPS_BASE::I386OP(invalid);
3161 _cpustate->opcode_table3f2_16[i] = &I386_OPS_BASE::I386OP(invalid);
3162 _cpustate->opcode_table3f2_32[i] = &I386_OPS_BASE::I386OP(invalid);
3163 _cpustate->opcode_table3f3_16[i] = &I386_OPS_BASE::I386OP(invalid);
3164 _cpustate->opcode_table3f3_32[i] = &I386_OPS_BASE::I386OP(invalid);
3165 _cpustate->lock_table[0][i] = false;
3166 _cpustate->lock_table[1][i] = false;
3169 for (i=0; i < (int)(sizeof(x86_opcode_table) / sizeof(X86_OPCODE)); i++)
3171 const X86_OPCODE *op = &x86_opcode_table[i];
3173 if ((op->flags & features))
3175 if (op->flags & OP_2BYTE)
// 0f-prefixed opcodes also populate the 66 0f table so a 66 prefix
// falls back to the plain two-byte handler unless overridden below.
3177 _cpustate->opcode_table2_32[op->opcode] = op->handler32;
3178 _cpustate->opcode_table2_16[op->opcode] = op->handler16;
3179 _cpustate->opcode_table366_32[op->opcode] = op->handler32;
3180 _cpustate->opcode_table366_16[op->opcode] = op->handler16;
3181 _cpustate->lock_table[1][op->opcode] = op->lockable;
3183 else if (op->flags & OP_3BYTE66)
3185 _cpustate->opcode_table366_32[op->opcode] = op->handler32;
3186 _cpustate->opcode_table366_16[op->opcode] = op->handler16;
3188 else if (op->flags & OP_3BYTEF2)
3190 _cpustate->opcode_table3f2_32[op->opcode] = op->handler32;
3191 _cpustate->opcode_table3f2_16[op->opcode] = op->handler16;
3193 else if (op->flags & OP_3BYTEF3)
3195 _cpustate->opcode_table3f3_32[op->opcode] = op->handler32;
3196 _cpustate->opcode_table3f3_16[op->opcode] = op->handler16;
3198 else if (op->flags & OP_3BYTE38)
3200 _cpustate->opcode_table338_32[op->opcode] = op->handler32;
3201 _cpustate->opcode_table338_16[op->opcode] = op->handler16;
3203 else if (op->flags & OP_3BYTE3A)
3205 _cpustate->opcode_table33a_32[op->opcode] = op->handler32;
3206 _cpustate->opcode_table33a_16[op->opcode] = op->handler16;
3208 else if (op->flags & OP_4BYTE3866)
3210 _cpustate->opcode_table46638_32[op->opcode] = op->handler32;
3211 _cpustate->opcode_table46638_16[op->opcode] = op->handler16;
3213 else if (op->flags & OP_4BYTE3A66)
3215 _cpustate->opcode_table4663a_32[op->opcode] = op->handler32;
3216 _cpustate->opcode_table4663a_16[op->opcode] = op->handler16;
3218 else if (op->flags & OP_4BYTE38F2)
3220 _cpustate->opcode_table4f238_32[op->opcode] = op->handler32;
3221 _cpustate->opcode_table4f238_16[op->opcode] = op->handler16;
3223 else if (op->flags & OP_4BYTE3AF2)
3225 _cpustate->opcode_table4f23a_32[op->opcode] = op->handler32;
3226 _cpustate->opcode_table4f23a_16[op->opcode] = op->handler16;
3228 else if (op->flags & OP_4BYTE38F3)
3230 _cpustate->opcode_table4f338_32[op->opcode] = op->handler32;
3231 _cpustate->opcode_table4f338_16[op->opcode] = op->handler16;
// No prefix-class flag: plain one-byte opcode.
3235 _cpustate->opcode_table1_32[op->opcode] = op->handler32;
3236 _cpustate->opcode_table1_16[op->opcode] = op->handler16;
3237 _cpustate->lock_table[0][op->opcode] = op->lockable;
// Clear the entire CPU state to power-on-blank values: registers, flags,
// descriptor tables, x87/SSE state and SMM latches. Cycle counters that
// must survive (cycles/base_cycles) are deliberately left untouched.
3244 void I386_OPS_BASE::zero_state()
3246 memset( &cpustate->reg, 0, sizeof(cpustate->reg) );
3247 memset( cpustate->sreg, 0, sizeof(cpustate->sreg) );
3249 cpustate->total_cycles = 0;
3250 cpustate->prev_total_cycles = 0;
3254 cpustate->prev_eip = 0;
3255 cpustate->eflags = 0;
3256 cpustate->eflags_mask = 0;
3276 cpustate->performed_intersegment_jump = 0;
3277 cpustate->delayed_interrupt_enable = 0;
3278 memset( cpustate->cr, 0, sizeof(cpustate->cr) );
3279 memset( cpustate->dr, 0, sizeof(cpustate->dr) );
3280 memset( cpustate->tr, 0, sizeof(cpustate->tr) );
3281 memset( &cpustate->gdtr, 0, sizeof(cpustate->gdtr) );
3282 memset( &cpustate->idtr, 0, sizeof(cpustate->idtr) );
3283 memset( &cpustate->task, 0, sizeof(cpustate->task) );
3284 memset( &cpustate->ldtr, 0, sizeof(cpustate->ldtr) );
3286 cpustate->halted = 0;
3287 cpustate->busreq = 0;
3288 cpustate->shutdown = 0;
3289 cpustate->operand_size = 0;
3290 cpustate->xmm_operand_size = 0;
3291 cpustate->address_size = 0;
3292 cpustate->operand_prefix = 0;
3293 cpustate->address_prefix = 0;
3294 cpustate->segment_prefix = 0;
3295 cpustate->segment_override = 0;
3296 // cpustate->cycles = 0;
3297 // cpustate->base_cycles = 0;
3298 cpustate->opcode = 0;
3299 cpustate->irq_state = 0;
3300 cpustate->a20_mask = 0;
3301 cpustate->cpuid_max_input_value_eax = 0;
3302 cpustate->cpuid_id0 = 0;
3303 cpustate->cpuid_id1 = 0;
3304 cpustate->cpuid_id2 = 0;
3305 cpustate->cpu_version = 0;
3306 cpustate->feature_flags = 0;
3308 cpustate->perfctr[0] = cpustate->perfctr[1] = 0;
// x87 and SSE state.
3309 memset( cpustate->x87_reg, 0, sizeof(cpustate->x87_reg) );
3310 cpustate->x87_cw = 0;
3311 cpustate->x87_sw = 0;
3312 cpustate->x87_tw = 0;
3313 cpustate->x87_data_ptr = 0;
3314 cpustate->x87_inst_ptr = 0;
3315 cpustate->x87_opcode = 0;
3316 memset( cpustate->sse_reg, 0, sizeof(cpustate->sse_reg) );
3317 cpustate->mxcsr = 0;
// System-management-mode and NMI latch state.
3318 cpustate->smm = false;
3319 cpustate->smi = false;
3320 cpustate->smi_latched = false;
3321 cpustate->nmi_masked = false;
3322 cpustate->nmi_latched = false;
3323 cpustate->smbase = 0;
3324 #ifdef DEBUG_MISSING_OPCODE
3325 memset( cpustate->opcode_bytes, 0, sizeof(cpustate->opcode_bytes) );
3326 cpustate->opcode_pc = 0;
3327 cpustate->opcode_bytes_length = 0;
// 80386 hardware reset: flush the dynamic TLB and set the architectural
// reset state (CS=F000 with base FFFF0000, EIP=FFF0, real mode, IDT at 0,
// A20 enabled). EDX receives the 386 family/model/stepping signature.
3331 void I386_OPS_BASE::cpu_reset_i386(void)
3334 vtlb_flush_dynamic(cpustate->vtlb);
3336 cpustate->sreg[CS].selector = 0xf000;
3337 cpustate->sreg[CS].base = 0xffff0000;
3338 cpustate->sreg[CS].limit = 0xffff;
3339 cpustate->sreg[CS].flags = 0x93;
3340 cpustate->sreg[CS].valid = true;
3342 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3343 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3344 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3345 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
3347 cpustate->idtr.base = 0;
3348 cpustate->idtr.limit = 0x3ff;
3349 cpustate->smm = false;
3350 cpustate->smi_latched = false;
3351 cpustate->nmi_masked = false;
3352 cpustate->nmi_latched = false;
3354 cpustate->a20_mask = ~0;
3356 cpustate->cr[0] = 0x7fffffe0; // reserved bits set to 1
3357 cpustate->eflags = 0;
3358 cpustate->eflags_mask = 0x00037fd7;
3359 cpustate->eip = 0xfff0;
3363 // [ 3:0] Stepping ID
3364 // Family 3 (386), Model 0 (DX), Stepping 8 (D1)
3366 REG32(EDX) = (3 << 8) | (0 << 4) | (8);
3370 CHANGE_PC(cpustate->eip);
// Enter System Management Mode (Pentium layout): dump the current CPU
// context into the SMRAM state-save area at smbase+0xfe00, then load the
// SMM execution environment (flat 4GB data segments, CS at smbase,
// EIP=0x8000) and mask NMI until RSM.
3373 void I386_OPS_BASE::pentium_smi()
3375 UINT32 smram_state = cpustate->smbase + 0xfe00;
3376 UINT32 old_cr0 = cpustate->cr[0];
3377 UINT32 old_flags = get_flags();
// Clear PE/EM/TS/PG etc. so SMM runs in a real-mode-like environment.
3382 cpustate->cr[0] &= ~(0x8000000d);
3384 // if(!cpustate->smiact.isnull())
3385 // cpustate->smiact(true);
3386 cpustate->smm = true;
3387 cpustate->smi_latched = false;
// --- Save-state area: segment caches, descriptor tables, TR ---
3390 WRITE32(cpustate->cr[4], smram_state+SMRAM_IP5_CR4);
3391 WRITE32(cpustate->sreg[ES].limit, smram_state+SMRAM_IP5_ESLIM);
3392 WRITE32(cpustate->sreg[ES].base, smram_state+SMRAM_IP5_ESBASE);
3393 WRITE32(cpustate->sreg[ES].flags, smram_state+SMRAM_IP5_ESACC);
3394 WRITE32(cpustate->sreg[CS].limit, smram_state+SMRAM_IP5_CSLIM);
3395 WRITE32(cpustate->sreg[CS].base, smram_state+SMRAM_IP5_CSBASE);
3396 WRITE32(cpustate->sreg[CS].flags, smram_state+SMRAM_IP5_CSACC);
3397 WRITE32(cpustate->sreg[SS].limit, smram_state+SMRAM_IP5_SSLIM);
3398 WRITE32(cpustate->sreg[SS].base, smram_state+SMRAM_IP5_SSBASE);
3399 WRITE32(cpustate->sreg[SS].flags, smram_state+SMRAM_IP5_SSACC);
3400 WRITE32(cpustate->sreg[DS].limit, smram_state+SMRAM_IP5_DSLIM);
3401 WRITE32(cpustate->sreg[DS].base, smram_state+SMRAM_IP5_DSBASE);
3402 WRITE32(cpustate->sreg[DS].flags, smram_state+SMRAM_IP5_DSACC);
3403 WRITE32(cpustate->sreg[FS].limit, smram_state+SMRAM_IP5_FSLIM);
3404 WRITE32(cpustate->sreg[FS].base, smram_state+SMRAM_IP5_FSBASE);
3405 WRITE32(cpustate->sreg[FS].flags, smram_state+SMRAM_IP5_FSACC);
3406 WRITE32(cpustate->sreg[GS].limit, smram_state+SMRAM_IP5_GSLIM);
3407 WRITE32(cpustate->sreg[GS].base, smram_state+SMRAM_IP5_GSBASE);
3408 WRITE32(cpustate->sreg[GS].flags, smram_state+SMRAM_IP5_GSACC);
3409 WRITE32(cpustate->ldtr.flags, smram_state+SMRAM_IP5_LDTACC);
3410 WRITE32(cpustate->ldtr.limit, smram_state+SMRAM_IP5_LDTLIM);
3411 WRITE32(cpustate->ldtr.base, smram_state+SMRAM_IP5_LDTBASE);
3412 WRITE32(cpustate->gdtr.limit, smram_state+SMRAM_IP5_GDTLIM);
3413 WRITE32(cpustate->gdtr.base, smram_state+SMRAM_IP5_GDTBASE);
3414 WRITE32(cpustate->idtr.limit, smram_state+SMRAM_IP5_IDTLIM);
3415 WRITE32(cpustate->idtr.base, smram_state+SMRAM_IP5_IDTBASE);
3416 WRITE32(cpustate->task.limit, smram_state+SMRAM_IP5_TRLIM);
3417 WRITE32(cpustate->task.base, smram_state+SMRAM_IP5_TRBASE);
3418 WRITE32(cpustate->task.flags, smram_state+SMRAM_IP5_TRACC);
// --- Selectors, debug registers, GPRs, EIP/EFLAGS, CR3/CR0 ---
3420 WRITE32(cpustate->sreg[ES].selector, smram_state+SMRAM_ES);
3421 WRITE32(cpustate->sreg[CS].selector, smram_state+SMRAM_CS);
3422 WRITE32(cpustate->sreg[SS].selector, smram_state+SMRAM_SS);
3423 WRITE32(cpustate->sreg[DS].selector, smram_state+SMRAM_DS);
3424 WRITE32(cpustate->sreg[FS].selector, smram_state+SMRAM_FS);
3425 WRITE32(cpustate->sreg[GS].selector, smram_state+SMRAM_GS);
3426 WRITE32(cpustate->ldtr.segment, smram_state+SMRAM_LDTR);
3427 WRITE32(cpustate->task.segment, smram_state+SMRAM_TR);
3429 WRITE32(cpustate->dr[7], smram_state+SMRAM_DR7);
3430 WRITE32(cpustate->dr[6], smram_state+SMRAM_DR6);
3431 WRITE32(REG32(EAX), smram_state+SMRAM_EAX);
3432 WRITE32(REG32(ECX), smram_state+SMRAM_ECX);
3433 WRITE32(REG32(EDX), smram_state+SMRAM_EDX);
3434 WRITE32(REG32(EBX), smram_state+SMRAM_EBX);
3435 WRITE32(REG32(ESP), smram_state+SMRAM_ESP);
3436 WRITE32(REG32(EBP), smram_state+SMRAM_EBP);
3437 WRITE32(REG32(ESI), smram_state+SMRAM_ESI);
3438 WRITE32(REG32(EDI), smram_state+SMRAM_EDI);
3439 WRITE32(cpustate->eip, smram_state+SMRAM_EIP);
3440 WRITE32(old_flags, smram_state+SMRAM_EFLAGS);
3441 WRITE32(cpustate->cr[3], smram_state+SMRAM_CR3);
3442 WRITE32(old_cr0, smram_state+SMRAM_CR0);
// --- SMM entry environment: flat segments, CS at smbase, EIP=0x8000 ---
3444 cpustate->sreg[DS].selector = cpustate->sreg[ES].selector = cpustate->sreg[FS].selector = cpustate->sreg[GS].selector = cpustate->sreg[SS].selector = 0;
3445 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3446 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffffffff;
3447 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x8093;
3448 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
3449 cpustate->sreg[CS].selector = 0x3000; // pentium only, ppro sel = smbase >> 4
3450 cpustate->sreg[CS].base = cpustate->smbase;
3451 cpustate->sreg[CS].limit = 0xffffffff;
3452 cpustate->sreg[CS].flags = 0x8093;
3453 cpustate->sreg[CS].valid = true;
3454 cpustate->cr[4] = 0;
3455 cpustate->dr[7] = 0x400;
3456 cpustate->eip = 0x8000;
// NMI stays masked until the SMM handler executes RSM.
3458 cpustate->nmi_masked = true;
3459 CHANGE_PC(cpustate->eip);
// Assert or clear an interrupt line.
// @param irqline  INPUT_LINE_NMI or the maskable-IRQ line
// @param state    line level (CLEAR_LINE or asserted)
// Any asserted line wakes a halted CPU. A latched NMI is remembered while
// nmi_masked (e.g. during SMM). Cycles consumed while servicing the line
// change are moved into extra_cycles so the main loop's budget is unchanged.
3462 void I386_OPS_BASE::i386_set_irq_line(int irqline, int state)
3464 int first_cycles = cpustate->cycles;
3466 if (state != CLEAR_LINE && cpustate->halted)
3468 cpustate->halted = 0;
3471 if ( irqline == INPUT_LINE_NMI )
3473 /* NMI (I do not think that this is 100% right) */
3474 if(cpustate->nmi_masked)
3476 cpustate->nmi_latched = true;
3484 cpustate->irq_state = state;
3486 cpustate->extra_cycles += first_cycles - cpustate->cycles;
3487 cpustate->cycles = first_cycles;
// Drive the A20 gate: non-zero state enables all address bits, zero
// forces bit 20 low (8086 wrap-around compatibility). The dynamic TLB is
// flushed because cached translations may embed the old mask.
3490 void I386_OPS_BASE::i386_set_a20_line(int state)
3494 cpustate->a20_mask = ~0;
3498 cpustate->a20_mask = ~(1 << 20);
3500 // TODO: how does A20M and the tlb interact
3501 vtlb_flush_dynamic(cpustate->vtlb);
3504 // BASE execution : EXECUTE without DMA, BIOS and debugger.
3505 #include "../../debugger.h"
// Main execution entry point (no DMA/BIOS/debugger hooks in this BASE
// variant). Runs instructions until the cycle budget is exhausted or
// busreq is raised, and returns the number of cycles actually consumed.
// Halted/busreq CPUs still burn at least 1 cycle so time advances.
3506 int I386_OPS_BASE::cpu_execute_i386(int cycles)
3508 CHANGE_PC(cpustate->eip);
3510 if (cpustate->halted || cpustate->busreq)
3513 int passed_cycles = max(1, cpustate->extra_cycles);
3514 // this is main cpu, cpustate->cycles is not used
3515 /*cpustate->cycles = */cpustate->extra_cycles = 0;
3516 cpustate->tsc += passed_cycles;
3517 cpustate->total_cycles += passed_cycles;
3518 return passed_cycles;
3520 cpustate->cycles += cycles;
3521 cpustate->base_cycles = cpustate->cycles;
3523 /* adjust for any interrupts that came in */
3524 cpustate->cycles -= cpustate->extra_cycles;
3525 cpustate->extra_cycles = 0;
3527 /* if busreq is raised, spin cpu while remained clock */
3528 if (cpustate->cycles > 0) {
3529 cpustate->cycles = 0;
3531 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3532 cpustate->tsc += passed_cycles;
3533 cpustate->total_cycles += passed_cycles;
3534 return passed_cycles;
// Normal path: establish the cycle budget for this timeslice.
3539 cpustate->cycles = 1;
3541 cpustate->cycles += cycles;
3543 cpustate->base_cycles = cpustate->cycles;
3545 /* adjust for any interrupts that came in */
3546 cpustate->cycles -= cpustate->extra_cycles;
3547 cpustate->extra_cycles = 0;
// Instruction loop: one iteration decodes and executes one instruction.
3549 while( cpustate->cycles > 0 && !cpustate->busreq )
3551 int first_cycles = cpustate->cycles;
3552 i386_check_irq_line();
// Default operand/address size comes from the CS descriptor's D bit;
// prefixes may override it per instruction.
3553 cpustate->operand_size = cpustate->sreg[CS].d;
3554 cpustate->xmm_operand_size = 0;
3555 cpustate->address_size = cpustate->sreg[CS].d;
3556 cpustate->operand_prefix = 0;
3557 cpustate->address_prefix = 0;
// TF is sampled before execution so the single-step trap fires after
// this instruction, not after the one that sets TF.
3560 int old_tf = cpustate->TF;
3562 cpustate->debugger->add_cpu_trace(cpustate->pc);
3563 cpustate->segment_prefix = 0;
3564 cpustate->prev_eip = cpustate->eip;
3565 cpustate->prev_pc = cpustate->pc;
3567 if(cpustate->delayed_interrupt_enable != 0)
3570 cpustate->delayed_interrupt_enable = 0;
3572 #ifdef DEBUG_MISSING_OPCODE
3573 cpustate->opcode_bytes_length = 0;
3574 cpustate->opcode_pc = cpustate->pc;
3578 I386OP(decode_opcode)();
3579 if(cpustate->TF && old_tf)
3581 cpustate->prev_eip = cpustate->eip;
// LOCK only applies to the immediately following instruction.
3585 if(cpustate->lock && (cpustate->opcode != 0xf0))
3586 cpustate->lock = false;
// Exception path: re-raise the pending fault (error code in high bits).
3591 i386_trap_with_error(e&0xffffffff,0,0,e>>32);
3593 /* adjust for any interrupts that came in */
3594 cpustate->cycles -= cpustate->extra_cycles;
3595 cpustate->extra_cycles = 0;
3596 cpustate->total_cycles += first_cycles - cpustate->cycles;
3599 /* if busreq is raised, spin cpu while remained clock */
3600 if (cpustate->cycles > 0 && cpustate->busreq) {
3601 cpustate->total_cycles += cpustate->cycles;
3602 cpustate->cycles = 0;
3604 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3605 cpustate->tsc += passed_cycles;
3606 cpustate->total_cycles += passed_cycles;
3607 return passed_cycles;
3610 /*************************************************************************/
// Debugger/externally-driven address translation: for the program space,
// run the address through paging/segmentation via i386_translate_address,
// then apply the A20 mask. Returns the translate result code.
3612 int I386_OPS_BASE::cpu_translate_i386(void *cpudevice, address_spacenum space, int intention, offs_t *address)
3614 i386_state *cpu_state = (i386_state *)cpudevice;
3616 if(space == AS_PROGRAM)
3617 ret = i386_translate_address(intention, address, NULL);
3618 *address &= cpu_state->a20_mask;
3622 /*****************************************************************************/
// Variant initializer for an 80486: 32-entry TLB, i386+FPU+486 opcode
// set, x87 opcode tables, and 486 cycle tables.
3626 void *I386_OPS_BASE::cpu_init_i486(void)
3628 i386_common_init(32);
3629 build_opcode_table(OP_I386 | OP_FPU | OP_I486);
3630 build_x87_opcode_table();
3631 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I486];
3632 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I486];
// 80486 hardware reset: same real-mode entry state as the 386 reset but
// with CR0=0x00000010 (ET set, reserved bits clear), the wider 486
// EFLAGS mask, and the 486 family/model/stepping signature in EDX.
3636 void I386_OPS_BASE::cpu_reset_i486(void)
3639 vtlb_flush_dynamic(cpustate->vtlb);
3641 cpustate->sreg[CS].selector = 0xf000;
3642 cpustate->sreg[CS].base = 0xffff0000;
3643 cpustate->sreg[CS].limit = 0xffff;
3644 cpustate->sreg[CS].flags = 0x0093;
3646 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3647 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3648 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3650 cpustate->idtr.base = 0;
3651 cpustate->idtr.limit = 0x3ff;
3653 cpustate->a20_mask = ~0;
3655 cpustate->cr[0] = 0x00000010;
3656 cpustate->eflags = 0;
3657 cpustate->eflags_mask = 0x00077fd7;
3658 cpustate->eip = 0xfff0;
3659 cpustate->smm = false;
3660 cpustate->smi_latched = false;
3661 cpustate->nmi_masked = false;
3662 cpustate->nmi_latched = false;
3668 // [ 3:0] Stepping ID
3669 // Family 4 (486), Model 0/1 (DX), Stepping 3
3671 REG32(EDX) = (4 << 8) | (0 << 4) | (3);
3673 CHANGE_PC(cpustate->eip);
3676 /*****************************************************************************/
3680 void *I386_OPS_BASE::cpu_init_pentium(void)
3682 // 64 dtlb small, 8 dtlb large, 32 itlb
3683 i386_common_init(96);
3684 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM);
3685 build_x87_opcode_table();
3686 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];
3687 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];
// Reset the Pentium core: real mode at the reset vector, SMM cleared,
// and the CPUID identity ("GenuineIntel", family 5) preloaded.
void I386_OPS_BASE::cpu_reset_pentium(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h with base FFFF0000h: first fetch at physical
	// FFFFFFF0h once EIP = FFF0h is set below.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x10: ET set, protection/paging off.
	cpustate->cr[0] = 0x00000010;
	cpustate->eflags = 0x00200000;
	// Presumably the writable-EFLAGS mask for a Pentium — confirm against
	// the flag-handling code.
	cpustate->eflags_mask = 0x003f7fd7;
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 5 (Pentium), Model 2 (75 - 200MHz), Stepping 5
	REG32(EDX) = (5 << 8) | (2 << 4) | (5);
	// CPUID leaf 0 vendor string, returned in EBX:EDX:ECX.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x01;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	// [ 2:2] I/O breakpoints
	// [ 4:4] Time Stamp Counter
	// [ 5:5] Pentium CPU style model specific registers
	// [ 7:7] Machine Check Exception
	// [ 8:8] CMPXCHG8B instruction
	cpustate->feature_flags = 0x000001bf;
	CHANGE_PC(cpustate->eip);
3748 /*****************************************************************************/
3752 void *I386_OPS_BASE::cpu_init_mediagx(void)
3754 // probably 32 unified
3755 i386_common_init(32);
3756 build_x87_opcode_table();
3757 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_CYRIX);
3758 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_MEDIAGX];
3759 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_MEDIAGX];
// Reset the MediaGX core: real mode at the reset vector with the Cyrix
// CPUID identity ("CyrixInstead").  Note: no MXCSR/SMBASE setup here,
// unlike the Intel Pentium-class resets in this file.
void I386_OPS_BASE::cpu_reset_mediagx(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x10: ET set, protection/paging off.
	cpustate->cr[0] = 0x00000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// Clear SMM state and pending-interrupt latches.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 4, Model 4 (MediaGX)
	REG32(EDX) = (4 << 8) | (4 << 4) | (1); /* TODO: is this correct? */
	// CPUID leaf 0 vendor string ("CyrixInstead", first 12 chars).
	cpustate->cpuid_id0 = 0x69727943; // Cyri
	cpustate->cpuid_id1 = 0x736e4978; // xIns
	cpustate->cpuid_id2 = 0x6d616574; // tead
	cpustate->cpuid_max_input_value_eax = 0x01;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	cpustate->feature_flags = 0x00000001;
	CHANGE_PC(cpustate->eip);
3813 /*****************************************************************************/
3814 /* Intel Pentium Pro */
3816 void *I386_OPS_BASE::cpu_init_pentium_pro(void)
3818 // 64 dtlb small, 32 itlb
3819 i386_common_init(96);
3820 build_x87_opcode_table();
3821 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO);
3822 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3823 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium Pro core: real mode at the reset vector, SMM
// cleared, "GenuineIntel" family-6 CPUID identity preloaded.
void I386_OPS_BASE::cpu_reset_pentium_pro(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x60000010: CD and NW set (caches disabled at reset) plus ET;
	// protection/paging off.
	cpustate->cr[0] = 0x60000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 6, Model 1 (Pentium Pro)
	REG32(EDX) = (6 << 8) | (1 << 4) | (1); /* TODO: is this correct? */
	// CPUID leaf 0 vendor string.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x02;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	// [ 2:2] I/O breakpoints
	// [ 4:4] Time Stamp Counter
	// [ 5:5] Pentium CPU style model specific registers
	// [ 7:7] Machine Check Exception
	// [ 8:8] CMPXCHG8B instruction
	// [15:15] CMOV and FCMOV
	cpustate->feature_flags = 0x000081bf;
	CHANGE_PC(cpustate->eip);
3886 /*****************************************************************************/
3887 /* Intel Pentium MMX */
3889 void *I386_OPS_BASE::cpu_init_pentium_mmx(void)
3891 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3892 i386_common_init(96);
3893 build_x87_opcode_table();
3894 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_MMX);
3895 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3896 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium MMX (P55C) core: real mode at the reset vector,
// SMM cleared, "GenuineIntel" family-5/model-4 CPUID identity preloaded.
void I386_OPS_BASE::cpu_reset_pentium_mmx(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x60000010: CD/NW set (caches disabled at reset) plus ET.
	cpustate->cr[0] = 0x60000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 5, Model 4 (P55C)
	REG32(EDX) = (5 << 8) | (4 << 4) | (1);
	// CPUID leaf 0 vendor string.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x01;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	// [ 2:2] I/O breakpoints
	// [ 4:4] Time Stamp Counter
	// [ 5:5] Pentium CPU style model specific registers
	// [ 7:7] Machine Check Exception
	// [ 8:8] CMPXCHG8B instruction
	// [23:23] MMX instructions
	cpustate->feature_flags = 0x008001bf;
	CHANGE_PC(cpustate->eip);
3958 /*****************************************************************************/
3959 /* Intel Pentium II */
3961 void *I386_OPS_BASE::cpu_init_pentium2(void)
3963 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3964 i386_common_init(96);
3965 build_x87_opcode_table();
3966 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX);
3967 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3968 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium II core: real mode at the reset vector, SMM cleared,
// "GenuineIntel" family-6/model-3 (Klamath) CPUID identity preloaded.
void I386_OPS_BASE::cpu_reset_pentium2(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x60000010: CD/NW set (caches disabled at reset) plus ET.
	cpustate->cr[0] = 0x60000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 6, Model 3 (Pentium II / Klamath)
	REG32(EDX) = (6 << 8) | (3 << 4) | (1); /* TODO: is this correct? */
	// CPUID leaf 0 vendor string.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x02;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits (value also encodes CMOV/MMX bits):
	// [ 0:0] FPU on chip
	cpustate->feature_flags = 0x008081bf; // TODO: enable relevant flags here
	CHANGE_PC(cpustate->eip);
4024 /*****************************************************************************/
4025 /* Intel Pentium III */
4027 void *I386_OPS_BASE::cpu_init_pentium3(void)
4029 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
4030 i386_common_init(96);
4031 build_x87_opcode_table();
4032 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE);
4033 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4034 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium III core: real mode at the reset vector, SMM
// cleared, "GenuineIntel" family-6/model-8 (Coppermine) identity.
void I386_OPS_BASE::cpu_reset_pentium3(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x0093;
	// Data/stack segments: base 0, 64KB limit, writable-data access byte.
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x60000010: CD/NW set (caches disabled at reset) plus ET.
	cpustate->cr[0] = 0x60000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX: [11:8] Family, [ 7:4] Model,
	// [ 3:0] Stepping ID
	// Family 6, Model 8 (Pentium III / Coppermine)
	REG32(EDX) = (6 << 8) | (8 << 4) | (10);
	// CPUID leaf 0 vendor string.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x03;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	// [ 4:4] Time Stamp Counter
	// [ D:D] PTE Global Bit
	cpustate->feature_flags = 0x00002011; // TODO: enable relevant flags here
	CHANGE_PC(cpustate->eip);
4092 /*****************************************************************************/
4093 /* Intel Pentium 4 */
4095 void *I386_OPS_BASE::cpu_init_pentium4(void)
4097 // 128 dtlb, 64 itlb
4098 i386_common_init(196);
4099 build_x87_opcode_table();
4100 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE | OP_SSE2);
4101 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
4102 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium 4 core: real mode at the reset vector, SMM cleared,
// "GenuineIntel" family-15 (Willamette) identity.  Unlike the earlier
// models in this file, CS gets an executable-code access byte (0x9b)
// and the data segments 0x92 — presumably deliberate; confirm upstream.
void I386_OPS_BASE::cpu_reset_pentium4(void)
	// Drop every cached linear->physical translation.
	vtlb_flush_dynamic(cpustate->vtlb);
	// CS selector F000h, base FFFF0000h: first fetch at physical FFFFFFF0h.
	cpustate->sreg[CS].selector = 0xf000;
	cpustate->sreg[CS].base = 0xffff0000;
	cpustate->sreg[CS].limit = 0xffff;
	cpustate->sreg[CS].flags = 0x009b;
	// Data/stack segments: base 0, 64KB limit, access byte 0x92
	// (present, writable data).
	cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
	cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
	cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
	// Real-mode IDT: 256 four-byte vectors at physical 0.
	cpustate->idtr.base = 0;
	cpustate->idtr.limit = 0x3ff;
	// A20 gate open: no address wrap-around.
	cpustate->a20_mask = ~0;
	// CR0 = 0x60000010: CD/NW set (caches disabled at reset) plus ET.
	cpustate->cr[0] = 0x60000010;
	cpustate->eflags = 0x00200000;
	cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
	cpustate->eip = 0xfff0;
	// SSE control/status register power-on default.
	cpustate->mxcsr = 0x1f80;
	// Clear SMM state; SMRAM base defaults to 30000h.
	cpustate->smm = false;
	cpustate->smi_latched = false;
	cpustate->smbase = 0x30000;
	cpustate->nmi_masked = false;
	cpustate->nmi_latched = false;
	// Reset signature in EDX:
	// [27:20] Extended family
	// [19:16] Extended model
	// [ 3: 0] Stepping ID
	// Family 15, Model 0 (Pentium 4 / Willamette)
	REG32(EDX) = (0 << 20) | (0xf << 8) | (0 << 4) | (1);
	// CPUID leaf 0 vendor string.
	cpustate->cpuid_id0 = 0x756e6547; // Genu
	cpustate->cpuid_id1 = 0x49656e69; // ineI
	cpustate->cpuid_id2 = 0x6c65746e; // ntel
	cpustate->cpuid_max_input_value_eax = 0x02;
	cpustate->cpu_version = REG32(EDX);
	// CPUID leaf 1 EDX feature bits:
	// [ 0:0] FPU on chip
	cpustate->feature_flags = 0x00000001; // TODO: enable relevant flags here
	CHANGE_PC(cpustate->eip);