1 // license:BSD-3-Clause
2 // copyright-holders:Ville Linde, Barry Rodewald, Carl, Phil Bennett
20 #include "./i386_opdef.h"
21 #include "./i386ops.h"
23 /* seems to be defined on mingw-gcc */
// Raise a CPU exception with an error code and immediately return from the
// calling function.  Sets cpustate->ext (external-event flag used by the
// privilege checks inside the trap path) before dispatching.
26 #define FAULT(fault,error) {cpustate->ext = 1; i386_trap_with_error(fault,0,0,error); return;}
// Same as FAULT, but passes trap_level+1 as the nesting level so that a fault
// raised while servicing an earlier fault can escalate to double/triple fault.
27 #define FAULT_EXP(fault,error) {cpustate->ext = 1; i386_trap_with_error(fault,0,trap_level+1,error); return;}
29 /*************************************************************************/
// Read a segment descriptor from the GDT or LDT into *seg, decoding the
// cached base, limit, flags and D (default operand size) bit.  If desc is
// non-NULL the raw 64-bit descriptor is stored there as well.
31 UINT32 I386_OPS_BASE::i386_load_protected_mode_segment( I386_SREG *seg, UINT64 *desc )
// TI bit (bit 2) of the selector chooses the LDT; clear means GDT.
47 if ( seg->selector & 0x4 )
49 base = cpustate->ldtr.base;
50 limit = cpustate->ldtr.limit;
52 base = cpustate->gdtr.base;
53 limit = cpustate->gdtr.limit;
// Table offset: selector with the RPL and TI bits stripped.
56 entry = seg->selector & ~0x7;
// Reject an empty table, or an 8-byte entry running past the table limit.
57 if (limit == 0 || entry + 7 > limit)
// Fetch the two descriptor dwords (PL0 reads: privilege checks bypassed).
60 v1 = READ32PL0(base + entry )
61 v2 = READ32PL0(base + entry + 4 );
63 seg->flags = (v2 >> 8) & 0xf0ff;
// Base address is split across both dwords: bits 31-24 and 23-16 in v2,
// bits 15-0 in v1.
64 seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff);
65 seg->limit = (v2 & 0xf0000) | (v1 & 0xffff);
// G (granularity) bit set: limit is in 4KB units, scale it to bytes.
66 if (seg->flags & 0x8000)
67 seg->limit = (seg->limit << 12) | 0xfff;
// D/B bit: 32-bit default operand/address size when set.
68 seg->d = (seg->flags & 0x4000) ? 1 : 0;
72 *desc = ((UINT64)v2<<32)|v1;
// Read a gate descriptor (call/interrupt/trap/task gate) from the GDT or
// LDT and unpack its fields into *gate.
76 void I386_OPS_BASE::i386_load_call_gate(I386_CALL_GATE *gate)
// TI bit of the gate selector picks the LDT; otherwise the GDT.
82 if ( gate->segment & 0x4 )
84 base = cpustate->ldtr.base;
85 limit = cpustate->ldtr.limit;
87 base = cpustate->gdtr.base;
88 limit = cpustate->gdtr.limit;
91 entry = gate->segment & ~0x7;
// Empty table or entry past the table limit: invalid gate selector.
92 if (limit == 0 || entry + 7 > limit)
// Fetch the two gate dwords with privilege checks bypassed.
95 v1 = READ32PL0(base + entry );
96 v2 = READ32PL0(base + entry + 4 );
98 /* Note that for task gates, offset and dword_count are not used */
// Target code-segment selector (low dword, high half).
99 gate->selector = (v1 >> 16) & 0xffff;
// Entry-point offset: low 16 bits from v1, high 16 bits from v2.
100 gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000);
// Access-rights byte: present bit, DPL, gate type.
101 gate->ar = (v2 >> 8) & 0xff;
102 gate->dword_count = v2 & 0x001f;
103 gate->present = (gate->ar >> 7) & 0x01;
104 gate->dpl = (gate->ar >> 5) & 0x03;
// Set the "accessed" bit (bit 0 of the access-rights byte) in the descriptor
// referenced by 'selector'.  The caller guarantees the selector is valid.
107 void I386_OPS_BASE::i386_set_descriptor_accessed( UINT16 selector)
109 // assume the selector is valid, we don't need to check it again
// TI bit chooses the descriptor table.
115 if ( selector & 0x4 )
116 base = cpustate->ldtr.base;
118 base = cpustate->gdtr.base;
// The access-rights byte lives at offset 5 inside the 8-byte descriptor.
120 addr = base + (selector & ~7) + 5;
// Translate the linear address before the raw program-space read/write.
121 i386_translate_address(TRANSLATE_READ, &addr, NULL);
122 rights = cpustate->program->read_data8(addr);
123 // Should a fault be thrown if the table is read only?
124 cpustate->program->write_data8(addr, rights | 1);
// Refresh the cached (hidden) part of a segment register after its selector
// has been written.  In protected mode the descriptor is fetched from the
// GDT/LDT and marked accessed; otherwise base/limit/flags are synthesized
// real-mode style from the selector.
127 void I386_OPS_BASE::i386_load_segment_descriptor( int segment )
133 i386_load_protected_mode_segment(&cpustate->sreg[segment], NULL );
// Only a non-null selector gets its descriptor's accessed bit set.
134 if(cpustate->sreg[segment].selector)
135 i386_set_descriptor_accessed(cpustate->sreg[segment].selector);
// Real/V8086-style segment: base = selector * 16, 64KB limit, and fixed
// access rights (code-like flags for CS, data-like for the rest).
139 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
140 cpustate->sreg[segment].limit = 0xffff;
141 cpustate->sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3;
142 cpustate->sreg[segment].d = 0;
143 cpustate->sreg[segment].valid = true;
148 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
149 cpustate->sreg[segment].d = 0;
150 cpustate->sreg[segment].valid = true;
// Until the first far (intersegment) jump, CS keeps its high base bits set
// -- presumably modelling the post-reset CS base near the top of the
// address space; confirm against the full source.
152 if( segment == CS && !cpustate->performed_intersegment_jump )
153 cpustate->sreg[segment].base |= 0xfff00000;
157 /* Retrieves the stack selector located in the current TSS */
// 'privilege' is the target ring (0-2); returns the SS selector stored in
// the TSS slot for that ring.
158 UINT32 I386_OPS_BASE::i386_get_stack_segment(UINT8 privilege)
// Task flags bit 3 set = 386 (32-bit) TSS: SS entries are 8 bytes apart
// starting at offset 8; otherwise 286 TSS: 4 bytes apart from offset 4.
164 if(cpustate->task.flags & 8)
165 ret = READ32PL0((cpustate->task.base+8) + (8*privilege));
167 ret = READ16PL0((cpustate->task.base+4) + (4*privilege));
172 /* Retrieves the stack pointer located in the current TSS */
// Companion to i386_get_stack_segment(): returns the (E)SP value stored in
// the TSS slot for the requested privilege level.
173 UINT32 I386_OPS_BASE::i386_get_stack_ptr(UINT8 privilege)
// 386 TSS: 32-bit ESP entries every 8 bytes from offset 4;
// 286 TSS: 16-bit SP entries every 4 bytes from offset 2.
179 if(cpustate->task.flags & 8)
180 ret = READ32PL0((cpustate->task.base+4) + (8*privilege));
182 ret = READ16PL0((cpustate->task.base+2) + (4*privilege));
// Assemble the EFLAGS image from the individually-stored flag fields.
// Bits covered by eflags_mask come from the freshly-built image; the
// remaining bits are taken from the stored eflags value.
187 UINT32 I386_OPS_BASE::get_flags()
191 f |= cpustate->PF << 2;
192 f |= cpustate->AF << 4;
193 f |= cpustate->ZF << 6;
194 f |= cpustate->SF << 7;
195 f |= cpustate->TF << 8;
196 f |= cpustate->IF << 9;
197 f |= cpustate->DF << 10;
198 f |= cpustate->OF << 11;
// IOPL occupies bits 12-13; NT is bit 14.
199 f |= cpustate->IOP1 << 12;
200 f |= cpustate->IOP2 << 13;
201 f |= cpustate->NT << 14;
// 386+ system flags: RF, VM, AC, VIF, VIP, ID.
202 f |= cpustate->RF << 16;
203 f |= cpustate->VM << 17;
204 f |= cpustate->AC << 18;
205 f |= cpustate->VIF << 19;
206 f |= cpustate->VIP << 20;
207 f |= cpustate->ID << 21;
208 return (cpustate->eflags & ~cpustate->eflags_mask) | (f & cpustate->eflags_mask);
// Scatter an EFLAGS image into the individual flag fields (inverse of
// get_flags()); the raw value, masked by eflags_mask, is also kept in
// cpustate->eflags.
211 void I386_OPS_BASE::set_flags( UINT32 f )
213 cpustate->CF = (f & 0x1) ? 1 : 0;
214 cpustate->PF = (f & 0x4) ? 1 : 0;
215 cpustate->AF = (f & 0x10) ? 1 : 0;
216 cpustate->ZF = (f & 0x40) ? 1 : 0;
217 cpustate->SF = (f & 0x80) ? 1 : 0;
218 cpustate->TF = (f & 0x100) ? 1 : 0;
219 cpustate->IF = (f & 0x200) ? 1 : 0;
220 cpustate->DF = (f & 0x400) ? 1 : 0;
221 cpustate->OF = (f & 0x800) ? 1 : 0;
// IOPL bits 12-13, NT bit 14.
222 cpustate->IOP1 = (f & 0x1000) ? 1 : 0;
223 cpustate->IOP2 = (f & 0x2000) ? 1 : 0;
224 cpustate->NT = (f & 0x4000) ? 1 : 0;
// 386+ system flags.
225 cpustate->RF = (f & 0x10000) ? 1 : 0;
226 cpustate->VM = (f & 0x20000) ? 1 : 0;
227 cpustate->AC = (f & 0x40000) ? 1 : 0;
228 cpustate->VIF = (f & 0x80000) ? 1 : 0;
229 cpustate->VIP = (f & 0x100000) ? 1 : 0;
230 cpustate->ID = (f & 0x200000) ? 1 : 0;
231 cpustate->eflags = f & cpustate->eflags_mask;
// Decode a SIB (scale-index-base) byte for 32-bit addressing modes,
// producing the effective address and the default segment register.
// 'mod' (from the preceding mod/rm byte) selects the displacement handling.
234 void I386_OPS_BASE::sib_byte(UINT8 mod, UINT32* out_ea, UINT8* out_segment)
238 UINT8 scale, i, base;
// scale = bits 7-6, index = bits 5-3 of the SIB byte.
240 scale = (sib >> 6) & 0x3;
241 i = (sib >> 3) & 0x7;
// Base register selection; ESP-based addressing defaults to SS.
246 case 0: ea = REG32(EAX); segment = DS; break;
247 case 1: ea = REG32(ECX); segment = DS; break;
248 case 2: ea = REG32(EDX); segment = DS; break;
249 case 3: ea = REG32(EBX); segment = DS; break;
250 case 4: ea = REG32(ESP); segment = SS; break;
// mod == 1/2 select 8-bit / 32-bit displacement forms for base == 5.
255 } else if( mod == 1 ) {
258 } else if( mod == 2 ) {
263 case 6: ea = REG32(ESI); segment = DS; break;
264 case 7: ea = REG32(EDI); segment = DS; break;
// Add the index register shifted by the scale factor.  No case for index 4
// is visible here -- on x86, index == 4 encodes "no index register".
268 case 0: ea += REG32(EAX) * (1 << scale); break;
269 case 1: ea += REG32(ECX) * (1 << scale); break;
270 case 2: ea += REG32(EDX) * (1 << scale); break;
271 case 3: ea += REG32(EBX) * (1 << scale); break;
273 case 5: ea += REG32(EBP) * (1 << scale); break;
274 case 6: ea += REG32(ESI) * (1 << scale); break;
275 case 7: ea += REG32(EDI) * (1 << scale); break;
278 *out_segment = segment;
// Decode a mod/rm byte into an effective address and the default segment,
// handling both 32-bit and 16-bit addressing modes.  mod == 3 (register
// operand) is a caller error and aborts.
281 void I386_OPS_BASE::modrm_to_EA(UINT8 mod_rm, UINT32* out_ea, UINT8* out_segment)
286 UINT8 mod = (mod_rm >> 6) & 0x3;
287 UINT8 rm = mod_rm & 0x7;
// mod == 3 encodes a register, not a memory operand -- hard failure.
292 fatalerror("i386: Called modrm_to_EA with modrm value %02X!\n",mod_rm);
// 32-bit addressing path.
295 if( cpustate->address_size ) {
299 case 0: ea = REG32(EAX); segment = DS; break;
300 case 1: ea = REG32(ECX); segment = DS; break;
301 case 2: ea = REG32(EDX); segment = DS; break;
302 case 3: ea = REG32(EBX); segment = DS; break;
// rm == 4: a SIB byte follows and takes over base/index decoding.
303 case 4: sib_byte(mod, &ea, &segment ); break;
// rm == 5: disp32-only when mod == 0, otherwise EBP-based (SS default).
306 ea = FETCH32(); segment = DS;
308 ea = REG32(EBP); segment = SS;
311 case 6: ea = REG32(ESI); segment = DS; break;
312 case 7: ea = REG32(EDI); segment = DS; break;
// mod == 1/2 add an 8-bit / 32-bit displacement (bodies elided here).
317 } else if( mod == 2 ) {
// A segment-override prefix replaces the default segment.
322 if( cpustate->segment_prefix )
323 segment = cpustate->segment_override;
326 *out_segment = segment;
// 16-bit addressing path: classic BX/BP/SI/DI combinations; BP-based
// forms default to SS.
332 case 0: ea = REG16(BX) + REG16(SI); segment = DS; break;
333 case 1: ea = REG16(BX) + REG16(DI); segment = DS; break;
334 case 2: ea = REG16(BP) + REG16(SI); segment = SS; break;
335 case 3: ea = REG16(BP) + REG16(DI); segment = SS; break;
336 case 4: ea = REG16(SI); segment = DS; break;
337 case 5: ea = REG16(DI); segment = DS; break;
// rm == 6: disp16-only when mod == 0, otherwise BP-based.
340 ea = FETCH16(); segment = DS;
342 ea = REG16(BP); segment = SS;
345 case 7: ea = REG16(BX); segment = DS; break;
// mod == 1/2 add an 8-bit / 16-bit displacement (bodies elided here).
350 } else if( mod == 2 ) {
355 if( cpustate->segment_prefix )
356 segment = cpustate->segment_override;
// 16-bit EA wraps at 64KB.
358 *out_ea = ea & 0xffff;
359 *out_segment = segment;
// Decode mod/rm into a segment-relative (untranslated) effective address.
// If 'seg' is non-NULL, also reports which segment register applies.
363 UINT32 I386_OPS_BASE::GetNonTranslatedEA(UINT8 modrm,UINT8 *seg)
367 modrm_to_EA(modrm, &ea, &segment );
368 if(seg) *seg = segment;
// Decode mod/rm and translate the effective address through the segment;
// 'rwn' selects the access type used by the translation/limit checks.
372 UINT32 I386_OPS_BASE::GetEA(UINT8 modrm, int rwn)
376 modrm_to_EA(modrm, &ea, &segment );
377 return i386_translate(segment, ea, rwn );
380 /* Check segment register for validity when changing privilege level after an RETF */
// Re-validates one data segment register after a privilege change; if any
// check fails the register is silently loaded with the null selector.
381 void I386_OPS_BASE::i386_check_sreg_validity(int reg)
383 UINT16 selector = cpustate->sreg[reg].selector;
384 UINT8 CPL = cpustate->CPL;
// Fetch the descriptor for the current selector.
389 memset(&desc, 0, sizeof(desc));
390 desc.selector = selector;
391 i386_load_protected_mode_segment(&desc,NULL);
392 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
393 RPL = selector & 0x03;
395 /* Must be within the relevant descriptor table limits */
398 if((selector & ~0x07) > cpustate->ldtr.limit)
403 if((selector & ~0x07) > cpustate->gdtr.limit)
407 /* Must be either a data or readable code segment */
// flags bits 3-4 == 11 -> code segment (bit 1 = readable);
// flags bits 3-4 == 10 -> data segment.
408 if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010)
413 /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */
414 if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010)
416 if((DPL < CPL) || (DPL < RPL))
420 /* if segment is invalid, then segment register is nulled */
423 cpustate->sreg[reg].selector = 0;
424 i386_load_segment_descriptor(reg);
// Check an offset against a segment's limit.  Returns nonzero on violation
// (exact return values are elided in this view).  Only meaningful in
// protected mode outside V8086.
428 int I386_OPS_BASE::i386_limit_check( int seg, UINT32 offset)
430 if(PROTECTED_MODE && !V8086_MODE)
// Expand-down data segment (data segment with the E bit set): valid
// offsets lie ABOVE the limit, up to 0xffff (16-bit) or 0xffffffff.
432 if((cpustate->sreg[seg].flags & 0x0018) == 0x0010 && cpustate->sreg[seg].flags & 0x0004) // if expand-down data segment
434 // compare if greater then 0xffffffff when we're passed the access size
435 if((offset <= cpustate->sreg[seg].limit) || ((cpustate->sreg[seg].d)?0:(offset > 0xffff)))
437 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x (expand-down)\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
// Normal (expand-up) segment: offset must not exceed the limit.
443 if(offset > cpustate->sreg[seg].limit)
445 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
// Load a segment register via MOV/POP, performing the protected-mode
// permission checks.  On success *fault is set false and the register is
// loaded; on failure a #GP/#NP/#SS fault is raised through the FAULT macro
// (which sets *fault true via the path at line 469 and returns).
453 void I386_OPS_BASE::i386_sreg_load( UINT16 selector, UINT8 reg, bool *fault)
455 // Checks done when MOV changes a segment register in protected mode
459 RPL = selector & 0x0003;
// Real mode / V8086: no checks, just reload the shadow descriptor.
461 if(!PROTECTED_MODE || V8086_MODE)
463 cpustate->sreg[reg].selector = selector;
464 i386_load_segment_descriptor(reg);
465 if(fault) *fault = false;
// Assume failure until all checks pass.
469 if(fault) *fault = true;
// ---- SS-specific checks ----
474 memset(&stack, 0, sizeof(stack));
475 stack.selector = selector;
476 i386_load_protected_mode_segment(&stack,NULL);
477 DPL = (stack.flags >> 5) & 0x03;
// SS may never be loaded with the null selector.
479 if((selector & ~0x0003) == 0)
481 logerror("SReg Load (%08x): Selector is null.\n",cpustate->pc);
// Selector must lie within its descriptor table.
484 if(selector & 0x0004) // LDT
486 if((selector & ~0x0007) > cpustate->ldtr.limit)
488 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
489 FAULT(FAULT_GP,selector & ~0x03)
494 if((selector & ~0x0007) > cpustate->gdtr.limit)
496 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
497 FAULT(FAULT_GP,selector & ~0x03)
// For SS, RPL must equal CPL, the segment must be writable data, its DPL
// must equal CPL, and it must be present (#SS if not present).
502 logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",cpustate->pc);
503 FAULT(FAULT_GP,selector & ~0x03)
505 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
507 logerror("SReg Load (%08x): Segment is not a writable data segment.\n",cpustate->pc);
508 FAULT(FAULT_GP,selector & ~0x03)
512 logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",cpustate->pc);
513 FAULT(FAULT_GP,selector & ~0x03)
515 if(!(stack.flags & 0x0080))
517 logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
518 FAULT(FAULT_SS,selector & ~0x03)
// ---- DS/ES/FS/GS checks ----
521 if(reg == DS || reg == ES || reg == FS || reg == GS)
// The null selector is legal for data segments (faults only on use).
525 if((selector & ~0x0003) == 0)
527 cpustate->sreg[reg].selector = selector;
528 i386_load_segment_descriptor(reg );
529 if(fault) *fault = false;
533 memset(&desc, 0, sizeof(desc));
534 desc.selector = selector;
535 i386_load_protected_mode_segment(&desc,NULL);
536 DPL = (desc.flags >> 5) & 0x03;
// Bounds check against the owning descriptor table.
538 if(selector & 0x0004) // LDT
540 if((selector & ~0x0007) > cpustate->ldtr.limit)
542 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
543 FAULT(FAULT_GP,selector & ~0x03)
548 if((selector & ~0x0007) > cpustate->gdtr.limit)
550 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
551 FAULT(FAULT_GP,selector & ~0x03)
// Must be a data segment, or a readable code segment.
554 if((desc.flags & 0x0018) != 0x10)
556 if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10))
558 logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",cpustate->pc);
559 FAULT(FAULT_GP,selector & ~0x03)
// Data segments and non-conforming code segments additionally require
// both RPL and CPL to be <= DPL.
562 if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18)))
564 // if data or non-conforming code segment
565 if((RPL > DPL) || (CPL > DPL))
567 logerror("SReg Load (%08x): Selector RPL or CPL is not less or equal to segment DPL.\n",cpustate->pc);
568 FAULT(FAULT_GP,selector & ~0x03)
// Not-present data segment raises #NP (not #SS as for stacks).
571 if(!(desc.flags & 0x0080))
573 logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
574 FAULT(FAULT_NP,selector & ~0x03)
// All checks passed: commit the new selector.
578 cpustate->sreg[reg].selector = selector;
579 i386_load_segment_descriptor(reg );
580 if(fault) *fault = false;
// Dispatch an interrupt/trap/fault through the IDT.  'irq' is the vector,
// 'irq_gate' distinguishes external interrupts from faults (affects which
// EIP is pushed), 'trap_level' tracks fault nesting for double/triple-fault
// escalation.  Handles real mode, V8086, task gates, and interrupt/trap
// gates at same or inner privilege.
583 void I386_OPS_BASE::i386_trap(int irq, int irq_gate, int trap_level)
585 /* I386 Interrupts/Traps/Faults:
587 * 0x00 Divide by zero
588 * 0x01 Debug exception
592 * 0x05 Array bounds check
593 * 0x06 Illegal Opcode
594 * 0x07 FPU not available
596 * 0x09 Coprocessor segment overrun
597 * 0x0a Invalid task state
598 * 0x0b Segment not present
599 * 0x0c Stack exception
600 * 0x0d General Protection Fault
603 * 0x10 Coprocessor error
606 UINT32 offset, oldflags = get_flags();
// IDT entries are 8 bytes in protected mode, 4 (seg:off pair) in real mode.
608 int entry = irq * (PROTECTED_MODE ? 8 : 4);
610 cpustate->lock = false;
// ---- Real mode: push FLAGS/CS/IP and vector through the IVT ----
612 if( !(PROTECTED_MODE) )
615 PUSH16(oldflags & 0xffff );
616 PUSH16(cpustate->sreg[CS].selector );
// Traps (INT3/INTO/etc.) and external IRQs return past the instruction;
// faults return TO the faulting instruction (prev_eip).
617 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
618 PUSH16(cpustate->eip );
620 PUSH16(cpustate->prev_eip );
622 cpustate->sreg[CS].selector = READ16(cpustate->idtr.base + entry + 2 );
623 cpustate->eip = READ16(cpustate->idtr.base + entry );
// ---- Protected mode ----
633 UINT8 CPL = cpustate->CPL, DPL = 0; //, RPL = 0;
// Fetch and unpack the IDT gate descriptor.
636 v1 = READ32PL0(cpustate->idtr.base + entry );
637 v2 = READ32PL0(cpustate->idtr.base + entry + 4 );
638 offset = (v2 & 0xffff0000) | (v1 & 0xffff);
639 segment = (v1 >> 16) & 0xffff;
640 type = (v2>>8) & 0x1F;
641 flags = (v2>>8) & 0xf0ff;
// Fault nesting: second fault -> #DF, third -> CPU shutdown/reset.
645 logerror("IRQ: Double fault.\n");
646 FAULT_EXP(FAULT_DF,0);
650 logerror("IRQ: Triple fault. CPU reset.\n");
651 CPU_RESET_CALL(i386); //!
652 cpustate->shutdown = 1;
656 /* segment privilege checks */
657 if(entry >= cpustate->idtr.limit)
659 logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",cpustate->pc,entry);
660 FAULT_EXP(FAULT_GP,entry+2)
662 /* segment must be interrupt gate, trap gate, or task gate */
663 if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f)
665 logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,cpustate->pc,segment);
666 FAULT_EXP(FAULT_GP,entry+2)
// Software INTs check gate DPL against CPL; V8086 additionally needs
// IOPL == 3 (except for INT3, opcode 0xcc).
669 if(cpustate->ext == 0) // if software interrupt (caused by INT/INTO/INT3)
671 if(((flags >> 5) & 0x03) < CPL)
673 logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",cpustate->pc);
674 FAULT_EXP(FAULT_GP,entry+2)
678 if((!cpustate->IOP1 || !cpustate->IOP2) && (cpustate->opcode != 0xcc))
680 logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
687 if((flags & 0x0080) == 0)
689 logerror("IRQ: Vector segment is not present.\n");
690 FAULT_EXP(FAULT_NP,entry+2)
// ---- Task gate: switch to the TSS named by the gate ----
696 memset(&desc, 0, sizeof(desc));
697 desc.selector = segment;
698 i386_load_protected_mode_segment(&desc,NULL);
// TSS selector must be in the GDT, within bounds, an available TSS,
// and present.
701 logerror("IRQ: Task gate: TSS is not in the GDT.\n");
702 FAULT_EXP(FAULT_TS,segment & ~0x03);
706 if(segment > cpustate->gdtr.limit)
708 logerror("IRQ: Task gate: TSS is past GDT limit.\n");
709 FAULT_EXP(FAULT_TS,segment & ~0x03);
712 if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01)
714 logerror("IRQ: Task gate: TSS is not an available TSS.\n");
715 FAULT_EXP(FAULT_TS,segment & ~0x03);
717 if((desc.flags & 0x0080) == 0)
719 logerror("IRQ: Task gate: TSS is not present.\n");
720 FAULT_EXP(FAULT_NP,segment & ~0x03);
// Faults save the faulting instruction's EIP before the task switch.
722 if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1))
723 cpustate->eip = cpustate->prev_eip;
// Descriptor type bit 3 distinguishes 386 TSS from 286 TSS.
724 if(desc.flags & 0x08)
725 i386_task_switch(desc.selector,1);
727 i286_task_switch(desc.selector,1);
732 /* Interrupt or Trap gate */
733 memset(&desc, 0, sizeof(desc));
734 desc.selector = segment;
735 i386_load_protected_mode_segment(&desc,NULL);
736 CPL = cpustate->CPL; // current privilege level
737 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
738 // RPL = segment & 0x03; // requested privilege level
// Target code segment: non-null, within its table, a code segment,
// and present.
740 if((segment & ~0x03) == 0)
742 logerror("IRQ: Gate segment is null.\n");
743 FAULT_EXP(FAULT_GP,cpustate->ext)
747 if((segment & ~0x07) > cpustate->ldtr.limit)
749 logerror("IRQ: Gate segment is past LDT limit.\n");
750 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
755 if((segment & ~0x07) > cpustate->gdtr.limit)
757 logerror("IRQ: Gate segment is past GDT limit.\n");
758 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
761 if((desc.flags & 0x0018) != 0x18)
763 logerror("IRQ: Gate descriptor is not a code segment.\n");
764 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
766 if((desc.flags & 0x0080) == 0)
768 logerror("IRQ: Gate segment is not present.\n");
769 FAULT_EXP(FAULT_NP,(segment & 0x03)+cpustate->ext)
// ---- Non-conforming target with DPL < CPL: inner-privilege transfer ----
771 if((desc.flags & 0x0004) == 0 && (DPL < CPL))
773 /* IRQ to inner privilege */
775 UINT32 newESP,oldSS,oldESP;
777 if(V8086_MODE && DPL)
779 logerror("IRQ: Gate to CPL>0 from VM86 mode.\n");
780 FAULT_EXP(FAULT_GP,segment & ~0x03);
782 /* Check new stack segment in TSS */
783 memset(&stack, 0, sizeof(stack));
784 stack.selector = i386_get_stack_segment(DPL);
785 i386_load_protected_mode_segment(&stack,NULL);
786 oldSS = cpustate->sreg[SS].selector;
791 if((stack.selector & ~0x03) == 0)
793 logerror("IRQ: New stack selector is null.\n");
794 FAULT_EXP(FAULT_GP,cpustate->ext)
796 if(stack.selector & 0x04)
// NOTE(review): this compares against ldtr.base, but every similar
// bounds check in this file uses ldtr.limit -- looks like a bug;
// confirm against upstream (MAME fixed this to .limit).
798 if((stack.selector & ~0x07) > cpustate->ldtr.base)
800 logerror("IRQ: New stack selector is past LDT limit.\n");
801 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
// NOTE(review): same issue here -- gdtr.base should likely be
// gdtr.limit.
806 if((stack.selector & ~0x07) > cpustate->gdtr.base)
808 logerror("IRQ: New stack selector is past GDT limit.\n");
809 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
// New SS must have RPL == DPL, descriptor DPL == DPL, be writable
// data, and be present.
812 if((stack.selector & 0x03) != DPL)
814 logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n");
815 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
817 if(((stack.flags >> 5) & 0x03) != DPL)
819 logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n");
820 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
822 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
824 logerror("IRQ: New stack segment is not a writable data segment.\n");
825 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
827 if((stack.flags & 0x0080) == 0)
829 logerror("IRQ: New stack segment is not present.\n");
830 FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
// Room check: a 32-bit frame needs 20 bytes (36 from V8086, which also
// saves GS/FS/DS/ES); a 16-bit frame needs 10 (18 from V8086).  The
// expand-down case (flags & 4) checks against the inverted limit.
832 newESP = i386_get_stack_ptr(DPL);
833 if(type & 0x08) // 32-bit gate
835 if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4)))
837 logerror("IRQ: New stack has no space for return addresses.\n");
838 FAULT_EXP(FAULT_SS,0)
844 if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4)))
846 logerror("IRQ: New stack has no space for return addresses.\n");
847 FAULT_EXP(FAULT_SS,0)
850 if(offset > desc.limit)
852 logerror("IRQ: New EIP is past code segment limit.\n");
853 FAULT_EXP(FAULT_GP,0)
855 /* change CPL before accessing the stack */
857 /* check for page fault at new stack TODO: check if stack frame crosses page boundary */
858 WRITE_TEST(stack.base+newESP-1);
859 /* Load new stack segment descriptor */
860 cpustate->sreg[SS].selector = stack.selector;
861 i386_load_protected_mode_segment(&cpustate->sreg[SS],NULL);
862 i386_set_descriptor_accessed(stack.selector);
// Coming from V8086: save and then null the data segment registers.
866 //logerror("IRQ (%08x): Interrupt during V8086 task\n",cpustate->pc);
869 PUSH32(cpustate->sreg[GS].selector & 0xffff);
870 PUSH32(cpustate->sreg[FS].selector & 0xffff);
871 PUSH32(cpustate->sreg[DS].selector & 0xffff);
872 PUSH32(cpustate->sreg[ES].selector & 0xffff);
876 PUSH16(cpustate->sreg[GS].selector);
877 PUSH16(cpustate->sreg[FS].selector);
878 PUSH16(cpustate->sreg[DS].selector);
879 PUSH16(cpustate->sreg[ES].selector);
881 cpustate->sreg[GS].selector = 0;
882 cpustate->sreg[FS].selector = 0;
883 cpustate->sreg[DS].selector = 0;
884 cpustate->sreg[ES].selector = 0;
886 i386_load_segment_descriptor(GS);
887 i386_load_segment_descriptor(FS);
888 i386_load_segment_descriptor(DS);
889 i386_load_segment_descriptor(ES);
// ---- Same-privilege transfer (conforming target or DPL == CPL) ----
908 if((desc.flags & 0x0004) || (DPL == CPL))
910 /* IRQ to same privilege */
911 if(V8086_MODE && !cpustate->ext)
913 logerror("IRQ: Gate to same privilege from VM86 mode.\n");
914 FAULT_EXP(FAULT_GP,segment & ~0x03);
916 if(type == 0x0e || type == 0x0f) // 32-bit gate
// Current stack must have room for the return frame.
920 // TODO: Add check for error code (2 extra bytes)
921 if(REG32(ESP) < stack_limit)
923 logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit);
924 FAULT_EXP(FAULT_SS,0)
926 if(offset > desc.limit)
928 logerror("IRQ: Gate segment offset is past segment limit.\n");
929 FAULT_EXP(FAULT_GP,0)
// Non-conforming with DPL > CPL would be a privilege violation.
935 logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n");
936 FAULT_EXP(FAULT_GP,segment)
940 UINT32 tempSP = REG32(ESP);
// Push the return frame: 16-bit for 286 gates, 32-bit for 386 gates.
943 // this is ugly but the alternative is worse
944 if(type != 0x0e && type != 0x0f) // if not 386 interrupt or trap gate
946 PUSH16(oldflags & 0xffff );
947 PUSH16(cpustate->sreg[CS].selector );
948 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
949 PUSH16(cpustate->eip );
951 PUSH16(cpustate->prev_eip );
955 PUSH32(oldflags & 0x00ffffff );
956 PUSH32(cpustate->sreg[CS].selector );
957 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
958 PUSH32(cpustate->eip );
960 PUSH32(cpustate->prev_eip );
// Load the handler's CS:EIP; the selector's RPL becomes the new CPL.
969 segment = (segment & ~0x03) | cpustate->CPL;
970 cpustate->sreg[CS].selector = segment;
971 cpustate->eip = offset;
// Interrupt gates (0x0e/0x06) clear IF; trap gates leave it set.
973 if(type == 0x0e || type == 0x06)
979 i386_load_segment_descriptor(CS);
980 CHANGE_PC(cpustate->eip);
// Dispatch a trap and then push the error code for the exceptions that
// carry one (#DF, #TS, #NP, #SS, #GP, #PF).  The gate/descriptor type is
// re-read to decide the push width (the push itself is elided in this view).
984 void I386_OPS_BASE::i386_trap_with_error(int irq, int irq_gate, int trap_level, UINT32 error)
986 i386_trap(irq,irq_gate,trap_level);
987 if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14)
989 // for these exceptions, an error code is pushed onto the stack by the processor.
990 // no error code is pushed for software interrupts, either.
993 UINT32 entry = irq * 8;
// Gate type from the IDT entry's second dword.
995 v2 = READ32PL0(cpustate->idtr.base + entry + 4 );
996 type = (v2>>8) & 0x1F;
// For a task gate, follow the TSS selector into the GDT and use the TSS
// descriptor's type instead.
999 v2 = READ32PL0(cpustate->idtr.base + entry);
1000 v2 = READ32PL0(cpustate->gdtr.base + ((v2 >> 16) & 0xfff8) + 4);
1001 type = (v2>>8) & 0x1F;
// Perform a 286-style task switch to the TSS named by 'selector'.  Saves
// the outgoing 16-bit register state into the current TSS, loads the new
// TSS into TR and restores state from it.  'nested' selects CALL/INT-style
// nesting (back-link written, busy bit of the old task preserved) versus
// JMP-style switching.
1014 void I386_OPS_BASE::i286_task_switch( UINT16 selector, UINT8 nested)
1019 UINT8 ar_byte; // access rights byte
1021 /* TODO: Task State Segment privilege checks */
1023 /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
// Busy bit is bit 1 of the access-rights byte at descriptor offset 5.
1026 if(cpustate->task.segment & 0x0004)
1028 ar_byte = READ8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1029 WRITE8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1033 ar_byte = READ8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1034 WRITE8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1038 /* Save the state of the current task in the current TSS (TR register base) */
1039 tss = cpustate->task.base;
1040 WRITE16(tss+0x0e,cpustate->eip & 0x0000ffff);
1041 WRITE16(tss+0x10,get_flags() & 0x0000ffff);
1042 WRITE16(tss+0x12,REG16(AX));
1043 WRITE16(tss+0x14,REG16(CX));
1044 WRITE16(tss+0x16,REG16(DX));
1045 WRITE16(tss+0x18,REG16(BX));
1046 WRITE16(tss+0x1a,REG16(SP));
1047 WRITE16(tss+0x1c,REG16(BP));
1048 WRITE16(tss+0x1e,REG16(SI));
1049 WRITE16(tss+0x20,REG16(DI));
1050 WRITE16(tss+0x22,cpustate->sreg[ES].selector);
1051 WRITE16(tss+0x24,cpustate->sreg[CS].selector);
1052 WRITE16(tss+0x26,cpustate->sreg[SS].selector);
1053 WRITE16(tss+0x28,cpustate->sreg[DS].selector);
// Remember the outgoing task for the back-link write below.
1055 old_task = cpustate->task.segment;
1057 /* Load task register with the selector of the incoming task */
1058 cpustate->task.segment = selector;
1059 memset(&seg, 0, sizeof(seg));
1060 seg.selector = cpustate->task.segment;
1061 i386_load_protected_mode_segment(&seg,NULL);
1062 cpustate->task.limit = seg.limit;
1063 cpustate->task.base = seg.base;
1064 cpustate->task.flags = seg.flags;
1066 /* Set TS bit in CR0 */
1067 cpustate->cr[0] |= 0x08;
1069 /* Load incoming task state from the new task's TSS */
1070 tss = cpustate->task.base;
// LDTR first, since the segment reloads below may reference the LDT.
1071 cpustate->ldtr.segment = READ16(tss+0x2a) & 0xffff;
1072 seg.selector = cpustate->ldtr.segment;
1073 i386_load_protected_mode_segment(&seg,NULL);
1074 cpustate->ldtr.limit = seg.limit;
1075 cpustate->ldtr.base = seg.base;
1076 cpustate->ldtr.flags = seg.flags;
1077 cpustate->eip = READ16(tss+0x0e);
1078 set_flags(READ16(tss+0x10));
1079 REG16(AX) = READ16(tss+0x12);
1080 REG16(CX) = READ16(tss+0x14);
1081 REG16(DX) = READ16(tss+0x16);
1082 REG16(BX) = READ16(tss+0x18);
1083 REG16(SP) = READ16(tss+0x1a);
1084 REG16(BP) = READ16(tss+0x1c);
1085 REG16(SI) = READ16(tss+0x1e);
1086 REG16(DI) = READ16(tss+0x20);
1087 cpustate->sreg[ES].selector = READ16(tss+0x22) & 0xffff;
1088 i386_load_segment_descriptor(ES);
1089 cpustate->sreg[CS].selector = READ16(tss+0x24) & 0xffff;
1090 i386_load_segment_descriptor(CS);
1091 cpustate->sreg[SS].selector = READ16(tss+0x26) & 0xffff;
1092 i386_load_segment_descriptor(SS);
1093 cpustate->sreg[DS].selector = READ16(tss+0x28) & 0xffff;
1094 i386_load_segment_descriptor(DS);
1096 /* Set the busy bit in the new task's descriptor */
1097 if(selector & 0x0004)
1099 ar_byte = READ8(cpustate->ldtr.base + (selector & ~0x0007) + 5);
1100 WRITE8(cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1104 ar_byte = READ8(cpustate->gdtr.base + (selector & ~0x0007) + 5);
1105 WRITE8(cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1108 /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
1109 and set the NT flag in the EFLAGS register */
1112 WRITE16(tss+0,old_task);
1115 CHANGE_PC(cpustate->eip);
// CPL comes from the incoming stack segment's DPL.
1117 cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1118 // printf("286 Task Switch from selector %04x to %04x\n",old_task,selector);
// Perform a 386-style (32-bit TSS) task switch to the TSS named by
// 'selector'.  Like i286_task_switch() but saves/restores 32-bit registers,
// FS/GS, and CR3 (flushing the TLB when CR3 changes).  'nested' selects
// CALL/INT-style nesting (back-link + NT) versus JMP-style switching.
1121 void I386_OPS_BASE::i386_task_switch( UINT16 selector, UINT8 nested)
1126 UINT8 ar_byte; // access rights byte
// Keep the old CR3 so the TLB is only flushed if it actually changes.
1127 UINT32 oldcr3 = cpustate->cr[3];
1129 /* TODO: Task State Segment privilege checks */
1131 /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
// Busy bit is bit 1 of the access-rights byte at descriptor offset 5.
1134 if(cpustate->task.segment & 0x0004)
1136 ar_byte = READ8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1137 WRITE8(cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1141 ar_byte = READ8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1142 WRITE8(cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1146 /* Save the state of the current task in the current TSS (TR register base) */
1147 tss = cpustate->task.base;
1148 WRITE32(tss+0x1c,cpustate->cr[3]); // correct?
1149 WRITE32(tss+0x20,cpustate->eip);
1150 WRITE32(tss+0x24,get_flags());
1151 WRITE32(tss+0x28,REG32(EAX));
1152 WRITE32(tss+0x2c,REG32(ECX));
1153 WRITE32(tss+0x30,REG32(EDX));
1154 WRITE32(tss+0x34,REG32(EBX));
1155 WRITE32(tss+0x38,REG32(ESP));
1156 WRITE32(tss+0x3c,REG32(EBP));
1157 WRITE32(tss+0x40,REG32(ESI));
1158 WRITE32(tss+0x44,REG32(EDI));
1159 WRITE32(tss+0x48,cpustate->sreg[ES].selector);
1160 WRITE32(tss+0x4c,cpustate->sreg[CS].selector);
1161 WRITE32(tss+0x50,cpustate->sreg[SS].selector);
1162 WRITE32(tss+0x54,cpustate->sreg[DS].selector);
1163 WRITE32(tss+0x58,cpustate->sreg[FS].selector);
1164 WRITE32(tss+0x5c,cpustate->sreg[GS].selector);
// Remember the outgoing task for the back-link write below.
1166 old_task = cpustate->task.segment;
1168 /* Load task register with the selector of the incoming task */
1169 cpustate->task.segment = selector;
1170 memset(&seg, 0, sizeof(seg));
1171 seg.selector = cpustate->task.segment;
1172 i386_load_protected_mode_segment(&seg,NULL);
1173 cpustate->task.limit = seg.limit;
1174 cpustate->task.base = seg.base;
1175 cpustate->task.flags = seg.flags;
1177 /* Set TS bit in CR0 */
1178 cpustate->cr[0] |= 0x08;
1180 /* Load incoming task state from the new task's TSS */
1181 tss = cpustate->task.base;
// LDTR first, since the segment reloads below may reference the LDT.
1182 cpustate->ldtr.segment = READ32(tss+0x60) & 0xffff;
1183 seg.selector = cpustate->ldtr.segment;
1184 i386_load_protected_mode_segment(&seg,NULL);
1185 cpustate->ldtr.limit = seg.limit;
1186 cpustate->ldtr.base = seg.base;
1187 cpustate->ldtr.flags = seg.flags;
1188 cpustate->eip = READ32(tss+0x20);
1189 set_flags(READ32(tss+0x24));
1190 REG32(EAX) = READ32(tss+0x28);
1191 REG32(ECX) = READ32(tss+0x2c);
1192 REG32(EDX) = READ32(tss+0x30);
1193 REG32(EBX) = READ32(tss+0x34);
1194 REG32(ESP) = READ32(tss+0x38);
1195 REG32(EBP) = READ32(tss+0x3c);
1196 REG32(ESI) = READ32(tss+0x40);
1197 REG32(EDI) = READ32(tss+0x44);
1198 cpustate->sreg[ES].selector = READ32(tss+0x48) & 0xffff;
1199 i386_load_segment_descriptor(ES);
1200 cpustate->sreg[CS].selector = READ32(tss+0x4c) & 0xffff;
1201 i386_load_segment_descriptor(CS);
1202 cpustate->sreg[SS].selector = READ32(tss+0x50) & 0xffff;
1203 i386_load_segment_descriptor(SS);
1204 cpustate->sreg[DS].selector = READ32(tss+0x54) & 0xffff;
1205 i386_load_segment_descriptor(DS);
1206 cpustate->sreg[FS].selector = READ32(tss+0x58) & 0xffff;
1207 i386_load_segment_descriptor(FS);
1208 cpustate->sreg[GS].selector = READ32(tss+0x5c) & 0xffff;
1209 i386_load_segment_descriptor(GS);
1210 /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
1211 and set the NT flag in the EFLAGS register before setting cr3 as the old tss address might be gone */
1214 WRITE32(tss+0,old_task);
// Switch address space; only flush the soft-TLB if CR3 changed.
1217 cpustate->cr[3] = READ32(tss+0x1c); // CR3 (PDBR)
1218 if(oldcr3 != cpustate->cr[3])
1219 vtlb_flush_dynamic(cpustate->vtlb);
1221 /* Set the busy bit in the new task's descriptor */
1222 if(selector & 0x0004)
1224 ar_byte = READ8(cpustate->ldtr.base + (selector & ~0x0007) + 5);
1225 WRITE8(cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1229 ar_byte = READ8(cpustate->gdtr.base + (selector & ~0x0007) + 5);
1230 WRITE8(cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1233 CHANGE_PC(cpustate->eip);
// CPL comes from the incoming stack segment's DPL.
1235 cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1236 // printf("386 Task Switch from selector %04x to %04x\n",old_task,selector);
// Poll pending interrupt sources: SMI (when not already in SMM) and the
// maskable IRQ line, which is serviced only while IF is set.
1239 void I386_OPS_BASE::i386_check_irq_line()
1241 if(!cpustate->smm && cpustate->smi)
1247 /* Check if the interrupts are enabled */
1248 if ( (cpustate->irq_state) && cpustate->IF )
// Acknowledge cycle costs 2 clocks; the vector comes from the PIC and is
// dispatched as an external interrupt (irq_gate = 1).
1250 cpustate->cycles -= 2;
1251 i386_trap(cpustate->pic->get_intr_ack(), 1, 0);
1252 cpustate->irq_state = 0;
/* Protected-mode far JMP (seg:off), including jumps through call gates,
   task gates and TSS descriptors.
   seg/off     - target selector and offset as fetched by the opcode handler
   indirect    - nonzero when the target came from memory (JMP m16:16 / m16:32)
   operand32   - nonzero for 32-bit operand size (full EIP, 386 gate formats)
   Faults (#GP/#NP) abort the jump via the FAULT macro without changing CS:EIP. */
1256 void I386_OPS_BASE::i386_protected_mode_jump( UINT16 seg, UINT32 off, int indirect, int operand32)
1259 I386_CALL_GATE call_gate;
1262 UINT16 segment = seg;
1263 UINT32 offset = off;
1265 /* Check selector is not null */
1266 if((segment & ~0x03) == 0)
1268 logerror("JMP: Segment is null.\n");
1271 /* Selector is within descriptor table limit */
1272 if((segment & 0x04) == 0)
1274 /* check GDT limit */
1275 if((segment & ~0x07) > (cpustate->gdtr.limit))
1277 logerror("JMP: Segment is past GDT limit.\n");
1278 FAULT(FAULT_GP,segment & 0xfffc)
1283 /* check LDT limit */
1284 if((segment & ~0x07) > (cpustate->ldtr.limit))
1286 logerror("JMP: Segment is past LDT limit.\n");
1287 FAULT(FAULT_GP,segment & 0xfffc)
1290 /* Determine segment type */
1291 memset(&desc, 0, sizeof(desc));
1292 desc.selector = segment;
1293 i386_load_protected_mode_segment(&desc,NULL);
1294 CPL = cpustate->CPL; // current privilege level
1295 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1296 RPL = segment & 0x03; // requested privilege level
/* Code segment (S=1, executable): privilege rules differ for conforming vs non-conforming. */
1297 if((desc.flags & 0x0018) == 0x0018)
1300 if((desc.flags & 0x0004) == 0)
1302 /* non-conforming */
/* NOTE(review): the RPL/DPL comparison guards for the two faults below are elided in this extract. */
1305 logerror("JMP: RPL %i is less than CPL %i\n",RPL,CPL);
1306 FAULT(FAULT_GP,segment & 0xfffc)
1310 logerror("JMP: DPL %i is not equal CPL %i\n",DPL,CPL);
1311 FAULT(FAULT_GP,segment & 0xfffc)
/* conforming case: DPL must not exceed CPL */
1319 logerror("JMP: DPL %i is less than CPL %i\n",DPL,CPL);
1320 FAULT(FAULT_GP,segment & 0xfffc)
1324 if((desc.flags & 0x0080) == 0)
1326 logerror("JMP: Segment is not present\n");
1327 FAULT(FAULT_NP,segment & 0xfffc)
1329 if(offset > desc.limit)
1331 logerror("JMP: Offset is past segment limit\n");
/* System/data descriptor: bit 4 set means data segment, which is not executable. */
1337 if((desc.flags & 0x0010) != 0)
1339 logerror("JMP: Segment is a data segment\n");
1340 FAULT(FAULT_GP,segment & 0xfffc) // #GP (cannot execute code in a data segment)
/* System descriptor: dispatch on the 4-bit type field. */
1344 switch(desc.flags & 0x000f)
1346 case 0x01: // 286 Available TSS
1347 case 0x09: // 386 Available TSS
1348 logerror("JMP: Available 386 TSS at %08x\n",cpustate->pc);
1349 memset(&desc, 0, sizeof(desc));
1350 desc.selector = segment;
1351 i386_load_protected_mode_segment(&desc,NULL);
1352 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1355 logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL);
1356 FAULT(FAULT_GP,segment & 0xfffc)
1360 logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL);
1361 FAULT(FAULT_GP,segment & 0xfffc)
1363 if((desc.flags & 0x0080) == 0)
1365 logerror("JMP: TSS: Segment is not present\n");
1366 FAULT(FAULT_GP,segment & 0xfffc)
/* Type bit 3 distinguishes 386 (32-bit) TSS from 286 TSS; nested=0 for JMP. */
1368 if(desc.flags & 0x0008)
1369 i386_task_switch(desc.selector,0);
1371 i286_task_switch(desc.selector,0);
1373 case 0x04: // 286 Call Gate
1374 case 0x0c: // 386 Call Gate
1375 //logerror("JMP: Call gate at %08x\n",cpustate->pc);
1377 memset(&call_gate, 0, sizeof(call_gate));
1378 call_gate.segment = segment;
1379 i386_load_call_gate(&call_gate);
1380 DPL = call_gate.dpl;
1383 logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL);
1384 FAULT(FAULT_GP,segment & 0xfffc)
1388 logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL);
1389 FAULT(FAULT_GP,segment & 0xfffc)
1391 if((desc.flags & 0x0080) == 0)
1393 logerror("JMP: Call Gate: Segment is not present\n");
1394 FAULT(FAULT_NP,segment & 0xfffc)
1396 /* Now we examine the segment that the call gate refers to */
1397 if(call_gate.selector == 0)
1399 logerror("JMP: Call Gate: Gate selector is null\n");
1402 if(call_gate.selector & 0x04)
1404 if((call_gate.selector & ~0x07) > cpustate->ldtr.limit)
1406 logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n");
1407 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1412 if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1414 logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n");
1415 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1418 desc.selector = call_gate.selector;
1419 i386_load_protected_mode_segment(&desc,NULL);
1420 DPL = (desc.flags >> 5) & 0x03;
1421 if((desc.flags & 0x0018) != 0x18)
1423 logerror("JMP: Call Gate: Gate does not point to a code segment\n");
1424 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* Target code segment privilege checks (non-conforming vs conforming). */
1426 if((desc.flags & 0x0004) == 0)
1430 logerror("JMP: Call Gate: Gate DPL does not equal CPL\n");
1431 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1438 logerror("JMP: Call Gate: Gate DPL is greater than CPL\n");
1439 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1442 if((desc.flags & 0x0080) == 0)
1444 logerror("JMP: Call Gate: Gate Segment is not present\n");
1445 FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1447 if(call_gate.offset > desc.limit)
1449 logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n");
1450 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* Gate checks passed: redirect the jump to the gate's target CS:EIP. */
1452 segment = call_gate.selector;
1453 offset = call_gate.offset;
1455 case 0x05: // Task Gate
1456 logerror("JMP: Task gate at %08x\n",cpustate->pc);
1457 memset(&call_gate, 0, sizeof(call_gate));
1458 call_gate.segment = segment;
1459 i386_load_call_gate(&call_gate);
1460 DPL = call_gate.dpl;
1463 logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
1464 FAULT(FAULT_GP,segment & 0xfffc)
1468 logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
1469 FAULT(FAULT_GP,segment & 0xfffc)
1471 if(call_gate.present == 0)
1473 logerror("JMP: Task Gate: Gate is not present.\n");
1474 FAULT(FAULT_GP,segment & 0xfffc)
1476 /* Check the TSS that the task gate points to */
1477 desc.selector = call_gate.selector;
1478 i386_load_protected_mode_segment(&desc,NULL);
1479 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1480 RPL = call_gate.selector & 0x03; // requested privilege level
/* TSS selectors must reference the GDT (TI bit clear). */
1481 if(call_gate.selector & 0x04)
1483 logerror("JMP: Task Gate TSS: TSS must be global.\n");
1484 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1488 if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1490 logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n");
1491 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
/* NOTE(review): this condition faults when the type IS 0x09/0x01 (available TSS) — looks inverted versus the log text; verify against the full source before changing. */
1494 if((call_gate.ar & 0x000f) == 0x0009 || (call_gate.ar & 0x000f) == 0x0001)
1496 logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n");
1497 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1499 if(call_gate.present == 0)
1501 logerror("JMP: Task Gate TSS: TSS is not present.\n");
1502 FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1504 if(call_gate.ar & 0x08)
1505 i386_task_switch(call_gate.selector,0);
1507 i286_task_switch(call_gate.selector,0);
1509 default: // invalid segment type
1510 logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f);
1511 FAULT(FAULT_GP,segment & 0xfffc)
/* Commit: RPL of the loaded CS is forced to the current CPL. */
1517 segment = (segment & ~0x03) | cpustate->CPL;
1519 cpustate->eip = offset & 0x0000ffff;
1521 cpustate->eip = offset;
1522 cpustate->sreg[CS].selector = segment;
1523 cpustate->performed_intersegment_jump = 1;
1524 i386_load_segment_descriptor(CS);
1525 CHANGE_PC(cpustate->eip);
/* Protected-mode far CALL (seg:off), including calls through call gates
   (with inner-privilege stack switch and parameter copy), task gates and TSS
   descriptors.
   seg/off     - target selector and offset from the opcode handler
   indirect    - nonzero for CALL m16:16 / m16:32 forms
   operand32   - nonzero for 32-bit operand size (32-bit pushes, 386 gates)
   Faults (#GP/#NP/#SS/#TS) abort via FAULT before any architectural state
   is committed. */
1528 void I386_OPS_BASE::i386_protected_mode_call( UINT16 seg, UINT32 off, int indirect, int operand32)
1531 I386_CALL_GATE gate;
1533 UINT8 CPL, DPL, RPL;
1534 UINT16 selector = seg;
1535 UINT32 offset = off;
1538 if((selector & ~0x03) == 0)
1540 logerror("CALL (%08x): Selector is null.\n",cpustate->pc);
1541 FAULT(FAULT_GP,0) // #GP(0)
1545 if((selector & ~0x07) > cpustate->ldtr.limit)
1547 logerror("CALL: Selector is past LDT limit.\n");
1548 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1553 if((selector & ~0x07) > cpustate->gdtr.limit)
1555 logerror("CALL: Selector is past GDT limit.\n");
1556 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1560 /* Determine segment type */
1561 memset(&desc, 0, sizeof(desc));
1562 desc.selector = selector;
1563 i386_load_protected_mode_segment(&desc,NULL);
1564 CPL = cpustate->CPL; // current privilege level
1565 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
1566 RPL = selector & 0x03; // requested privilege level
1567 if((desc.flags & 0x0018) == 0x18) // is a code segment
/* Direct call to a code segment: conforming allows DPL <= CPL, non-conforming requires RPL <= CPL and DPL == CPL. */
1569 if(desc.flags & 0x0004)
1574 logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL);
1575 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1580 /* non-conforming */
1583 logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL);
1584 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1588 logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL);
1589 FAULT(FAULT_GP,selector & ~0x03) // #GP(selector)
1593 if((desc.flags & 0x0080) == 0)
1595 logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1596 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* Ensure the current stack can hold the CS:EIP return address before pushing. */
1598 if (operand32 != 0) // if 32-bit
1600 if(i386_limit_check(SS, REG32(ESP) - 8))
1602 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1603 FAULT(FAULT_SS,0) // #SS(0)
1608 if(i386_limit_check(SS, (REG16(SP) - 4) & 0xffff))
1610 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1611 FAULT(FAULT_SS,0) // #SS(0)
1614 if(offset > desc.limit)
1616 logerror("CALL: EIP is past segment limit.\n");
1617 FAULT(FAULT_GP,0) // #GP(0)
1622 /* special segment type */
1623 if(desc.flags & 0x0010)
1625 logerror("CALL: Segment is a data segment.\n");
1626 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
/* System descriptor: dispatch on the 4-bit type field. */
1630 switch(desc.flags & 0x000f)
1632 case 0x01: // Available 286 TSS
1633 case 0x09: // Available 386 TSS
1634 logerror("CALL: Available TSS at %08x\n",cpustate->pc);
1637 logerror("CALL: TSS: DPL is less than CPL.\n");
1638 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1642 logerror("CALL: TSS: DPL is less than RPL.\n");
1643 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1645 if(desc.flags & 0x0002)
1647 logerror("CALL: TSS: TSS is busy.\n");
1648 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1650 if((desc.flags & 0x0080) == 0)
1652 logerror("CALL: TSS: Segment %02x is not present.\n",selector);
1653 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
/* nested=1: CALL-initiated task switches link back to the outgoing task. */
1655 if(desc.flags & 0x08)
1656 i386_task_switch(desc.selector,1);
1658 i286_task_switch(desc.selector,1);
1660 case 0x04: // 286 call gate
1661 case 0x0c: // 386 call gate
1662 if((desc.flags & 0x000f) == 0x04)
1666 memset(&gate, 0, sizeof(gate));
1667 gate.segment = selector;
1668 i386_load_call_gate(&gate);
1670 //logerror("CALL: Call gate at %08x (%i parameters)\n",cpustate->pc,gate.dword_count);
1673 logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL);
1674 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1678 logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL);
1679 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1681 if(gate.present == 0)
1683 logerror("CALL: Call gate is not present.\n");
1684 FAULT(FAULT_NP,desc.selector & ~0x03) // #GP(selector)
/* Validate the code segment the gate targets. */
1686 desc.selector = gate.selector;
1687 if((gate.selector & ~0x03) == 0)
1689 logerror("CALL: Call gate: Segment is null.\n");
1690 FAULT(FAULT_GP,0) // #GP(0)
1692 if(desc.selector & 0x04)
1694 if((desc.selector & ~0x07) > cpustate->ldtr.limit)
1696 logerror("CALL: Call gate: Segment is past LDT limit\n");
1697 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1702 if((desc.selector & ~0x07) > cpustate->gdtr.limit)
1704 logerror("CALL: Call gate: Segment is past GDT limit\n");
1705 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1708 i386_load_protected_mode_segment(&desc,NULL);
1709 if((desc.flags & 0x0018) != 0x18)
1711 logerror("CALL: Call gate: Segment is not a code segment.\n");
1712 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1714 DPL = ((desc.flags >> 5) & 0x03);
1717 logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL);
1718 FAULT(FAULT_GP,desc.selector & ~0x03) // #GP(selector)
1720 if((desc.flags & 0x0080) == 0)
1722 logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1723 FAULT(FAULT_NP,desc.selector & ~0x03) // #NP(selector)
/* Inner-privilege transfer (target more privileged, non-conforming): switch to the DPL's stack from the TSS, then copy gate parameters across. */
1725 if(DPL < CPL && (desc.flags & 0x0004) == 0)
1729 UINT32 oldSS,oldESP;
1730 /* more privilege */
1731 /* Check new SS segment for privilege level from TSS */
1732 memset(&stack, 0, sizeof(stack));
1733 stack.selector = i386_get_stack_segment(DPL);
1734 i386_load_protected_mode_segment(&stack,NULL);
1735 if((stack.selector & ~0x03) == 0)
1737 logerror("CALL: Call gate: TSS selector is null\n");
1738 FAULT(FAULT_TS,0) // #TS(0)
1740 if(stack.selector & 0x04)
1742 if((stack.selector & ~0x07) > cpustate->ldtr.limit)
1744 logerror("CALL: Call gate: TSS selector is past LDT limit\n");
1745 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1750 if((stack.selector & ~0x07) > cpustate->gdtr.limit)
1752 logerror("CALL: Call gate: TSS selector is past GDT limit\n");
1753 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1756 if((stack.selector & 0x03) != DPL)
1758 logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL);
1759 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1761 if(((stack.flags >> 5) & 0x03) != DPL)
1763 logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL);
1764 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
/* NOTE(review): the writability test uses && — a writable data segment (bit 1 set) with wrong type would not fault; verify intent against the full source. */
1766 if((stack.flags & 0x0018) != 0x10 && (stack.flags & 0x0002))
1768 logerror("CALL: Call gate: Stack segment is not a writable data segment\n");
1769 FAULT(FAULT_TS,stack.selector) // #TS(SS selector)
1771 if((stack.flags & 0x0080) == 0)
1773 logerror("CALL: Call gate: Stack segment is not present\n");
1774 FAULT(FAULT_SS,stack.selector) // #SS(SS selector)
1776 UINT32 newESP = i386_get_stack_ptr(DPL);
/* Room check: return address + SS:ESP + up to 31 copied parameters. */
1783 if(newESP < ((gate.dword_count & 0x1f) + 16))
1785 logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n");
1786 FAULT(FAULT_SS,0) // #SS(0)
1788 if(gate.offset > desc.limit)
1790 logerror("CALL: Call gate: EIP is past segment limit.\n");
1791 FAULT(FAULT_GP,0) // #GP(0)
1796 if(newESP < ((gate.dword_count & 0x1f) + 8))
1798 logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n");
1799 FAULT(FAULT_SS,0) // #SS(0)
1801 if((gate.offset & 0xffff) > desc.limit)
1803 logerror("CALL: Call gate: IP is past segment limit.\n");
1804 FAULT(FAULT_GP,0) // #GP(0)
1807 selector = gate.selector;
1808 offset = gate.offset;
/* Commit the privilege change and switch stacks. */
1810 cpustate->CPL = (stack.flags >> 5) & 0x03;
1811 /* check for page fault at new stack */
1812 WRITE_TEST(stack.base+newESP-1);
1813 /* switch to new stack */
1814 oldSS = cpustate->sreg[SS].selector;
1815 cpustate->sreg[SS].selector = i386_get_stack_segment(cpustate->CPL);
1818 oldESP = REG32(ESP);
1824 i386_load_segment_descriptor(SS );
1825 REG32(ESP) = newESP;
1835 PUSH16(oldESP & 0xffff);
1838 memset(&temp, 0, sizeof(temp));
1839 temp.selector = oldSS;
1840 i386_load_protected_mode_segment(&temp,NULL);
1841 /* copy parameters from old stack to new stack */
1842 for(x=(gate.dword_count & 0x1f)-1;x>=0;x--)
1844 UINT32 addr = oldESP + (operand32?(x*4):(x*2));
1845 addr = temp.base + (temp.d?addr:(addr&0xffff));
1847 PUSH32(READ32(addr));
1849 PUSH16(READ16(addr));
1855 /* same privilege */
1856 if (operand32 != 0) // if 32-bit
1858 if(i386_limit_check(SS, REG32(ESP) - 8))
1860 logerror("CALL: Stack has no room for return address.\n");
1861 FAULT(FAULT_SS,0) // #SS(0)
1863 selector = gate.selector;
1864 offset = gate.offset;
1868 if(i386_limit_check(SS, (REG16(SP) - 4) & 0xffff))
1870 logerror("CALL: Stack has no room for return address.\n");
1871 FAULT(FAULT_SS,0) // #SS(0)
1873 selector = gate.selector;
1874 offset = gate.offset & 0xffff;
1876 if(offset > desc.limit)
1878 logerror("CALL: EIP is past segment limit.\n");
1879 FAULT(FAULT_GP,0) // #GP(0)
1884 case 0x05: // task gate
1885 logerror("CALL: Task gate at %08x\n",cpustate->pc);
1886 memset(&gate, 0, sizeof(gate));
1887 gate.segment = selector;
1888 i386_load_call_gate(&gate);
1892 logerror("CALL: Task Gate: Gate DPL is less than CPL.\n");
1893 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1897 logerror("CALL: Task Gate: Gate DPL is less than RPL.\n");
1898 FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1900 if((gate.ar & 0x0080) == 0)
1902 logerror("CALL: Task Gate: Gate is not present.\n");
1903 FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
1905 /* Check the TSS that the task gate points to */
1906 desc.selector = gate.selector;
1907 i386_load_protected_mode_segment(&desc,NULL);
1908 if(gate.selector & 0x04)
1910 logerror("CALL: Task Gate: TSS is not global.\n");
1911 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1915 if((gate.selector & ~0x07) > cpustate->gdtr.limit)
1917 logerror("CALL: Task Gate: TSS is past GDT limit.\n");
1918 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1921 if(desc.flags & 0x0002)
1923 logerror("CALL: Task Gate: TSS is busy.\n");
1924 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1926 if((desc.flags & 0x0080) == 0)
1928 logerror("CALL: Task Gate: TSS is not present.\n");
1929 FAULT(FAULT_NP,gate.selector & ~0x03) // #TS(selector)
1931 if(desc.flags & 0x08)
1932 i386_task_switch(desc.selector,1); // with nesting
1934 i286_task_switch(desc.selector,1);
1937 logerror("CALL: Invalid special segment type (%i) to jump to.\n",desc.flags & 0x000f);
1938 FAULT(FAULT_GP,selector & ~0x07) // #GP(selector)
/* Commit: push return CS:EIP on the (possibly new) stack and load the target. */
1944 selector = (selector & ~0x03) | cpustate->CPL;
1946 UINT32 tempSP = REG32(ESP);
1949 // this is ugly but the alternative is worse
1952 /* 16-bit operand size */
1953 PUSH16(cpustate->sreg[CS].selector );
1954 PUSH16(cpustate->eip & 0x0000ffff );
1955 cpustate->sreg[CS].selector = selector;
1956 cpustate->performed_intersegment_jump = 1;
1957 cpustate->eip = offset;
1958 i386_load_segment_descriptor(CS);
1962 /* 32-bit operand size */
1963 PUSH32(cpustate->sreg[CS].selector );
1964 PUSH32(cpustate->eip );
1965 cpustate->sreg[CS].selector = selector;
1966 cpustate->performed_intersegment_jump = 1;
1967 cpustate->eip = offset;
1968 i386_load_segment_descriptor(CS );
/* Restore the saved ESP if a push faulted mid-sequence (see comment above). */
1973 REG32(ESP) = tempSP;
1977 CHANGE_PC(cpustate->eip);
/* Protected-mode far return (RETF / RETF imm16).
   count     - extra bytes of caller parameters to discard from the stack
   operand32 - nonzero for a 32-bit return (pops 32-bit EIP/CS, SS/ESP)
   Handles both same-privilege returns and returns to an outer (less
   privileged) level, where SS:ESP are also popped from the stack.
   Faults (#GP/#NP) abort via FAULT before CS:EIP is committed. */
1980 void I386_OPS_BASE::i386_protected_mode_retf(UINT8 count, UINT8 operand32)
1982 UINT32 newCS, newEIP;
1984 UINT8 CPL, RPL, DPL;
/* Peek (do not pop yet) the return CS:EIP from the current stack. */
1986 UINT32 ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0);
1990 newEIP = READ16(ea) & 0xffff;
1991 newCS = READ16(ea+2) & 0xffff;
1995 newEIP = READ32(ea);
1996 newCS = READ32(ea+4) & 0xffff;
1999 memset(&desc, 0, sizeof(desc));
2000 desc.selector = newCS;
2001 i386_load_protected_mode_segment(&desc,NULL);
2002 CPL = cpustate->CPL; // current privilege level
2003 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2008 logerror("RETF (%08x): Return segment RPL is less than CPL.\n",cpustate->pc);
2009 FAULT(FAULT_GP,newCS & ~0x03)
2014 /* same privilege level */
2015 if((newCS & ~0x03) == 0)
2017 logerror("RETF: Return segment is null.\n");
2022 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2024 logerror("RETF: Return segment is past LDT limit.\n");
2025 FAULT(FAULT_GP,newCS & ~0x03)
2030 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2032 logerror("RETF: Return segment is past GDT limit.\n");
2033 FAULT(FAULT_GP,newCS & ~0x03)
2036 if((desc.flags & 0x0018) != 0x0018)
2038 logerror("RETF: Return segment is not a code segment.\n");
2039 FAULT(FAULT_GP,newCS & ~0x03)
2041 if(desc.flags & 0x0004)
2045 logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n");
2046 FAULT(FAULT_GP,newCS & ~0x03)
2053 logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n");
2054 FAULT(FAULT_GP,newCS & ~0x03)
2057 if((desc.flags & 0x0080) == 0)
2059 logerror("RETF (%08x): Code segment is not present.\n",cpustate->pc);
2060 FAULT(FAULT_NP,newCS & ~0x03)
2062 if(newEIP > desc.limit)
2064 logerror("RETF: EIP is past code segment limit.\n");
/* Verify the stack holds return address + `count` parameter bytes, then discard them. */
2069 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2070 if(i386_limit_check(SS,offset+count+3) != 0)
2072 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2078 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2079 if(i386_limit_check(SS,offset+count+7) != 0)
2081 logerror("RETF: ESP is past stack segment limit.\n");
2086 REG16(SP) += (4+count);
2088 REG32(ESP) += (8+count);
2092 UINT32 newSS, newESP; // when changing privilege
2093 /* outer privilege level */
2096 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2097 if(i386_limit_check(SS,offset+count+7) != 0)
2099 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2105 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2106 if(i386_limit_check(SS,offset+count+15) != 0)
2108 logerror("RETF: ESP is past stack segment limit.\n");
2112 /* Check CS selector and descriptor */
2113 if((newCS & ~0x03) == 0)
2115 logerror("RETF: CS segment is null.\n");
2120 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2122 logerror("RETF: CS segment selector is past LDT limit.\n");
2123 FAULT(FAULT_GP,newCS & ~0x03)
2128 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2130 logerror("RETF: CS segment selector is past GDT limit.\n");
2131 FAULT(FAULT_GP,newCS & ~0x03)
2134 if((desc.flags & 0x0018) != 0x0018)
2136 logerror("RETF: CS segment is not a code segment.\n");
2137 FAULT(FAULT_GP,newCS & ~0x03)
2139 if(desc.flags & 0x0004)
2143 logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n");
2144 FAULT(FAULT_GP,newCS & ~0x03)
2151 logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n");
2152 FAULT(FAULT_GP,newCS & ~0x03)
2155 if((desc.flags & 0x0080) == 0)
2157 logerror("RETF: CS segment is not present.\n");
2158 FAULT(FAULT_NP,newCS & ~0x03)
2160 if(newEIP > desc.limit)
2162 logerror("RETF: EIP is past return CS segment limit.\n");
/* Outer-level return also pops the caller's SS:ESP from beyond the parameters. */
2169 newESP = READ16(ea) & 0xffff;
2170 newSS = READ16(ea+2) & 0xffff;
2175 newESP = READ32(ea);
2176 newSS = READ32(ea+4) & 0xffff;
2179 /* Check SS selector and descriptor */
2180 desc.selector = newSS;
2181 i386_load_protected_mode_segment(&desc,NULL);
2182 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2184 if((newSS & ~0x07) == 0)
2185 logerror("RETF: SS segment is null.\n");
2190 if((newSS & ~0x07) > cpustate->ldtr.limit)
2192 logerror("RETF (%08x): SS segment selector is past LDT limit.\n",cpustate->pc);
2193 FAULT(FAULT_GP,newSS & ~0x03)
2198 if((newSS & ~0x07) > cpustate->gdtr.limit)
2200 logerror("RETF (%08x): SS segment selector is past GDT limit.\n",cpustate->pc);
2201 FAULT(FAULT_GP,newSS & ~0x03)
2204 if((newSS & 0x03) != RPL)
2206 logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n");
2207 FAULT(FAULT_GP,newSS & ~0x03)
2209 if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0)
2211 logerror("RETF: SS segment is not a writable data segment.\n");
2212 FAULT(FAULT_GP,newSS & ~0x03)
2214 if(((desc.flags >> 5) & 0x03) != RPL)
2216 logerror("RETF: SS DPL is not equal to CS segment RPL.\n");
2217 FAULT(FAULT_GP,newSS & ~0x03)
2219 if((desc.flags & 0x0080) == 0)
2221 logerror("RETF: SS segment is not present.\n");
2222 FAULT(FAULT_GP,newSS & ~0x03)
/* Commit: CPL drops to the return CS RPL; load the outer stack (+count discards parameters). */
2224 cpustate->CPL = newCS & 0x03;
2226 /* Load new SS:(E)SP */
2228 REG16(SP) = (newESP+count) & 0xffff;
2230 REG32(ESP) = newESP+count;
2231 cpustate->sreg[SS].selector = newSS;
2232 i386_load_segment_descriptor(SS );
2234 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2235 i386_check_sreg_validity(DS);
2236 i386_check_sreg_validity(ES);
2237 i386_check_sreg_validity(FS);
2238 i386_check_sreg_validity(GS);
2241 /* Load new CS:(E)IP */
2243 cpustate->eip = newEIP & 0xffff;
2245 cpustate->eip = newEIP;
2246 cpustate->sreg[CS].selector = newCS;
2247 i386_load_segment_descriptor(CS );
2248 CHANGE_PC(cpustate->eip);
/* Protected-mode IRET.
   operand32 - nonzero for 32-bit operand size (pops EIP/CS/EFLAGS as dwords)
   Four distinct paths:
     1. IRET executed inside V86 mode (IOPL-sensitive; IOPL bits preserved)
     2. NT set: nested-task return via the back-link selector in the TSS
     3. Return TO V86 mode (VM bit set in the popped EFLAGS; 32-bit only)
     4. Normal protected-mode return, same or outer privilege level
   Faults (#GP/#NP/#TS) abort via FAULT before state is committed. */
2251 void I386_OPS_BASE::i386_protected_mode_iret(int operand32)
2253 UINT32 newCS, newEIP;
2254 UINT32 newSS, newESP; // when changing privilege
2255 I386_SREG desc,stack;
2256 UINT8 CPL, RPL, DPL;
2259 CPL = cpustate->CPL;
/* Peek the return frame (EIP, CS, EFLAGS) without popping yet. */
2260 UINT32 ea = i386_translate(SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0);
2263 newEIP = READ16(ea) & 0xffff;
2264 newCS = READ16(ea+2) & 0xffff;
2265 newflags = READ16(ea+4) & 0xffff;
2269 newEIP = READ32(ea);
2270 newCS = READ32(ea+4) & 0xffff;
2271 newflags = READ32(ea+8);
/* Path 1: IRET from within Virtual 8086 mode. */
2276 UINT32 oldflags = get_flags();
2277 if(!cpustate->IOP1 || !cpustate->IOP2)
2279 logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
2284 cpustate->eip = newEIP & 0xffff;
2285 cpustate->sreg[CS].selector = newCS & 0xffff;
2286 newflags &= ~(3<<12);
2287 newflags |= (((oldflags>>12)&3)<<12); // IOPL cannot be changed in V86 mode
2288 set_flags((newflags & 0xffff) | (oldflags & ~0xffff));
2293 cpustate->eip = newEIP;
2294 cpustate->sreg[CS].selector = newCS & 0xffff;
2295 newflags &= ~(3<<12);
2296 newflags |= 0x20000 | (((oldflags>>12)&3)<<12); // IOPL and VM cannot be changed in V86 mode
2297 set_flags(newflags);
/* Path 2: NT flag set — return to the task whose selector is the TSS back-link. */
2301 else if(NESTED_TASK)
2303 UINT32 task = READ32(cpustate->task.base);
2305 logerror("IRET (%08x): Nested task return.\n",cpustate->pc);
2306 /* Check back-link selector in TSS */
2309 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2310 FAULT(FAULT_TS,task & ~0x03)
2312 if((task & ~0x07) >= cpustate->gdtr.limit)
2314 logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2315 FAULT(FAULT_TS,task & ~0x03)
2317 memset(&desc, 0, sizeof(desc));
2318 desc.selector = task;
2319 i386_load_protected_mode_segment(&desc,NULL);
2320 if((desc.flags & 0x001f) != 0x000b)
2322 logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",cpustate->pc);
2323 FAULT(FAULT_TS,task & ~0x03)
2325 if((desc.flags & 0x0080) == 0)
2327 logerror("IRET: Task return: Back-linked TSS is not present.\n");
2328 FAULT(FAULT_NP,task & ~0x03)
2330 if(desc.flags & 0x08)
2331 i386_task_switch(desc.selector,0);
2333 i286_task_switch(desc.selector,0);
/* Path 3: VM set in the popped EFLAGS — return to Virtual 8086 mode. */
2338 if(newflags & 0x00020000) // if returning to virtual 8086 mode
2340 // 16-bit iret can't reach here
2341 newESP = READ32(ea+12);
2342 newSS = READ32(ea+16) & 0xffff;
2343 /* Return to v86 mode */
2344 //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",cpustate->pc);
/* IOPL is preserved unless CPL permits changing it (guard elided in this extract). */
2347 UINT32 oldflags = get_flags();
2348 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2350 set_flags(newflags);
/* Pop the full V86 frame: EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS. */
2351 cpustate->eip = POP32() & 0xffff; // high 16 bits are ignored
2352 cpustate->sreg[CS].selector = POP32() & 0xffff;
2353 POP32(); // already set flags
2355 newSS = POP32() & 0xffff;
2356 cpustate->sreg[ES].selector = POP32() & 0xffff;
2357 cpustate->sreg[DS].selector = POP32() & 0xffff;
2358 cpustate->sreg[FS].selector = POP32() & 0xffff;
2359 cpustate->sreg[GS].selector = POP32() & 0xffff;
2360 REG32(ESP) = newESP; // all 32 bits are loaded
2361 cpustate->sreg[SS].selector = newSS;
2362 i386_load_segment_descriptor(ES);
2363 i386_load_segment_descriptor(DS);
2364 i386_load_segment_descriptor(FS);
2365 i386_load_segment_descriptor(GS);
2366 i386_load_segment_descriptor(SS);
2367 cpustate->CPL = 3; // Virtual 8086 tasks are always run at CPL 3
/* Path 4: normal protected-mode return — first ensure the frame fits in SS. */
2373 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2374 if(i386_limit_check(SS,offset+3) != 0)
2376 logerror("IRET: Data on stack is past SS limit.\n");
2382 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2383 if(i386_limit_check(SS,offset+7) != 0)
2385 logerror("IRET: Data on stack is past SS limit.\n");
2392 logerror("IRET (%08x): Return CS RPL is less than CPL.\n",cpustate->pc);
2393 FAULT(FAULT_GP,newCS & ~0x03)
2397 /* return to same privilege level */
2400 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2401 if(i386_limit_check(SS,offset+5) != 0)
2403 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2409 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2410 if(i386_limit_check(SS,offset+11) != 0)
2412 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2416 if((newCS & ~0x03) == 0)
2418 logerror("IRET: Return CS selector is null.\n");
2423 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2425 logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS);
2426 FAULT(FAULT_GP,newCS & ~0x03)
2431 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2433 logerror("IRET: Return CS selector is past GDT limit.\n");
2434 FAULT(FAULT_GP,newCS & ~0x03)
2437 memset(&desc, 0, sizeof(desc));
2438 desc.selector = newCS;
2439 i386_load_protected_mode_segment(&desc,NULL);
2440 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
2442 if((desc.flags & 0x0018) != 0x0018)
2444 logerror("IRET (%08x): Return CS segment is not a code segment.\n",cpustate->pc);
2445 FAULT(FAULT_GP,newCS & ~0x07)
2447 if(desc.flags & 0x0004)
2451 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2452 FAULT(FAULT_GP,newCS & ~0x03)
2459 logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n");
2460 FAULT(FAULT_GP,newCS & ~0x03)
2463 if((desc.flags & 0x0080) == 0)
2465 logerror("IRET: Return CS segment is not present.\n");
2466 FAULT(FAULT_NP,newCS & ~0x03)
2468 if(newEIP > desc.limit)
2470 logerror("IRET: Return EIP is past return CS limit.\n");
/* Same-privilege commit: IOPL preserved unless privileged (guard elided here). */
2476 UINT32 oldflags = get_flags();
2477 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2482 cpustate->eip = newEIP;
2483 cpustate->sreg[CS].selector = newCS;
2484 set_flags(newflags);
2489 cpustate->eip = newEIP;
2490 cpustate->sreg[CS].selector = newCS & 0xffff;
2491 set_flags(newflags);
2497 /* return to outer privilege level */
2498 memset(&desc, 0, sizeof(desc));
2499 desc.selector = newCS;
2500 i386_load_protected_mode_segment(&desc,NULL);
2501 DPL = (desc.flags >> 5) & 0x03; // descriptor privilege level
/* Outer-level frame also contains SS:ESP beyond EIP/CS/EFLAGS. */
2505 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2506 if(i386_limit_check(SS,offset+9) != 0)
2508 logerror("IRET: SP is past SS limit.\n");
2514 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2515 if(i386_limit_check(SS,offset+19) != 0)
2517 logerror("IRET: ESP is past SS limit.\n");
2521 /* Check CS selector and descriptor */
2522 if((newCS & ~0x03) == 0)
2524 logerror("IRET: Return CS selector is null.\n");
2529 if((newCS & ~0x07) >= cpustate->ldtr.limit)
2531 logerror("IRET: Return CS selector is past LDT limit.\n");
2532 FAULT(FAULT_GP,newCS & ~0x03);
2537 if((newCS & ~0x07) >= cpustate->gdtr.limit)
2539 logerror("IRET: Return CS selector is past GDT limit.\n");
2540 FAULT(FAULT_GP,newCS & ~0x03);
2543 if((desc.flags & 0x0018) != 0x0018)
2545 logerror("IRET: Return CS segment is not a code segment.\n");
2546 FAULT(FAULT_GP,newCS & ~0x03)
2548 if(desc.flags & 0x0004)
2552 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2553 FAULT(FAULT_GP,newCS & ~0x03)
2560 logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n");
2561 FAULT(FAULT_GP,newCS & ~0x03)
2564 if((desc.flags & 0x0080) == 0)
2566 logerror("IRET: Return CS segment is not present.\n");
2567 FAULT(FAULT_NP,newCS & ~0x03)
2570 /* Check SS selector and descriptor */
2573 newESP = READ16(ea+6) & 0xffff;
2574 newSS = READ16(ea+8) & 0xffff;
2578 newESP = READ32(ea+12);
2579 newSS = READ32(ea+16) & 0xffff;
2581 memset(&stack, 0, sizeof(stack));
2582 stack.selector = newSS;
2583 i386_load_protected_mode_segment(&stack,NULL);
2584 DPL = (stack.flags >> 5) & 0x03;
2585 if((newSS & ~0x03) == 0)
2587 logerror("IRET: Return SS selector is null.\n");
2592 if((newSS & ~0x07) >= cpustate->ldtr.limit)
2594 logerror("IRET: Return SS selector is past LDT limit.\n");
2595 FAULT(FAULT_GP,newSS & ~0x03);
2600 if((newSS & ~0x07) >= cpustate->gdtr.limit)
2602 logerror("IRET: Return SS selector is past GDT limit.\n");
2603 FAULT(FAULT_GP,newSS & ~0x03);
2606 if((newSS & 0x03) != RPL)
2608 logerror("IRET: Return SS RPL is not equal to return CS RPL.\n");
2609 FAULT(FAULT_GP,newSS & ~0x03)
2611 if((stack.flags & 0x0018) != 0x0010)
2613 logerror("IRET: Return SS segment is not a data segment.\n");
2614 FAULT(FAULT_GP,newSS & ~0x03)
2616 if((stack.flags & 0x0002) == 0)
2618 logerror("IRET: Return SS segment is not writable.\n");
2619 FAULT(FAULT_GP,newSS & ~0x03)
2623 logerror("IRET: Return SS DPL does not equal SS RPL.\n");
2624 FAULT(FAULT_GP,newSS & ~0x03)
2626 if((stack.flags & 0x0080) == 0)
2628 logerror("IRET: Return SS segment is not present.\n");
2629 FAULT(FAULT_NP,newSS & ~0x03)
2631 if(newEIP > desc.limit)
2633 logerror("IRET: EIP is past return CS limit.\n");
2637 // if(operand32 == 0)
2640 // REG32(ESP) += 20;
2642 // IOPL can only change if CPL is zero
2645 UINT32 oldflags = get_flags();
2646 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
/* Outer-level commit: load CS:EIP, EFLAGS and the caller's SS:ESP; CPL becomes the return CS RPL. */
2651 cpustate->eip = newEIP & 0xffff;
2652 cpustate->sreg[CS].selector = newCS;
2653 set_flags(newflags);
2654 REG16(SP) = newESP & 0xffff;
2655 cpustate->sreg[SS].selector = newSS;
2659 cpustate->eip = newEIP;
2660 cpustate->sreg[CS].selector = newCS & 0xffff;
2661 set_flags(newflags);
2662 REG32(ESP) = newESP;
2663 cpustate->sreg[SS].selector = newSS & 0xffff;
2665 cpustate->CPL = newCS & 0x03;
2666 i386_load_segment_descriptor(SS);
2668 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2669 i386_check_sreg_validity(DS);
2670 i386_check_sreg_validity(ES);
2671 i386_check_sreg_validity(FS);
2672 i386_check_sreg_validity(GS);
2677 i386_load_segment_descriptor(CS);
2678 CHANGE_PC(cpustate->eip);
2681 //#include "cycles.h"
/* Populate the per-CPU-model cycle count tables from the static
   x86_cycle_table: column 0 is the real-mode count (cycle_table_rm),
   column 1 the protected-mode count (cycle_table_pm), indexed by opcode id. */
2683 void I386_OPS_BASE::build_cycle_table()
2686 for (j=0; j < X86_NUM_CPUS; j++)
2688 // cycle_table_rm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2689 // cycle_table_pm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2691 for (i=0; i < sizeof(x86_cycle_table)/sizeof(X86_CYCLE_TABLE); i++)
2693 int opcode = x86_cycle_table[i].op;
2694 cycle_table_rm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][0];
2695 cycle_table_pm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][1];
// Log an invalid-opcode event.  The normal build logs the single opcode
// byte and the PC (plus whether a LOCK prefix was active); the
// DEBUG_MISSING_OPCODE build dumps every recorded opcode byte and the
// instruction's start address instead.
2700 void I386_OPS_BASE::report_invalid_opcode()
2702 #ifndef DEBUG_MISSING_OPCODE
// pc - 1: step back over the opcode byte just fetched
2703 logerror("i386: Invalid opcode %02X at %08X %s\n", cpustate->opcode, cpustate->pc - 1, cpustate->lock ? "with lock" : "");
2705 logerror("i386: Invalid opcode");
2706 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2707 logerror(" %02X", cpustate->opcode_bytes[a]);
2708 logerror(" at %08X\n", cpustate->opcode_pc);
// Log an invalid modrm byte for the named opcode.  Normal build: modrm
// value and PC (pc - 2 steps back over opcode + modrm); debug build:
// all recorded opcode bytes plus the instruction start address.
2712 void I386_OPS_BASE::report_invalid_modrm( const char* opcode, UINT8 modrm)
2714 #ifndef DEBUG_MISSING_OPCODE
2715 logerror("i386: Invalid %s modrm %01X at %08X\n", opcode, modrm, cpustate->pc - 2);
2717 logerror("i386: Invalid %s modrm %01X", opcode, modrm);
2718 for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2719 logerror(" %02X", cpustate->opcode_bytes[a]);
2720 logerror(" at %08X\n", cpustate->opcode_pc);
2725 /* Forward declarations */
// Fetch and dispatch a one-byte opcode.  A LOCK prefix on an opcode not
// flagged lockable in lock_table[0] is routed to I386OP(invalid) (#UD).
// operand_size selects the 32-bit vs 16-bit handler table.
2728 void I386_OPS_BASE::I386OP(decode_opcode)()
2730 cpustate->opcode = FETCH();
2732 if(cpustate->lock && !cpustate->lock_table[0][cpustate->opcode])
2733 return I386OP(invalid)();
2735 if( cpustate->operand_size )
2736 (this->*cpustate->opcode_table1_32[cpustate->opcode])();
2738 (this->*cpustate->opcode_table1_16[cpustate->opcode])();
2741 /* Two-byte opcode 0f xx */
// Fetch and dispatch the second byte of a 0f xx opcode; lock_table[1]
// gates LOCK-prefix legality for the two-byte map.
2742 void I386_OPS_BASE::I386OP(decode_two_byte)()
2744 cpustate->opcode = FETCH();
2746 if(cpustate->lock && !cpustate->lock_table[1][cpustate->opcode])
2747 return I386OP(invalid)();
2749 if( cpustate->operand_size )
2750 (this->*cpustate->opcode_table2_32[cpustate->opcode])();
2752 (this->*cpustate->opcode_table2_16[cpustate->opcode])();
2755 /* Three-byte opcode 0f 38 xx */
// Fetch and dispatch the third byte of a 0f 38 xx opcode.
2756 void I386_OPS_BASE::I386OP(decode_three_byte38)()
2758 cpustate->opcode = FETCH();
2760 if (cpustate->operand_size)
2761 (this->*cpustate->opcode_table338_32[cpustate->opcode])();
2763 (this->*cpustate->opcode_table338_16[cpustate->opcode])();
2766 /* Three-byte opcode 0f 3a xx */
// Fetch and dispatch the third byte of a 0f 3a xx opcode.
2767 void I386_OPS_BASE::I386OP(decode_three_byte3a)()
2769 cpustate->opcode = FETCH();
2771 if (cpustate->operand_size)
2772 (this->*cpustate->opcode_table33a_32[cpustate->opcode])();
2774 (this->*cpustate->opcode_table33a_16[cpustate->opcode])();
2777 /* Three-byte opcode prefix 66 0f xx */
// Fetch and dispatch an opcode behind the 66 0f prefix pair.
2778 void I386_OPS_BASE::I386OP(decode_three_byte66)()
2780 cpustate->opcode = FETCH();
2781 if( cpustate->operand_size )
2782 (this->*cpustate->opcode_table366_32[cpustate->opcode])();
2784 (this->*cpustate->opcode_table366_16[cpustate->opcode])();
2787 /* Three-byte opcode prefix f2 0f xx */
// Fetch and dispatch an opcode behind the f2 0f prefix pair.
2788 void I386_OPS_BASE::I386OP(decode_three_bytef2)()
2790 cpustate->opcode = FETCH();
2791 if( cpustate->operand_size )
2792 (this->*cpustate->opcode_table3f2_32[cpustate->opcode])();
2794 (this->*cpustate->opcode_table3f2_16[cpustate->opcode])();
2797 /* Three-byte opcode prefix f3 0f */
// Fetch and dispatch an opcode behind the f3 0f prefix pair.
2798 void I386_OPS_BASE::I386OP(decode_three_bytef3)()
2800 cpustate->opcode = FETCH();
2801 if( cpustate->operand_size )
2802 (this->*cpustate->opcode_table3f3_32[cpustate->opcode])();
2804 (this->*cpustate->opcode_table3f3_16[cpustate->opcode])();
2807 /* Four-byte opcode prefix 66 0f 38 xx */
// Fetch and dispatch an opcode behind the 66 0f 38 prefix sequence.
2808 void I386_OPS_BASE::I386OP(decode_four_byte3866)()
2810 cpustate->opcode = FETCH();
2811 if (cpustate->operand_size)
2812 (this->*cpustate->opcode_table46638_32[cpustate->opcode])();
2814 (this->*cpustate->opcode_table46638_16[cpustate->opcode])();
2817 /* Four-byte opcode prefix 66 0f 3a xx */
// Fetch and dispatch an opcode behind the 66 0f 3a prefix sequence.
2818 void I386_OPS_BASE::I386OP(decode_four_byte3a66)()
2820 cpustate->opcode = FETCH();
2821 if (cpustate->operand_size)
2822 (this->*cpustate->opcode_table4663a_32[cpustate->opcode])();
2824 (this->*cpustate->opcode_table4663a_16[cpustate->opcode])();
2827 /* Four-byte opcode prefix f2 0f 38 xx */
// Fetch and dispatch an opcode behind the f2 0f 38 prefix sequence.
2828 void I386_OPS_BASE::I386OP(decode_four_byte38f2)()
2830 cpustate->opcode = FETCH();
2831 if (cpustate->operand_size)
2832 (this->*cpustate->opcode_table4f238_32[cpustate->opcode])();
2834 (this->*cpustate->opcode_table4f238_16[cpustate->opcode])();
2837 /* Four-byte opcode prefix f2 0f 3a xx */
// Fetch and dispatch an opcode behind the f2 0f 3a prefix sequence.
2838 void I386_OPS_BASE::I386OP(decode_four_byte3af2)()
2840 cpustate->opcode = FETCH();
2841 if (cpustate->operand_size)
2842 (this->*cpustate->opcode_table4f23a_32[cpustate->opcode])();
2844 (this->*cpustate->opcode_table4f23a_16[cpustate->opcode])();
2847 /* Four-byte opcode prefix f3 0f 38 xx */
// Fetch and dispatch an opcode behind the f3 0f 38 prefix sequence.
2848 void I386_OPS_BASE::I386OP(decode_four_byte38f3)()
2850 cpustate->opcode = FETCH();
2851 if (cpustate->operand_size)
2852 (this->*cpustate->opcode_table4f338_32[cpustate->opcode])();
2854 (this->*cpustate->opcode_table4f338_16[cpustate->opcode])();
2858 /*************************************************************************/
// Rebuild cached state after a savestate load: refresh all six segment
// descriptors from their selectors and resynchronize the PC with EIP.
2860 void I386_OPS_BASE::i386_postload()
2863 for (i = 0; i < 6; i++)
2864 i386_load_segment_descriptor(i);
2865 CHANGE_PC(cpustate->eip);
2868 #include "./cycles.h"
2869 #include "./i386_ops_table.h"
// Shared initialization for every CPU model: allocates the cpustate,
// builds the cycle / parity / modrm lookup tables, allocates the vtlb
// with the model-specific entry count, and wires up the device pointers.
2871 i386_state *I386_OPS_BASE::i386_common_init(int tlbsize)
// Register-id lookup used to decode the reg and rm fields of a modrm byte.
2874 static const int regs8[8] = {AL,CL,DL,BL,AH,CH,DH,BH};
2875 static const int regs16[8] = {AX,CX,DX,BX,SP,BP,SI,DI};
2876 static const int regs32[8] = {EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI};
// NOTE(review): raw malloc, not zeroed here — callers rely on
// zero_state()/reset to initialize fields.  Freed by i386_free_state().
2877 cpustate = (i386_state *)malloc(sizeof(i386_state));
2878 x86_cycle_table = _x86_cycle_table_real;
2879 //x86_opcode_table = _x86_opcode_table_fake;
2881 assert((sizeof(XMM_REG)/sizeof(double)) == 2);
2883 build_cycle_table();
// Precompute the even-parity flag for every byte value.
2885 for( i=0; i < 256; i++ ) {
2887 for( j=0; j < 8; j++ ) {
2891 i386_parity_table[i] = ~(c & 0x1) & 0x1;
// Precompute register ids for all 256 modrm byte values.
2894 for( i=0; i < 256; i++ ) {
2895 i386_MODRM_table[i].reg.b = regs8[(i >> 3) & 0x7];
2896 i386_MODRM_table[i].reg.w = regs16[(i >> 3) & 0x7];
2897 i386_MODRM_table[i].reg.d = regs32[(i >> 3) & 0x7];
2899 i386_MODRM_table[i].rm.b = regs8[i & 0x7];
2900 i386_MODRM_table[i].rm.w = regs16[i & 0x7];
2901 i386_MODRM_table[i].rm.d = regs32[i & 0x7];
2904 cpustate->vtlb = vtlb_alloc((void *)cpustate, AS_PROGRAM, 0, tlbsize);
2905 cpustate->smi = false;
2906 cpustate->lock = false;
2908 // i386_interface *intf = (i386_interface *) device->static_config();
2910 // if (intf != NULL)
2911 // cpustate->smiact.resolve(intf->smiact, *device);
2913 // memset(&cpustate->smiact, 0, sizeof(cpustate->smiact));
// Attach the memory, I/O and interrupt-controller devices.
2916 cpustate->program = d_mem;
2917 cpustate->io = d_io;
2918 cpustate->pic = d_pic;
// Release the virtual TLB allocated in i386_common_init().
2922 void I386_OPS_BASE::i386_vtlb_free(void)
2924 vtlb_free(cpustate->vtlb);
// Release the heap-allocated CPU state created by i386_common_init().
// The pointer is reset to NULL afterwards so the function's own NULL
// guard stays meaningful: without it a second call would double-free,
// and any later access would hit a dangling pointer.
// NOTE(review): the vtlb is released separately via i386_vtlb_free().
2927 void I386_OPS_BASE::i386_free_state(void)
2929 if(cpustate != NULL) { free(cpustate); cpustate = NULL; }
// Model-specific init for the i386: common setup with a 32-entry vtlb,
// the base i386 opcode set, and 386 cycle tables.
2932 void *I386_OPS_BASE::cpu_init_i386(void)
2934 i386_common_init(32);
2935 build_opcode_table(OP_I386);
2936 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I386];
2937 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I386];
2941 //#include "./i386_ops_table.h"
// Persist the CPU state as a single raw memory image of i386_state.
// NOTE(review): the image includes host pointers (vtlb, device pointers,
// handler tables), so it is only valid within the same build/run —
// load_state/i386_postload must re-fix derived state; verify the pointer
// fields are restored elsewhere after loading.
2943 void I386_OPS_BASE::save_state(FILEIO *state_fio)
2945 state_fio->Fwrite(cpustate, sizeof(i386_state), 1);
// Restore the CPU state from a raw memory image of i386_state.
// NOTE(review): this overwrites host pointer fields (vtlb, program/io/pic)
// with values from the file — they presumably must be re-assigned by the
// caller after loading; confirm against the save/restore path.
2948 bool I386_OPS_BASE::load_state(FILEIO *state_fio)
2950 state_fio->Fread(cpustate, sizeof(i386_state), 1);
// Build the per-instance opcode dispatch tables.  Every slot is first
// pointed at I386OP(invalid); then each entry of the static
// x86_opcode_table whose feature flags intersect `features` (OP_I386,
// OP_FPU, OP_PENTIUM, ...) is installed into the table matching its
// prefix class.  lock_table records which opcodes accept a LOCK prefix
// ([0] = one-byte map, [1] = two-byte 0f map).
2955 void I386_OPS_BASE::build_opcode_table(UINT32 features)
2958 i386_state *_cpustate = cpustate;
// Default every slot (16- and 32-bit variants) to the invalid-opcode handler.
2959 for (i=0; i < 256; i++)
2961 _cpustate->opcode_table1_16[i] = &I386_OPS_BASE::I386OP(invalid);
2962 _cpustate->opcode_table1_32[i] = &I386_OPS_BASE::I386OP(invalid);
2963 _cpustate->opcode_table2_16[i] = &I386_OPS_BASE::I386OP(invalid);
2964 _cpustate->opcode_table2_32[i] = &I386_OPS_BASE::I386OP(invalid);
2965 _cpustate->opcode_table366_16[i] = &I386_OPS_BASE::I386OP(invalid);
2966 _cpustate->opcode_table366_32[i] = &I386_OPS_BASE::I386OP(invalid);
2967 _cpustate->opcode_table3f2_16[i] = &I386_OPS_BASE::I386OP(invalid);
2968 _cpustate->opcode_table3f2_32[i] = &I386_OPS_BASE::I386OP(invalid);
2969 _cpustate->opcode_table3f3_16[i] = &I386_OPS_BASE::I386OP(invalid);
2970 _cpustate->opcode_table3f3_32[i] = &I386_OPS_BASE::I386OP(invalid);
2971 _cpustate->lock_table[0][i] = false;
2972 _cpustate->lock_table[1][i] = false;
// Install each enabled opcode into the table selected by its prefix flags.
2975 for (i=0; i < sizeof(x86_opcode_table)/sizeof(X86_OPCODE); i++)
2977 const X86_OPCODE *op = &x86_opcode_table[i];
2979 if ((op->flags & features))
2981 if (op->flags & OP_2BYTE)
// Two-byte opcodes are mirrored into the 66-prefixed table as well.
2983 _cpustate->opcode_table2_32[op->opcode] = op->handler32;
2984 _cpustate->opcode_table2_16[op->opcode] = op->handler16;
2985 _cpustate->opcode_table366_32[op->opcode] = op->handler32;
2986 _cpustate->opcode_table366_16[op->opcode] = op->handler16;
2987 _cpustate->lock_table[1][op->opcode] = op->lockable;
2989 else if (op->flags & OP_3BYTE66)
2991 _cpustate->opcode_table366_32[op->opcode] = op->handler32;
2992 _cpustate->opcode_table366_16[op->opcode] = op->handler16;
2994 else if (op->flags & OP_3BYTEF2)
2996 _cpustate->opcode_table3f2_32[op->opcode] = op->handler32;
2997 _cpustate->opcode_table3f2_16[op->opcode] = op->handler16;
2999 else if (op->flags & OP_3BYTEF3)
3001 _cpustate->opcode_table3f3_32[op->opcode] = op->handler32;
3002 _cpustate->opcode_table3f3_16[op->opcode] = op->handler16;
3004 else if (op->flags & OP_3BYTE38)
3006 _cpustate->opcode_table338_32[op->opcode] = op->handler32;
3007 _cpustate->opcode_table338_16[op->opcode] = op->handler16;
3009 else if (op->flags & OP_3BYTE3A)
3011 _cpustate->opcode_table33a_32[op->opcode] = op->handler32;
3012 _cpustate->opcode_table33a_16[op->opcode] = op->handler16;
3014 else if (op->flags & OP_4BYTE3866)
3016 _cpustate->opcode_table46638_32[op->opcode] = op->handler32;
3017 _cpustate->opcode_table46638_16[op->opcode] = op->handler16;
3019 else if (op->flags & OP_4BYTE3A66)
3021 _cpustate->opcode_table4663a_32[op->opcode] = op->handler32;
3022 _cpustate->opcode_table4663a_16[op->opcode] = op->handler16;
3024 else if (op->flags & OP_4BYTE38F2)
3026 _cpustate->opcode_table4f238_32[op->opcode] = op->handler32;
3027 _cpustate->opcode_table4f238_16[op->opcode] = op->handler16;
3029 else if (op->flags & OP_4BYTE3AF2)
3031 _cpustate->opcode_table4f23a_32[op->opcode] = op->handler32;
3032 _cpustate->opcode_table4f23a_16[op->opcode] = op->handler16;
3034 else if (op->flags & OP_4BYTE38F3)
3036 _cpustate->opcode_table4f338_32[op->opcode] = op->handler32;
3037 _cpustate->opcode_table4f338_16[op->opcode] = op->handler16;
// No prefix flag: plain one-byte opcode.
3041 _cpustate->opcode_table1_32[op->opcode] = op->handler32;
3042 _cpustate->opcode_table1_16[op->opcode] = op->handler16;
3043 _cpustate->lock_table[0][op->opcode] = op->lockable;
// Clear the entire emulated CPU state to power-on zeros: registers,
// segment/system descriptors, prefix/decoder latches, x87 and SSE state,
// and SMM/NMI latches.  Model-specific reset values are applied afterwards
// by the cpu_reset_* routines.
3050 void I386_OPS_BASE::zero_state()
3052 memset( &cpustate->reg, 0, sizeof(cpustate->reg) );
3053 memset( cpustate->sreg, 0, sizeof(cpustate->sreg) );
3056 cpustate->prev_eip = 0;
3057 cpustate->eflags = 0;
3058 cpustate->eflags_mask = 0;
3078 cpustate->performed_intersegment_jump = 0;
3079 cpustate->delayed_interrupt_enable = 0;
// Control/debug/test registers and all descriptor-table registers.
3080 memset( cpustate->cr, 0, sizeof(cpustate->cr) );
3081 memset( cpustate->dr, 0, sizeof(cpustate->dr) );
3082 memset( cpustate->tr, 0, sizeof(cpustate->tr) );
3083 memset( &cpustate->gdtr, 0, sizeof(cpustate->gdtr) );
3084 memset( &cpustate->idtr, 0, sizeof(cpustate->idtr) );
3085 memset( &cpustate->task, 0, sizeof(cpustate->task) );
3086 memset( &cpustate->ldtr, 0, sizeof(cpustate->ldtr) );
3088 cpustate->halted = 0;
// Per-instruction decoder latches.
3089 cpustate->operand_size = 0;
3090 cpustate->xmm_operand_size = 0;
3091 cpustate->address_size = 0;
3092 cpustate->operand_prefix = 0;
3093 cpustate->address_prefix = 0;
3094 cpustate->segment_prefix = 0;
3095 cpustate->segment_override = 0;
3096 cpustate->cycles = 0;
3097 cpustate->base_cycles = 0;
3098 cpustate->opcode = 0;
3099 cpustate->irq_state = 0;
3100 cpustate->a20_mask = 0;
// CPUID identification state.
3101 cpustate->cpuid_max_input_value_eax = 0;
3102 cpustate->cpuid_id0 = 0;
3103 cpustate->cpuid_id1 = 0;
3104 cpustate->cpuid_id2 = 0;
3105 cpustate->cpu_version = 0;
3106 cpustate->feature_flags = 0;
3108 cpustate->perfctr[0] = cpustate->perfctr[1] = 0;
// x87 FPU state.
3109 memset( cpustate->x87_reg, 0, sizeof(cpustate->x87_reg) );
3110 cpustate->x87_cw = 0;
3111 cpustate->x87_sw = 0;
3112 cpustate->x87_tw = 0;
3113 cpustate->x87_data_ptr = 0;
3114 cpustate->x87_inst_ptr = 0;
3115 cpustate->x87_opcode = 0;
// SSE state and SMM/NMI latches.
3116 memset( cpustate->sse_reg, 0, sizeof(cpustate->sse_reg) );
3117 cpustate->mxcsr = 0;
3118 cpustate->smm = false;
3119 cpustate->smi = false;
3120 cpustate->smi_latched = false;
3121 cpustate->nmi_masked = false;
3122 cpustate->nmi_latched = false;
3123 cpustate->smbase = 0;
3124 #ifdef DEBUG_MISSING_OPCODE
3125 memset( cpustate->opcode_bytes, 0, sizeof(cpustate->opcode_bytes) );
3126 cpustate->opcode_pc = 0;
3127 cpustate->opcode_bytes_length = 0;
// Reset to the i386 power-on state: CS maps the reset vector at
// ffff:fff0 (base 0xffff0000), data segments cover the low 64KB, and
// EDX reports family/model/stepping.
3131 void I386_OPS_BASE::cpu_reset_i386(void)
3134 vtlb_flush_dynamic(cpustate->vtlb);
// CS base 0xffff0000 + EIP 0xfff0 = reset vector 0xfffffff0.
3136 cpustate->sreg[CS].selector = 0xf000;
3137 cpustate->sreg[CS].base = 0xffff0000;
3138 cpustate->sreg[CS].limit = 0xffff;
3139 cpustate->sreg[CS].flags = 0x9b;
3140 cpustate->sreg[CS].valid = true;
3142 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3143 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3144 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3145 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
// Real-mode IVT at physical 0.
3147 cpustate->idtr.base = 0;
3148 cpustate->idtr.limit = 0x3ff;
3149 cpustate->smm = false;
3150 cpustate->smi_latched = false;
3151 cpustate->nmi_masked = false;
3152 cpustate->nmi_latched = false;
3154 cpustate->a20_mask = ~0;
3156 cpustate->cr[0] = 0x7fffffe0; // reserved bits set to 1
3157 cpustate->eflags = 0;
3158 cpustate->eflags_mask = 0x00037fd7;
3159 cpustate->eip = 0xfff0;
3163 // [ 3:0] Stepping ID
3164 // Family 3 (386), Model 0 (DX), Stepping 8 (D1)
3166 REG32(EDX) = (3 << 8) | (0 << 4) | (8);
3170 CHANGE_PC(cpustate->eip);
3173 void I386_OPS_BASE::pentium_smi()
3175 UINT32 smram_state = cpustate->smbase + 0xfe00;
3176 UINT32 old_cr0 = cpustate->cr[0];
3177 UINT32 old_flags = get_flags();
3182 cpustate->cr[0] &= ~(0x8000000d);
3184 // if(!cpustate->smiact.isnull())
3185 // cpustate->smiact(true);
3186 cpustate->smm = true;
3187 cpustate->smi_latched = false;
3190 WRITE32(cpustate->cr[4], smram_state+SMRAM_IP5_CR4);
3191 WRITE32(cpustate->sreg[ES].limit, smram_state+SMRAM_IP5_ESLIM);
3192 WRITE32(cpustate->sreg[ES].base, smram_state+SMRAM_IP5_ESBASE);
3193 WRITE32(cpustate->sreg[ES].flags, smram_state+SMRAM_IP5_ESACC);
3194 WRITE32(cpustate->sreg[CS].limit, smram_state+SMRAM_IP5_CSLIM);
3195 WRITE32(cpustate->sreg[CS].base, smram_state+SMRAM_IP5_CSBASE);
3196 WRITE32(cpustate->sreg[CS].flags, smram_state+SMRAM_IP5_CSACC);
3197 WRITE32(cpustate->sreg[SS].limit, smram_state+SMRAM_IP5_SSLIM);
3198 WRITE32(cpustate->sreg[SS].base, smram_state+SMRAM_IP5_SSBASE);
3199 WRITE32(cpustate->sreg[SS].flags, smram_state+SMRAM_IP5_SSACC);
3200 WRITE32(cpustate->sreg[DS].limit, smram_state+SMRAM_IP5_DSLIM);
3201 WRITE32(cpustate->sreg[DS].base, smram_state+SMRAM_IP5_DSBASE);
3202 WRITE32(cpustate->sreg[DS].flags, smram_state+SMRAM_IP5_DSACC);
3203 WRITE32(cpustate->sreg[FS].limit, smram_state+SMRAM_IP5_FSLIM);
3204 WRITE32(cpustate->sreg[FS].base, smram_state+SMRAM_IP5_FSBASE);
3205 WRITE32(cpustate->sreg[FS].flags, smram_state+SMRAM_IP5_FSACC);
3206 WRITE32(cpustate->sreg[GS].limit, smram_state+SMRAM_IP5_GSLIM);
3207 WRITE32(cpustate->sreg[GS].base, smram_state+SMRAM_IP5_GSBASE);
3208 WRITE32(cpustate->sreg[GS].flags, smram_state+SMRAM_IP5_GSACC);
3209 WRITE32(cpustate->ldtr.flags, smram_state+SMRAM_IP5_LDTACC);
3210 WRITE32(cpustate->ldtr.limit, smram_state+SMRAM_IP5_LDTLIM);
3211 WRITE32(cpustate->ldtr.base, smram_state+SMRAM_IP5_LDTBASE);
3212 WRITE32(cpustate->gdtr.limit, smram_state+SMRAM_IP5_GDTLIM);
3213 WRITE32(cpustate->gdtr.base, smram_state+SMRAM_IP5_GDTBASE);
3214 WRITE32(cpustate->idtr.limit, smram_state+SMRAM_IP5_IDTLIM);
3215 WRITE32(cpustate->idtr.base, smram_state+SMRAM_IP5_IDTBASE);
3216 WRITE32(cpustate->task.limit, smram_state+SMRAM_IP5_TRLIM);
3217 WRITE32(cpustate->task.base, smram_state+SMRAM_IP5_TRBASE);
3218 WRITE32(cpustate->task.flags, smram_state+SMRAM_IP5_TRACC);
3220 WRITE32(cpustate->sreg[ES].selector, smram_state+SMRAM_ES);
3221 WRITE32(cpustate->sreg[CS].selector, smram_state+SMRAM_CS);
3222 WRITE32(cpustate->sreg[SS].selector, smram_state+SMRAM_SS);
3223 WRITE32(cpustate->sreg[DS].selector, smram_state+SMRAM_DS);
3224 WRITE32(cpustate->sreg[FS].selector, smram_state+SMRAM_FS);
3225 WRITE32(cpustate->sreg[GS].selector, smram_state+SMRAM_GS);
3226 WRITE32(cpustate->ldtr.segment, smram_state+SMRAM_LDTR);
3227 WRITE32(cpustate->task.segment, smram_state+SMRAM_TR);
3229 WRITE32(cpustate->dr[7], smram_state+SMRAM_DR7);
3230 WRITE32(cpustate->dr[6], smram_state+SMRAM_DR6);
3231 WRITE32(REG32(EAX), smram_state+SMRAM_EAX);
3232 WRITE32(REG32(ECX), smram_state+SMRAM_ECX);
3233 WRITE32(REG32(EDX), smram_state+SMRAM_EDX);
3234 WRITE32(REG32(EBX), smram_state+SMRAM_EBX);
3235 WRITE32(REG32(ESP), smram_state+SMRAM_ESP);
3236 WRITE32(REG32(EBP), smram_state+SMRAM_EBP);
3237 WRITE32(REG32(ESI), smram_state+SMRAM_ESI);
3238 WRITE32(REG32(EDI), smram_state+SMRAM_EDI);
3239 WRITE32(cpustate->eip, smram_state+SMRAM_EIP);
3240 WRITE32(old_flags, smram_state+SMRAM_EAX);
3241 WRITE32(cpustate->cr[3], smram_state+SMRAM_CR3);
3242 WRITE32(old_cr0, smram_state+SMRAM_CR0);
3244 cpustate->sreg[DS].selector = cpustate->sreg[ES].selector = cpustate->sreg[FS].selector = cpustate->sreg[GS].selector = cpustate->sreg[SS].selector = 0;
3245 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3246 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffffffff;
3247 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x8093;
3248 cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid =true;
3249 cpustate->sreg[CS].selector = 0x3000; // pentium only, ppro sel = smbase >> 4
3250 cpustate->sreg[CS].base = cpustate->smbase;
3251 cpustate->sreg[CS].limit = 0xffffffff;
3252 cpustate->sreg[CS].flags = 0x809b;
3253 cpustate->sreg[CS].valid = true;
3254 cpustate->cr[4] = 0;
3255 cpustate->dr[7] = 0x400;
3256 cpustate->eip = 0x8000;
3258 cpustate->nmi_masked = true;
3259 CHANGE_PC(cpustate->eip);
// External interrupt-line input.  Any asserted line wakes a halted CPU;
// NMI is latched if currently masked (e.g. while in SMM), otherwise the
// maskable-IRQ line state is recorded for i386_check_irq_line().
3262 void I386_OPS_BASE::i386_set_irq_line(int irqline, int state)
3264 if (state != CLEAR_LINE && cpustate->halted)
3266 cpustate->halted = 0;
3269 if ( irqline == INPUT_LINE_NMI )
3271 /* NMI (I do not think that this is 100% right) */
// While nmi_masked (set on SMM entry) the NMI is remembered, not taken.
3272 if(cpustate->nmi_masked)
3274 cpustate->nmi_latched = true;
3282 cpustate->irq_state = state;
// A20 gate input: when disabled, physical address bit 20 is forced to 0
// via a20_mask.  The TLB is flushed since cached translations embed the
// old masking.
3286 void I386_OPS_BASE::i386_set_a20_line(int state)
3290 cpustate->a20_mask = ~0;
3294 cpustate->a20_mask = ~(1 << 20);
3296 // TODO: how does A20M and the tlb interact
3297 vtlb_flush_dynamic(cpustate->vtlb);
3300 // BASE execution : EXECUTE without DMA, BIOS and debugger.
3301 int I386_OPS_BASE::cpu_execute_i386(int cycles)
3303 CHANGE_PC(cpustate->eip);
3305 if (cpustate->halted || cpustate->busreq)
3308 int passed_cycles = max(1, cpustate->extra_cycles);
3309 // this is main cpu, cpustate->cycles is not used
3310 /*cpustate->cycles = */cpustate->extra_cycles = 0;
3311 cpustate->tsc += passed_cycles;
3312 return passed_cycles;
3314 cpustate->cycles += cycles;
3315 cpustate->base_cycles = cpustate->cycles;
3317 /* adjust for any interrupts that came in */
3318 cpustate->cycles -= cpustate->extra_cycles;
3319 cpustate->extra_cycles = 0;
3321 /* if busreq is raised, spin cpu while remained clock */
3322 if (cpustate->cycles > 0) {
3323 cpustate->cycles = 0;
3325 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3326 cpustate->tsc += passed_cycles;
3327 return passed_cycles;
3332 cpustate->cycles = 1;
3334 cpustate->cycles += cycles;
3336 cpustate->base_cycles = cpustate->cycles;
3338 /* adjust for any interrupts that came in */
3339 cpustate->cycles -= cpustate->extra_cycles;
3340 cpustate->extra_cycles = 0;
3342 while( cpustate->cycles > 0 && !cpustate->busreq )
3344 i386_check_irq_line();
3345 cpustate->operand_size = cpustate->sreg[CS].d;
3346 cpustate->xmm_operand_size = 0;
3347 cpustate->address_size = cpustate->sreg[CS].d;
3348 cpustate->operand_prefix = 0;
3349 cpustate->address_prefix = 0;
3352 int old_tf = cpustate->TF;
3354 cpustate->segment_prefix = 0;
3355 cpustate->prev_eip = cpustate->eip;
3356 cpustate->prev_pc = cpustate->pc;
3358 if(cpustate->delayed_interrupt_enable != 0)
3361 cpustate->delayed_interrupt_enable = 0;
3363 #ifdef DEBUG_MISSING_OPCODE
3364 cpustate->opcode_bytes_length = 0;
3365 cpustate->opcode_pc = cpustate->pc;
3369 I386OP(decode_opcode)();
3370 if(cpustate->TF && old_tf)
3372 cpustate->prev_eip = cpustate->eip;
3376 if(cpustate->lock && (cpustate->opcode != 0xf0))
3377 cpustate->lock = false;
3382 i386_trap_with_error(e&0xffffffff,0,0,e>>32);
3384 /* adjust for any interrupts that came in */
3385 cpustate->cycles -= cpustate->extra_cycles;
3386 cpustate->extra_cycles = 0;
3389 /* if busreq is raised, spin cpu while remained clock */
3390 if (cpustate->cycles > 0 && cpustate->busreq) {
3391 cpustate->cycles = 0;
3393 int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3394 cpustate->tsc += passed_cycles;
3395 return passed_cycles;
3398 /*************************************************************************/
// Debugger/externally-driven address translation: for the program space,
// run the address through the paging unit, then apply the A20 mask.
3400 int I386_OPS_BASE::cpu_translate_i386(void *cpudevice, address_spacenum space, int intention, offs_t *address)
3402 i386_state *cpu_state = (i386_state *)cpudevice;
3404 if(space == AS_PROGRAM)
3405 ret = i386_translate_address(intention, address, NULL);
3406 *address &= cpu_state->a20_mask;
3410 /*****************************************************************************/
// Model-specific init for the i486: adds the FPU and 486 opcode sets
// and the x87 dispatch table on top of the common setup.
3414 void *I386_OPS_BASE::cpu_init_i486(void)
3416 i386_common_init(32);
3417 build_opcode_table(OP_I386 | OP_FPU | OP_I486);
3418 build_x87_opcode_table();
3419 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I486];
3420 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I486];
// Reset to the i486 power-on state (reset vector ffff:fff0, CR0 with ET
// set, 486 EFLAGS mask, family/model/stepping in EDX).
3424 void I386_OPS_BASE::cpu_reset_i486(void)
3427 vtlb_flush_dynamic(cpustate->vtlb);
3429 cpustate->sreg[CS].selector = 0xf000;
3430 cpustate->sreg[CS].base = 0xffff0000;
3431 cpustate->sreg[CS].limit = 0xffff;
3432 cpustate->sreg[CS].flags = 0x009b;
3434 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3435 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3436 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3438 cpustate->idtr.base = 0;
3439 cpustate->idtr.limit = 0x3ff;
3441 cpustate->a20_mask = ~0;
3443 cpustate->cr[0] = 0x00000010;
3444 cpustate->eflags = 0;
3445 cpustate->eflags_mask = 0x00077fd7;
3446 cpustate->eip = 0xfff0;
3447 cpustate->smm = false;
3448 cpustate->smi_latched = false;
3449 cpustate->nmi_masked = false;
3450 cpustate->nmi_latched = false;
3456 // [ 3:0] Stepping ID
3457 // Family 4 (486), Model 0/1 (DX), Stepping 3
3459 REG32(EDX) = (4 << 8) | (0 << 4) | (3);
3461 CHANGE_PC(cpustate->eip);
3464 /*****************************************************************************/
// Model-specific init for the Pentium: larger TLB and the Pentium
// opcode set on top of 386/486/FPU.
3468 void *I386_OPS_BASE::cpu_init_pentium(void)
3470 // 64 dtlb small, 8 dtlb large, 32 itlb
3471 i386_common_init(96);
3472 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM);
3473 build_x87_opcode_table();
3474 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];
3475 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];
// Reset to the Pentium power-on state; additionally initializes MXCSR,
// the SMM base, the CPUID vendor string ("GenuineIntel") and the
// Pentium feature-flag set.
3479 void I386_OPS_BASE::cpu_reset_pentium(void)
3482 vtlb_flush_dynamic(cpustate->vtlb);
3484 cpustate->sreg[CS].selector = 0xf000;
3485 cpustate->sreg[CS].base = 0xffff0000;
3486 cpustate->sreg[CS].limit = 0xffff;
3487 cpustate->sreg[CS].flags = 0x009b;
3489 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3490 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3491 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3493 cpustate->idtr.base = 0;
3494 cpustate->idtr.limit = 0x3ff;
3496 cpustate->a20_mask = ~0;
3498 cpustate->cr[0] = 0x00000010;
3499 cpustate->eflags = 0x00200000;
3500 cpustate->eflags_mask = 0x003f7fd7;
3501 cpustate->eip = 0xfff0;
3502 cpustate->mxcsr = 0x1f80;
3503 cpustate->smm = false;
3504 cpustate->smi_latched = false;
// Default SMRAM base per Intel: 0x30000.
3505 cpustate->smbase = 0x30000;
3506 cpustate->nmi_masked = false;
3507 cpustate->nmi_latched = false;
3513 // [ 3:0] Stepping ID
3514 // Family 5 (Pentium), Model 2 (75 - 200MHz), Stepping 5
3516 REG32(EDX) = (5 << 8) | (2 << 4) | (5);
3518 cpustate->cpuid_id0 = 0x756e6547; // Genu
3519 cpustate->cpuid_id1 = 0x49656e69; // ineI
3520 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3522 cpustate->cpuid_max_input_value_eax = 0x01;
3523 cpustate->cpu_version = REG32(EDX);
3525 // [ 0:0] FPU on chip
3526 // [ 2:2] I/O breakpoints
3527 // [ 4:4] Time Stamp Counter
3528 // [ 5:5] Pentium CPU style model specific registers
3529 // [ 7:7] Machine Check Exception
3530 // [ 8:8] CMPXCHG8B instruction
3531 cpustate->feature_flags = 0x000001bf;
3533 CHANGE_PC(cpustate->eip);
3536 /*****************************************************************************/
// Model-specific init for the Cyrix MediaGX: Pentium-class opcode set
// plus the Cyrix extensions.
3540 void *I386_OPS_BASE::cpu_init_mediagx(void)
3542 // probably 32 unified
3543 i386_common_init(32);
3544 build_x87_opcode_table();
3545 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_CYRIX);
3546 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_MEDIAGX];
3547 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_MEDIAGX];
// Reset to the MediaGX power-on state; CPUID reports the Cyrix vendor
// string ("CyrixInstead") and FPU-only feature flags.
3551 void I386_OPS_BASE::cpu_reset_mediagx(void)
3554 vtlb_flush_dynamic(cpustate->vtlb);
3556 cpustate->sreg[CS].selector = 0xf000;
3557 cpustate->sreg[CS].base = 0xffff0000;
3558 cpustate->sreg[CS].limit = 0xffff;
3559 cpustate->sreg[CS].flags = 0x009b;
3561 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3562 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3563 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3565 cpustate->idtr.base = 0;
3566 cpustate->idtr.limit = 0x3ff;
3568 cpustate->a20_mask = ~0;
3570 cpustate->cr[0] = 0x00000010;
3571 cpustate->eflags = 0x00200000;
3572 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3573 cpustate->eip = 0xfff0;
3574 cpustate->smm = false;
3575 cpustate->smi_latched = false;
3576 cpustate->nmi_masked = false;
3577 cpustate->nmi_latched = false;
3583 // [ 3:0] Stepping ID
3584 // Family 4, Model 4 (MediaGX)
3586 REG32(EDX) = (4 << 8) | (4 << 4) | (1); /* TODO: is this correct? */
3588 cpustate->cpuid_id0 = 0x69727943; // Cyri
3589 cpustate->cpuid_id1 = 0x736e4978; // xIns
3590 cpustate->cpuid_id2 = 0x6d616574; // tead
3592 cpustate->cpuid_max_input_value_eax = 0x01;
3593 cpustate->cpu_version = REG32(EDX);
3595 // [ 0:0] FPU on chip
3596 cpustate->feature_flags = 0x00000001;
3598 CHANGE_PC(cpustate->eip);
3601 /*****************************************************************************/
3602 /* Intel Pentium Pro */
// Model-specific init for the Pentium Pro: adds the PPro opcode set;
// cycle tables are borrowed from the Pentium for now.
3604 void *I386_OPS_BASE::cpu_init_pentium_pro(void)
3606 // 64 dtlb small, 32 itlb
3607 i386_common_init(96);
3608 build_x87_opcode_table();
3609 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO);
3610 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3611 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset to the Pentium Pro power-on state: CR0 gains the cache-control
// bits (0x60000010), CPUID max leaf is 2, and CMOV/FCMOV is advertised
// in the feature flags.
3615 void I386_OPS_BASE::cpu_reset_pentium_pro(void)
3618 vtlb_flush_dynamic(cpustate->vtlb);
3620 cpustate->sreg[CS].selector = 0xf000;
3621 cpustate->sreg[CS].base = 0xffff0000;
3622 cpustate->sreg[CS].limit = 0xffff;
3623 cpustate->sreg[CS].flags = 0x009b;
3625 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3626 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3627 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3629 cpustate->idtr.base = 0;
3630 cpustate->idtr.limit = 0x3ff;
3632 cpustate->a20_mask = ~0;
3634 cpustate->cr[0] = 0x60000010;
3635 cpustate->eflags = 0x00200000;
3636 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3637 cpustate->eip = 0xfff0;
3638 cpustate->mxcsr = 0x1f80;
3639 cpustate->smm = false;
3640 cpustate->smi_latched = false;
3641 cpustate->smbase = 0x30000;
3642 cpustate->nmi_masked = false;
3643 cpustate->nmi_latched = false;
3649 // [ 3:0] Stepping ID
3650 // Family 6, Model 1 (Pentium Pro)
3652 REG32(EDX) = (6 << 8) | (1 << 4) | (1); /* TODO: is this correct? */
3654 cpustate->cpuid_id0 = 0x756e6547; // Genu
3655 cpustate->cpuid_id1 = 0x49656e69; // ineI
3656 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3658 cpustate->cpuid_max_input_value_eax = 0x02;
3659 cpustate->cpu_version = REG32(EDX);
3661 // [ 0:0] FPU on chip
3662 // [ 2:2] I/O breakpoints
3663 // [ 4:4] Time Stamp Counter
3664 // [ 5:5] Pentium CPU style model specific registers
3665 // [ 7:7] Machine Check Exception
3666 // [ 8:8] CMPXCHG8B instruction
3667 // [15:15] CMOV and FCMOV
3669 cpustate->feature_flags = 0x000081bf;
3671 CHANGE_PC(cpustate->eip);
3674 /*****************************************************************************/
3675 /* Intel Pentium MMX */
// Model-specific init for the Pentium MMX (P55C): adds the MMX opcode
// set; cycle tables are borrowed from the Pentium for now.
3677 void *I386_OPS_BASE::cpu_init_pentium_mmx(void)
3679 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3680 i386_common_init(96);
3681 build_x87_opcode_table();
3682 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_MMX);
3683 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3684 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset to the Pentium MMX (P55C) power-on state; feature flags add the
// MMX bit (23) on top of the Pentium set.
3688 void I386_OPS_BASE::cpu_reset_pentium_mmx(void)
3691 vtlb_flush_dynamic(cpustate->vtlb);
3693 cpustate->sreg[CS].selector = 0xf000;
3694 cpustate->sreg[CS].base = 0xffff0000;
3695 cpustate->sreg[CS].limit = 0xffff;
3696 cpustate->sreg[CS].flags = 0x009b;
3698 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3699 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3700 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
3702 cpustate->idtr.base = 0;
3703 cpustate->idtr.limit = 0x3ff;
3705 cpustate->a20_mask = ~0;
3707 cpustate->cr[0] = 0x60000010;
3708 cpustate->eflags = 0x00200000;
3709 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3710 cpustate->eip = 0xfff0;
3711 cpustate->mxcsr = 0x1f80;
3712 cpustate->smm = false;
3713 cpustate->smi_latched = false;
3714 cpustate->smbase = 0x30000;
3715 cpustate->nmi_masked = false;
3716 cpustate->nmi_latched = false;
3722 // [ 3:0] Stepping ID
3723 // Family 5, Model 4 (P55C)
3725 REG32(EDX) = (5 << 8) | (4 << 4) | (1);
3727 cpustate->cpuid_id0 = 0x756e6547; // Genu
3728 cpustate->cpuid_id1 = 0x49656e69; // ineI
3729 cpustate->cpuid_id2 = 0x6c65746e; // ntel
3731 cpustate->cpuid_max_input_value_eax = 0x01;
3732 cpustate->cpu_version = REG32(EDX);
3734 // [ 0:0] FPU on chip
3735 // [ 2:2] I/O breakpoints
3736 // [ 4:4] Time Stamp Counter
3737 // [ 5:5] Pentium CPU style model specific registers
3738 // [ 7:7] Machine Check Exception
3739 // [ 8:8] CMPXCHG8B instruction
3740 // [23:23] MMX instructions
3741 cpustate->feature_flags = 0x008001bf;
3743 CHANGE_PC(cpustate->eip);
3746 /*****************************************************************************/
3747 /* Intel Pentium II */
// Construct a Pentium II core: shared i386 state, x87 decode table, and an
// opcode table adding OP_PPRO and OP_MMX on top of the Pentium set. Returns
// the cpustate pointer per the other cpu_init_* entry points.
3749 void *I386_OPS_BASE::cpu_init_pentium2(void)
3751 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
// 96 = 64 data + 32 instruction small-page entries (same sizing as Pentium).
3752 i386_common_init(96);
3753 build_x87_opcode_table();
3754 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX);
// No P6-specific timing exists yet, so borrow the Pentium cycle tables.
3755 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3756 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium II core to its power-on state: real-mode segments with
// execution starting at the reset vector, real-mode IVT, CR0/EFLAGS reset
// values, idle SMM state, and the CPUID identity of a Klamath.
3760 void I386_OPS_BASE::cpu_reset_pentium2(void)
3763 vtlb_flush_dynamic(cpustate->vtlb);
// CS base 0xffff0000 with EIP 0xfff0 puts the first fetch at the reset
// vector 0xfffffff0 while CS reads back as real-mode selector 0xf000.
3765 cpustate->sreg[CS].selector = 0xf000;
3766 cpustate->sreg[CS].base = 0xffff0000;
3767 cpustate->sreg[CS].limit = 0xffff;
3768 cpustate->sreg[CS].flags = 0x009b;
// Data/stack segments: 64KB real-mode segments at base 0, writable (AR 0x92).
3770 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3771 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3772 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
// Real-mode interrupt vector table: 256 four-byte vectors at physical 0.
3774 cpustate->idtr.base = 0;
3775 cpustate->idtr.limit = 0x3ff;
// A20 line enabled -- no 1MB address wrap masking.
3777 cpustate->a20_mask = ~0;
// CR0 reset value: CD|NW|ET set, PE clear, so the CPU starts in real mode.
3779 cpustate->cr[0] = 0x60000010;
// NOTE(review): only bit 21 (ID) is set; hardware resets EFLAGS to 0x00000002
// -- confirm this is intentional (consistent across all reset functions here).
3780 cpustate->eflags = 0x00200000;
3781 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3782 cpustate->eip = 0xfff0;
// MXCSR power-on default.
3783 cpustate->mxcsr = 0x1f80;
// System Management Mode idle, with the architectural SMBASE default.
3784 cpustate->smm = false;
3785 cpustate->smi_latched = false;
3786 cpustate->smbase = 0x30000;
3787 cpustate->nmi_masked = false;
3788 cpustate->nmi_latched = false;
// CPUID leaf 1 EAX signature: [11:8] Family, [7:4] Model,
3794 // [ 3:0] Stepping ID
3795 // Family 6, Model 3 (Pentium II / Klamath)
3797 REG32(EDX) = (6 << 8) | (3 << 4) | (1); /* TODO: is this correct? */
// CPUID leaf 0 vendor string "GenuineIntel" (EBX, EDX, ECX word order).
3799 cpustate->cpuid_id0 = 0x756e6547; // Genu
3800 cpustate->cpuid_id1 = 0x49656e69; // ineI
3801 cpustate->cpuid_id2 = 0x6c65746e; // ntel
// Highest supported standard CPUID leaf.
3803 cpustate->cpuid_max_input_value_eax = 0x02;
3804 cpustate->cpu_version = REG32(EDX);
// CPUID leaf 1 EDX feature bits:
3806 // [ 0:0] FPU on chip
// mask 0x008081bf sets FPU, VME, DE, PSE, TSC, MSR, MCE, CX8, CMOV and MMX.
3807 cpustate->feature_flags = 0x008081bf; // TODO: enable relevant flags here
// Re-derive the fetch pointer from the freshly reset CS:EIP.
3809 CHANGE_PC(cpustate->eip);
3812 /*****************************************************************************/
3813 /* Intel Pentium III */
// Construct a Pentium III core: shared i386 state, x87 decode table, and an
// opcode table adding OP_SSE on top of the Pentium II set. Returns the
// cpustate pointer per the other cpu_init_* entry points.
3815 void *I386_OPS_BASE::cpu_init_pentium3(void)
3817 // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
// 96 = 64 data + 32 instruction small-page entries (same sizing as Pentium).
3818 i386_common_init(96);
3819 build_x87_opcode_table();
3820 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE);
// No P6/SSE-specific timing exists yet, so borrow the Pentium cycle tables.
3821 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3822 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium III core to its power-on state: real-mode segments with
// execution starting at the reset vector, real-mode IVT, CR0/EFLAGS reset
// values, idle SMM state, and the CPUID identity of a Coppermine.
3826 void I386_OPS_BASE::cpu_reset_pentium3(void)
3829 vtlb_flush_dynamic(cpustate->vtlb);
// CS base 0xffff0000 with EIP 0xfff0 puts the first fetch at the reset
// vector 0xfffffff0 while CS reads back as real-mode selector 0xf000.
3831 cpustate->sreg[CS].selector = 0xf000;
3832 cpustate->sreg[CS].base = 0xffff0000;
3833 cpustate->sreg[CS].limit = 0xffff;
3834 cpustate->sreg[CS].flags = 0x009b;
// Data/stack segments: 64KB real-mode segments at base 0, writable (AR 0x92).
3836 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3837 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3838 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
// Real-mode interrupt vector table: 256 four-byte vectors at physical 0.
3840 cpustate->idtr.base = 0;
3841 cpustate->idtr.limit = 0x3ff;
// A20 line enabled -- no 1MB address wrap masking.
3843 cpustate->a20_mask = ~0;
// CR0 reset value: CD|NW|ET set, PE clear, so the CPU starts in real mode.
3845 cpustate->cr[0] = 0x60000010;
// NOTE(review): only bit 21 (ID) is set; hardware resets EFLAGS to 0x00000002
// -- confirm this is intentional (consistent across all reset functions here).
3846 cpustate->eflags = 0x00200000;
3847 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3848 cpustate->eip = 0xfff0;
// MXCSR power-on default.
3849 cpustate->mxcsr = 0x1f80;
// System Management Mode idle, with the architectural SMBASE default.
3850 cpustate->smm = false;
3851 cpustate->smi_latched = false;
3852 cpustate->smbase = 0x30000;
3853 cpustate->nmi_masked = false;
3854 cpustate->nmi_latched = false;
// CPUID leaf 1 EAX signature: [11:8] Family, [7:4] Model,
3860 // [ 3:0] Stepping ID
3861 // Family 6, Model 8 (Pentium III / Coppermine)
3863 REG32(EDX) = (6 << 8) | (8 << 4) | (10);
// CPUID leaf 0 vendor string "GenuineIntel" (EBX, EDX, ECX word order).
3865 cpustate->cpuid_id0 = 0x756e6547; // Genu
3866 cpustate->cpuid_id1 = 0x49656e69; // ineI
3867 cpustate->cpuid_id2 = 0x6c65746e; // ntel
// Highest supported standard CPUID leaf (leaf 3 = processor serial number).
3869 cpustate->cpuid_max_input_value_eax = 0x03;
3870 cpustate->cpu_version = REG32(EDX);
// CPUID leaf 1 EDX feature bits (mask 0x00002011 = bits 0, 4, 13):
3872 // [ 0:0] FPU on chip
3873 // [ 4:4] Time Stamp Counter
// [13:13] PGE (PTE Global Bit)
// NOTE(review): MMX/SSE/CMOV feature bits are not advertised even though the
// decoder is built with OP_MMX | OP_SSE above -- covered by the TODO below.
3875 cpustate->feature_flags = 0x00002011; // TODO: enable relevant flags here
// Re-derive the fetch pointer from the freshly reset CS:EIP.
3877 CHANGE_PC(cpustate->eip);
3880 /*****************************************************************************/
3881 /* Intel Pentium 4 */
// Construct a Pentium 4 core: shared i386 state, x87 decode table, and an
// opcode table adding OP_SSE2 on top of the Pentium III set. Returns the
// cpustate pointer per the other cpu_init_* entry points.
3883 void *I386_OPS_BASE::cpu_init_pentium4(void)
3885 // 128 dtlb, 64 itlb
// NOTE(review): 128 + 64 = 192, not 196; the 196 below may be a typo carried
// over from upstream -- confirm against i386_common_init()'s expectations.
3886 i386_common_init(196);
3887 build_x87_opcode_table();
3888 build_opcode_table(OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE | OP_SSE2);
// No NetBurst-specific timing exists yet, so borrow the Pentium cycle tables.
3889 cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
3890 cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM]; // TODO: generate own cycle tables
// Reset the Pentium 4 core to its power-on state: real-mode segments with
// execution starting at the reset vector, real-mode IVT, CR0/EFLAGS reset
// values, idle SMM state, and the CPUID identity of a Willamette.
3894 void I386_OPS_BASE::cpu_reset_pentium4(void)
3897 vtlb_flush_dynamic(cpustate->vtlb);
// CS base 0xffff0000 with EIP 0xfff0 puts the first fetch at the reset
// vector 0xfffffff0 while CS reads back as real-mode selector 0xf000.
3899 cpustate->sreg[CS].selector = 0xf000;
3900 cpustate->sreg[CS].base = 0xffff0000;
3901 cpustate->sreg[CS].limit = 0xffff;
3902 cpustate->sreg[CS].flags = 0x009b;
// Data/stack segments: 64KB real-mode segments at base 0, writable (AR 0x92).
3904 cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3905 cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3906 cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0092;
// Real-mode interrupt vector table: 256 four-byte vectors at physical 0.
3908 cpustate->idtr.base = 0;
3909 cpustate->idtr.limit = 0x3ff;
// A20 line enabled -- no 1MB address wrap masking.
3911 cpustate->a20_mask = ~0;
// CR0 reset value: CD|NW|ET set, PE clear, so the CPU starts in real mode.
3913 cpustate->cr[0] = 0x60000010;
// NOTE(review): only bit 21 (ID) is set; hardware resets EFLAGS to 0x00000002
// -- confirm this is intentional (consistent across all reset functions here).
3914 cpustate->eflags = 0x00200000;
3915 cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3916 cpustate->eip = 0xfff0;
// MXCSR power-on default.
3917 cpustate->mxcsr = 0x1f80;
// System Management Mode idle, with the architectural SMBASE default.
3918 cpustate->smm = false;
3919 cpustate->smi_latched = false;
3920 cpustate->smbase = 0x30000;
3921 cpustate->nmi_masked = false;
3922 cpustate->nmi_latched = false;
// CPUID leaf 1 EAX signature (family 0xF uses the extended fields):
3926 // [27:20] Extended family
3927 // [19:16] Extended model
// [11: 8] Family
// [ 7: 4] Model
3931 // [ 3: 0] Stepping ID
3932 // Family 15, Model 0 (Pentium 4 / Willamette)
3934 REG32(EDX) = (0 << 20) | (0xf << 8) | (0 << 4) | (1);
// CPUID leaf 0 vendor string "GenuineIntel" (EBX, EDX, ECX word order).
3936 cpustate->cpuid_id0 = 0x756e6547; // Genu
3937 cpustate->cpuid_id1 = 0x49656e69; // ineI
3938 cpustate->cpuid_id2 = 0x6c65746e; // ntel
// Highest supported standard CPUID leaf.
3940 cpustate->cpuid_max_input_value_eax = 0x02;
3941 cpustate->cpu_version = REG32(EDX);
// CPUID leaf 1 EDX feature bits (only bit 0 advertised so far):
3943 // [ 0:0] FPU on chip
3944 cpustate->feature_flags = 0x00000001; // TODO: enable relevant flags here
// Re-derive the fetch pointer from the freshly reset CS:EIP.
3946 CHANGE_PC(cpustate->eip);