csp-qt/common_source_project-fm7.git: source/src/vm/mame/emu/cpu/i386/i386.c
1 // license:BSD-3-Clause
2 // copyright-holders:Ville Linde, Barry Rodewald, Carl, Philip Bennett
3 /*
4     Intel 386 emulator
5
6     Written by Ville Linde
7
8     Currently supports:
9         Intel 386
10         Intel 486
11         Intel Pentium
12         Cyrix MediaGX
13         Intel Pentium MMX
14         Intel Pentium Pro
15         Intel Pentium II
16         Intel Pentium III
17         Intel Pentium 4
18 */
19
20 //#include "emu.h"
21 //#include "debugger.h"
22 #include "i386priv.h"
23 //#include "i386.h"
24
25 //#include "debug/debugcpu.h"
26
27 /* 'i386' seems to be predefined as a macro by mingw-gcc */
28 #undef i386
29
30 static CPU_RESET( CPU_MODEL );
31
32 int i386_parity_table[256];
33 MODRM_TABLE i386_MODRM_table[256];
34
35 static void i386_trap_with_error(i386_state* cpustate, int irq, int irq_gate, int trap_level, UINT32 err);
36 static void i286_task_switch(i386_state* cpustate, UINT16 selector, UINT8 nested);
37 static void i386_task_switch(i386_state* cpustate, UINT16 selector, UINT8 nested);
38 static void build_opcode_table(i386_state *cpustate, UINT32 features);
39 static void zero_state(i386_state *cpustate);
40 static void pentium_smi(i386_state* cpustate);
41
42 #define FAULT(fault,error) {cpustate->ext = 1; i386_trap_with_error(cpustate,fault,0,0,error); return;}
43 #define FAULT_EXP(fault,error) {cpustate->ext = 1; i386_trap_with_error(cpustate,fault,0,trap_level+1,error); return;}
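/* Note (added for clarity): both macros flag the fault as exception-sourced
   (cpustate->ext = 1), raise it through i386_trap_with_error(), and then
   return from the calling function, so a caller's fault path never falls
   through to the code after a FAULT()/FAULT_EXP(). */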
44
45 /*************************************************************************/
46
47 static UINT32 i386_load_protected_mode_segment(i386_state *cpustate, I386_SREG *seg, UINT64 *desc )
48 {
49         UINT32 v1,v2;
50         UINT32 base, limit;
51         int entry;
52
53         if(!seg->selector)
54         {
55                 seg->flags = 0;
56                 seg->base = 0;
57                 seg->limit = 0;
58                 seg->d = 0;
59                 seg->valid = false;
60                 return 0;
61         }
62
63         if ( seg->selector & 0x4 )
64         {
65                 base = cpustate->ldtr.base;
66                 limit = cpustate->ldtr.limit;
67         } else {
68                 base = cpustate->gdtr.base;
69                 limit = cpustate->gdtr.limit;
70         }
71
72         entry = seg->selector & ~0x7;
73         if (limit == 0 || entry + 7 > limit)
74                 return 0;
75
76         v1 = READ32PL0(cpustate, base + entry );
77         v2 = READ32PL0(cpustate, base + entry + 4 );
78
79         seg->flags = (v2 >> 8) & 0xf0ff;
80         seg->base = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff);
81         seg->limit = (v2 & 0xf0000) | (v1 & 0xffff);
82         if (seg->flags & 0x8000)
83                 seg->limit = (seg->limit << 12) | 0xfff;
84         seg->d = (seg->flags & 0x4000) ? 1 : 0;
85         seg->valid = true;
86
87         if(desc)
88                 *desc = ((UINT64)v2<<32)|v1;
89         return 1;
90 }
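/* Illustrative sketch (added for clarity, not part of the original source):
   how the two descriptor dwords read above map onto base, limit and flags.
   It mirrors the decoding performed by i386_load_protected_mode_segment and
   is kept under #if 0 so it is never compiled. */
#if 0
static void decode_descriptor_example(UINT32 v1, UINT32 v2)
{
        UINT16 flags = (v2 >> 8) & 0xf0ff;      /* access byte (type/S/DPL/P) plus the AVL/D/G bits */
        UINT32 base  = (v2 & 0xff000000) | ((v2 & 0xff) << 16) | ((v1 >> 16) & 0xffff);
        UINT32 limit = (v2 & 0x000f0000) | (v1 & 0xffff);
        if (flags & 0x8000)                     /* G bit: limit counts 4KB pages */
                limit = (limit << 12) | 0xfff;
        /* Example: v1=0x0000ffff, v2=0x00cf9200 is a flat writable data segment:
           base=0x00000000, limit=0xffffffff, DPL=0, present, 32-bit (D=1). */
}
#endif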
91
92 static void i386_load_call_gate(i386_state* cpustate, I386_CALL_GATE *gate)
93 {
94         UINT32 v1,v2;
95         UINT32 base,limit;
96         int entry;
97
98         if ( gate->segment & 0x4 )
99         {
100                 base = cpustate->ldtr.base;
101                 limit = cpustate->ldtr.limit;
102         } else {
103                 base = cpustate->gdtr.base;
104                 limit = cpustate->gdtr.limit;
105         }
106
107         entry = gate->segment & ~0x7;
108         if (limit == 0 || entry + 7 > limit)
109                 return;
110
111         v1 = READ32PL0(cpustate, base + entry );
112         v2 = READ32PL0(cpustate, base + entry + 4 );
113
114         /* Note that for task gates, offset and dword_count are not used */
115         gate->selector = (v1 >> 16) & 0xffff;
116         gate->offset = (v1 & 0x0000ffff) | (v2 & 0xffff0000);
117         gate->ar = (v2 >> 8) & 0xff;
118         gate->dword_count = v2 & 0x001f;
119         gate->present = (gate->ar >> 7) & 0x01;
120         gate->dpl = (gate->ar >> 5) & 0x03;
121 }
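/* Worked example (added for clarity, not in the original source): for a gate
   whose dwords are v1=0x00081000 and v2=0x0000ec00, the code above yields
   selector=0x0008, offset=0x00001000, ar=0xec (present, DPL=3, type 0x0c =
   386 call gate) and dword_count=0. */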
122
123 static void i386_set_descriptor_accessed(i386_state *cpustate, UINT16 selector)
124 {
125         // the selector is assumed to be valid here, so there is no need to check it again
126         UINT32 base, addr;
127         UINT8 rights;
128         if(!(selector & ~3))
129                 return;
130
131         if ( selector & 0x4 )
132                 base = cpustate->ldtr.base;
133         else
134                 base = cpustate->gdtr.base;
135
136         addr = base + (selector & ~7) + 5;
137         i386_translate_address(cpustate, TRANSLATE_READ, &addr, NULL);
138         rights = cpustate->program->read_data8(addr);
139         // Should a fault be thrown if the table is read only?
140         cpustate->program->write_data8(addr, rights | 1);
141 }
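/* Worked example (added for clarity): for selector 0x0013 (GDT, index 2,
   RPL 3) the access-rights byte sits at gdtr.base + (0x0013 & ~7) + 5 =
   gdtr.base + 0x15, and OR-ing in bit 0 marks the descriptor as accessed. */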
142
143 static void i386_load_segment_descriptor(i386_state *cpustate, int segment )
144 {
145         if (PROTECTED_MODE)
146         {
147                 UINT16 old_flags = cpustate->sreg[segment].flags;
148                 if (!V8086_MODE)
149                 {
150                         i386_load_protected_mode_segment(cpustate, &cpustate->sreg[segment], NULL );
151                         if(cpustate->sreg[segment].selector)
152                         {
153                                 i386_set_descriptor_accessed(cpustate, cpustate->sreg[segment].selector);
154                                 cpustate->sreg[segment].flags |= 0x0001;
155                         }
156                 }
157                 else
158                 {
159                         cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
160                         cpustate->sreg[segment].limit = 0xffff;
161                         cpustate->sreg[segment].flags = (segment == CS) ? 0x00fb : 0x00f3;
162                         cpustate->sreg[segment].d = 0;
163                         cpustate->sreg[segment].valid = true;
164                 }
165 //              if (segment == CS && cpustate->sreg[segment].flags != old_flags)
166 //                      debugger_privilege_hook();
167         }
168         else
169         {
170                 cpustate->sreg[segment].base = cpustate->sreg[segment].selector << 4;
171                 cpustate->sreg[segment].d = 0;
172                 cpustate->sreg[segment].valid = true;
173
174                 if( segment == CS )
175                 {
176                         if( !cpustate->performed_intersegment_jump )
177                                 cpustate->sreg[segment].base |= 0xfff00000;
178                         if(cpustate->cpu_version < 0x5000)
179                                 cpustate->sreg[segment].flags = 0x93;
180                 }
181         }
182 }
183
184 /* Retrieves the stack selector located in the current TSS */
185 static UINT32 i386_get_stack_segment(i386_state* cpustate, UINT8 privilege)
186 {
187         UINT32 ret;
188         if(privilege >= 3)
189                 return 0;
190
191         if(cpustate->task.flags & 8)
192                 ret = READ32PL0(cpustate,(cpustate->task.base+8) + (8*privilege));
193         else
194                 ret = READ16PL0(cpustate,(cpustate->task.base+4) + (4*privilege));
195
196         return ret;
197 }
198
199 /* Retrieves the stack pointer located in the current TSS */
200 static UINT32 i386_get_stack_ptr(i386_state* cpustate, UINT8 privilege)
201 {
202         UINT32 ret;
203         if(privilege >= 3)
204                 return 0;
205
206         if(cpustate->task.flags & 8)
207                 ret = READ32PL0(cpustate,(cpustate->task.base+4) + (8*privilege));
208         else
209                 ret = READ16PL0(cpustate,(cpustate->task.base+2) + (4*privilege));
210
211         return ret;
212 }
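/* TSS layout assumed by the two helpers above (summary added for clarity):
 *   32-bit TSS (task.flags bit 3 set): ESP(n) at +0x04 + 8*n, SS(n) at +0x08 + 8*n
 *   16-bit TSS:                        SP(n)  at +0x02 + 4*n, SS(n) at +0x04 + 4*n
 * Only rings 0-2 have inner-stack entries in the TSS, hence the early
 * return 0 for privilege >= 3. */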
213
214 static UINT32 get_flags(i386_state *cpustate)
215 {
216         UINT32 f = 0x2;
217         f |= cpustate->CF;
218         f |= cpustate->PF << 2;
219         f |= cpustate->AF << 4;
220         f |= cpustate->ZF << 6;
221         f |= cpustate->SF << 7;
222         f |= cpustate->TF << 8;
223         f |= cpustate->IF << 9;
224         f |= cpustate->DF << 10;
225         f |= cpustate->OF << 11;
226         f |= cpustate->IOP1 << 12;
227         f |= cpustate->IOP2 << 13;
228         f |= cpustate->NT << 14;
229         f |= cpustate->RF << 16;
230         f |= cpustate->VM << 17;
231         f |= cpustate->AC << 18;
232         f |= cpustate->VIF << 19;
233         f |= cpustate->VIP << 20;
234         f |= cpustate->ID << 21;
235         return (cpustate->eflags & ~cpustate->eflags_mask) | (f & cpustate->eflags_mask);
236 }
237
238 static void set_flags(i386_state *cpustate, UINT32 f )
239 {
240         f &= cpustate->eflags_mask;
241         cpustate->CF = (f & 0x1) ? 1 : 0;
242         cpustate->PF = (f & 0x4) ? 1 : 0;
243         cpustate->AF = (f & 0x10) ? 1 : 0;
244         cpustate->ZF = (f & 0x40) ? 1 : 0;
245         cpustate->SF = (f & 0x80) ? 1 : 0;
246         cpustate->TF = (f & 0x100) ? 1 : 0;
247         cpustate->IF = (f & 0x200) ? 1 : 0;
248         cpustate->DF = (f & 0x400) ? 1 : 0;
249         cpustate->OF = (f & 0x800) ? 1 : 0;
250         cpustate->IOP1 = (f & 0x1000) ? 1 : 0;
251         cpustate->IOP2 = (f & 0x2000) ? 1 : 0;
252         cpustate->NT = (f & 0x4000) ? 1 : 0;
253         cpustate->RF = (f & 0x10000) ? 1 : 0;
254         cpustate->VM = (f & 0x20000) ? 1 : 0;
255         cpustate->AC = (f & 0x40000) ? 1 : 0;
256         cpustate->VIF = (f & 0x80000) ? 1 : 0;
257         cpustate->VIP = (f & 0x100000) ? 1 : 0;
258         cpustate->ID = (f & 0x200000) ? 1 : 0;
259         cpustate->eflags = f;
260 }
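/* Worked example (added for clarity): the common real-mode flags value
   0x00000246 decodes as PF=1 (bit 2), ZF=1 (bit 6) and IF=1 (bit 9); bit 1 is
   the always-set reserved bit that get_flags() seeds with 0x2.  Flag bits
   outside eflags_mask (e.g. AC/ID/VIF/VIP on CPU models that lack them) are
   taken from the stored eflags word rather than the individual flag fields. */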
261
262 static void sib_byte(i386_state *cpustate,UINT8 mod, UINT32* out_ea, UINT8* out_segment)
263 {
264         UINT32 ea = 0;
265         UINT8 segment = 0;
266         UINT8 scale, i, base;
267         UINT8 sib = FETCH(cpustate);
268         scale = (sib >> 6) & 0x3;
269         i = (sib >> 3) & 0x7;
270         base = sib & 0x7;
271
272         switch( base )
273         {
274                 case 0: ea = REG32(EAX); segment = DS; break;
275                 case 1: ea = REG32(ECX); segment = DS; break;
276                 case 2: ea = REG32(EDX); segment = DS; break;
277                 case 3: ea = REG32(EBX); segment = DS; break;
278                 case 4: ea = REG32(ESP); segment = SS; break;
279                 case 5:
280                         if( mod == 0 ) {
281                                 ea = FETCH32(cpustate);
282                                 segment = DS;
283                         } else if( mod == 1 ) {
284                                 ea = REG32(EBP);
285                                 segment = SS;
286                         } else if( mod == 2 ) {
287                                 ea = REG32(EBP);
288                                 segment = SS;
289                         }
290                         break;
291                 case 6: ea = REG32(ESI); segment = DS; break;
292                 case 7: ea = REG32(EDI); segment = DS; break;
293         }
294         switch( i )
295         {
296                 case 0: ea += REG32(EAX) * (1 << scale); break;
297                 case 1: ea += REG32(ECX) * (1 << scale); break;
298                 case 2: ea += REG32(EDX) * (1 << scale); break;
299                 case 3: ea += REG32(EBX) * (1 << scale); break;
300                 case 4: break;
301                 case 5: ea += REG32(EBP) * (1 << scale); break;
302                 case 6: ea += REG32(ESI) * (1 << scale); break;
303                 case 7: ea += REG32(EDI) * (1 << scale); break;
304         }
305         *out_ea = ea;
306         *out_segment = segment;
307 }
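/* Worked example (added for clarity): SIB byte 0x98 has scale=2, index=3 (EBX)
   and base=0 (EAX), so the contribution is EAX + EBX*4 with DS as the default
   segment.  The special case base=5 with mod=0 substitutes a fetched 32-bit
   displacement (and DS) for EBP. */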
308
309 static void modrm_to_EA(i386_state *cpustate,UINT8 mod_rm, UINT32* out_ea, UINT8* out_segment)
310 {
311         INT8 disp8;
312         INT16 disp16;
313         INT32 disp32;
314         UINT8 mod = (mod_rm >> 6) & 0x3;
315         UINT8 rm = mod_rm & 0x7;
316         UINT32 ea;
317         UINT8 segment;
318
319         if( mod_rm >= 0xc0 )
320                 fatalerror("i386: Called modrm_to_EA with modrm value %02X!\n",mod_rm);
321
322
323         if( cpustate->address_size ) {
324                 switch( rm )
325                 {
326                         default:
327                         case 0: ea = REG32(EAX); segment = DS; break;
328                         case 1: ea = REG32(ECX); segment = DS; break;
329                         case 2: ea = REG32(EDX); segment = DS; break;
330                         case 3: ea = REG32(EBX); segment = DS; break;
331                         case 4: sib_byte(cpustate, mod, &ea, &segment ); break;
332                         case 5:
333                                 if( mod == 0 ) {
334                                         ea = FETCH32(cpustate); segment = DS;
335                                 } else {
336                                         ea = REG32(EBP); segment = SS;
337                                 }
338                                 break;
339                         case 6: ea = REG32(ESI); segment = DS; break;
340                         case 7: ea = REG32(EDI); segment = DS; break;
341                 }
342                 if( mod == 1 ) {
343                         disp8 = FETCH(cpustate);
344                         ea += (INT32)disp8;
345                 } else if( mod == 2 ) {
346                         disp32 = FETCH32(cpustate);
347                         ea += disp32;
348                 }
349
350                 if( cpustate->segment_prefix )
351                         segment = cpustate->segment_override;
352
353                 *out_ea = ea;
354                 *out_segment = segment;
355
356         } else {
357                 switch( rm )
358                 {
359                         default:
360                         case 0: ea = REG16(BX) + REG16(SI); segment = DS; break;
361                         case 1: ea = REG16(BX) + REG16(DI); segment = DS; break;
362                         case 2: ea = REG16(BP) + REG16(SI); segment = SS; break;
363                         case 3: ea = REG16(BP) + REG16(DI); segment = SS; break;
364                         case 4: ea = REG16(SI); segment = DS; break;
365                         case 5: ea = REG16(DI); segment = DS; break;
366                         case 6:
367                                 if( mod == 0 ) {
368                                         ea = FETCH16(cpustate); segment = DS;
369                                 } else {
370                                         ea = REG16(BP); segment = SS;
371                                 }
372                                 break;
373                         case 7: ea = REG16(BX); segment = DS; break;
374                 }
375                 if( mod == 1 ) {
376                         disp8 = FETCH(cpustate);
377                         ea += (INT32)disp8;
378                 } else if( mod == 2 ) {
379                         disp16 = FETCH16(cpustate);
380                         ea += (INT32)disp16;
381                 }
382
383                 if( cpustate->segment_prefix )
384                         segment = cpustate->segment_override;
385
386                 *out_ea = ea & 0xffff;
387                 *out_segment = segment;
388         }
389 }
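/* Worked examples (added for clarity, not in the original source):
 *   32-bit addressing, mod/rm = 0x45 (mod=1, rm=5): EA = EBP + disp8, default SS
 *   32-bit addressing, mod/rm = 0x05 (mod=0, rm=5): EA = disp32, default DS
 *   16-bit addressing, mod/rm = 0x46 (mod=1, rm=6): EA = BP + disp8, default SS
 * A segment override prefix, when present, replaces the default segment. */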
390
391 static UINT32 GetNonTranslatedEA(i386_state *cpustate,UINT8 modrm,UINT8 *seg)
392 {
393         UINT8 segment;
394         UINT32 ea;
395         modrm_to_EA(cpustate, modrm, &ea, &segment );
396         if(seg) *seg = segment;
397         return ea;
398 }
399
400 static UINT32 GetEA(i386_state *cpustate,UINT8 modrm, int rwn, UINT32 size)
401 {
402         UINT8 segment;
403         UINT32 ea;
404         modrm_to_EA(cpustate, modrm, &ea, &segment );
405         return i386_translate(cpustate, segment, ea, rwn, size );
406 }
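/* Note (added for clarity): GetNonTranslatedEA() returns the raw offset and
   default segment, while GetEA() additionally passes the result through
   i386_translate(), which applies the segment base and the limit check for
   the given access type (rwn) and size. */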
407
408 /* Check segment register for validity when changing privilege level after an RETF */
409 static void i386_check_sreg_validity(i386_state* cpustate, int reg)
410 {
411         UINT16 selector = cpustate->sreg[reg].selector;
412         UINT8 CPL = cpustate->CPL;
413         UINT8 DPL,RPL;
414         I386_SREG desc;
415         int invalid = 0;
416
417         memset(&desc, 0, sizeof(desc));
418         desc.selector = selector;
419         i386_load_protected_mode_segment(cpustate,&desc,NULL);
420         DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
421         RPL = selector & 0x03;
422
423         /* Must be within the relevant descriptor table limits */
424         if(selector & 0x04)
425         {
426                 if((selector & ~0x07) > cpustate->ldtr.limit)
427                         invalid = 1;
428         }
429         else
430         {
431                 if((selector & ~0x07) > cpustate->gdtr.limit)
432                         invalid = 1;
433         }
434
435         /* Must be either a data or readable code segment */
436         if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0002)) || (desc.flags & 0x0018) == 0x0010)
437                 invalid = 0;
438         else
439                 invalid = 1;
440
441         /* If a data segment or non-conforming code segment, then either DPL >= CPL or DPL >= RPL */
442         if(((desc.flags & 0x0018) == 0x0018 && (desc.flags & 0x0004) == 0) || (desc.flags & 0x0018) == 0x0010)
443         {
444                 if((DPL < CPL) || (DPL < RPL))
445                         invalid = 1;
446         }
447
448         /* if segment is invalid, then segment register is nulled */
449         if(invalid != 0)
450         {
451                 cpustate->sreg[reg].selector = 0;
452                 i386_load_segment_descriptor(cpustate,reg);
453         }
454 }
455
456 static int i386_limit_check(i386_state *cpustate, int seg, UINT32 offset, UINT32 size)
457 {
458         if(PROTECTED_MODE && !V8086_MODE)
459         {
460                 if((cpustate->sreg[seg].flags & 0x0018) == 0x0010 && cpustate->sreg[seg].flags & 0x0004) // if expand-down data segment
461                 {
462                         // expand-down: fault if the offset is at or below the limit, or if offset + size - 1 runs past the 16-bit 0xffff bound (the 32-bit 0xffffffff bound cannot be exceeded by a UINT32 here)
463                         if((offset <= cpustate->sreg[seg].limit) || ((cpustate->sreg[seg].d)?0:((offset + size - 1) > 0xffff)))
464                         {
465                                 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x (expand-down)\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
466                                 return 1;
467                         }
468                 }
469                 else
470                 {
471                         if((offset + size - 1) > cpustate->sreg[seg].limit)
472                         {
473                                 logerror("Limit check at 0x%08x failed. Segment %04x, limit %08x, offset %08x\n",cpustate->pc,cpustate->sreg[seg].selector,cpustate->sreg[seg].limit,offset);
474                                 return 1;
475                         }
476                 }
477         }
478         return 0;
479 }
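/* Worked example (added for clarity): with a normal (expand-up) segment whose
   limit is 0x0000ffff, a 4-byte access at offset 0xfffd faults because
   offset + size - 1 = 0x10000 exceeds the limit.  For an expand-down segment
   the valid range is (limit, 0xffff] (or (limit, 0xffffffff] when D=1), so an
   offset at or below the limit faults instead. */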
480
481 static void i386_sreg_load(i386_state *cpustate, UINT16 selector, UINT8 reg, bool *fault)
482 {
483         // Checks done when MOV changes a segment register in protected mode
484         UINT8 CPL,RPL,DPL;
485
486         CPL = cpustate->CPL;
487         RPL = selector & 0x0003;
488
489         if(!PROTECTED_MODE || V8086_MODE)
490         {
491                 cpustate->sreg[reg].selector = selector;
492                 i386_load_segment_descriptor(cpustate, reg);
493                 if(fault) *fault = false;
494                 return;
495         }
496
497         if(fault) *fault = true;
498         if(reg == SS)
499         {
500                 I386_SREG stack;
501
502                 memset(&stack, 0, sizeof(stack));
503                 stack.selector = selector;
504                 i386_load_protected_mode_segment(cpustate,&stack,NULL);
505                 DPL = (stack.flags >> 5) & 0x03;
506
507                 if((selector & ~0x0003) == 0)
508                 {
509                         logerror("SReg Load (%08x): Selector is null.\n",cpustate->pc);
510                         FAULT(FAULT_GP,0)
511                 }
512                 if(selector & 0x0004)  // LDT
513                 {
514                         if((selector & ~0x0007) > cpustate->ldtr.limit)
515                         {
516                                 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
517                                 FAULT(FAULT_GP,selector & ~0x03)
518                         }
519                 }
520                 else  // GDT
521                 {
522                         if((selector & ~0x0007) > cpustate->gdtr.limit)
523                         {
524                                 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
525                                 FAULT(FAULT_GP,selector & ~0x03)
526                         }
527                 }
528                 if (RPL != CPL)
529                 {
530                         logerror("SReg Load (%08x): Selector RPL does not equal CPL.\n",cpustate->pc);
531                         FAULT(FAULT_GP,selector & ~0x03)
532                 }
533                 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
534                 {
535                         logerror("SReg Load (%08x): Segment is not a writable data segment.\n",cpustate->pc);
536                         FAULT(FAULT_GP,selector & ~0x03)
537                 }
538                 if(DPL != CPL)
539                 {
540                         logerror("SReg Load (%08x): Segment DPL does not equal CPL.\n",cpustate->pc);
541                         FAULT(FAULT_GP,selector & ~0x03)
542                 }
543                 if(!(stack.flags & 0x0080))
544                 {
545                         logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
546                         FAULT(FAULT_SS,selector & ~0x03)
547                 }
548         }
549         if(reg == DS || reg == ES || reg == FS || reg == GS)
550         {
551                 I386_SREG desc;
552
553                 if((selector & ~0x0003) == 0)
554                 {
555                         cpustate->sreg[reg].selector = selector;
556                         i386_load_segment_descriptor(cpustate, reg );
557                         if(fault) *fault = false;
558                         return;
559                 }
560
561                 memset(&desc, 0, sizeof(desc));
562                 desc.selector = selector;
563                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
564                 DPL = (desc.flags >> 5) & 0x03;
565
566                 if(selector & 0x0004)  // LDT
567                 {
568                         if((selector & ~0x0007) > cpustate->ldtr.limit)
569                         {
570                                 logerror("SReg Load (%08x): Selector is out of LDT bounds.\n",cpustate->pc);
571                                 FAULT(FAULT_GP,selector & ~0x03)
572                         }
573                 }
574                 else  // GDT
575                 {
576                         if((selector & ~0x0007) > cpustate->gdtr.limit)
577                         {
578                                 logerror("SReg Load (%08x): Selector is out of GDT bounds.\n",cpustate->pc);
579                                 FAULT(FAULT_GP,selector & ~0x03)
580                         }
581                 }
582                 if((desc.flags & 0x0018) != 0x10)
583                 {
584                         if((((desc.flags & 0x0002) != 0) && ((desc.flags & 0x0018) != 0x18)) || !(desc.flags & 0x10))
585                         {
586                                 logerror("SReg Load (%08x): Segment is not a data segment or readable code segment.\n",cpustate->pc);
587                                 FAULT(FAULT_GP,selector & ~0x03)
588                         }
589                 }
590                 if(((desc.flags & 0x0018) == 0x10) || ((!(desc.flags & 0x0004)) && ((desc.flags & 0x0018) == 0x18)))
591                 {
592                         // if data or non-conforming code segment
593                         if((RPL > DPL) || (CPL > DPL))
594                         {
595                                 logerror("SReg Load (%08x): Selector RPL or CPL is greater than segment DPL.\n",cpustate->pc);
596                                 FAULT(FAULT_GP,selector & ~0x03)
597                         }
598                 }
599                 if(!(desc.flags & 0x0080))
600                 {
601                         logerror("SReg Load (%08x): Segment is not present.\n",cpustate->pc);
602                         FAULT(FAULT_NP,selector & ~0x03)
603                 }
604         }
605
606         cpustate->sreg[reg].selector = selector;
607         i386_load_segment_descriptor(cpustate, reg );
608         if(fault) *fault = false;
609 }
610
611 static void i386_trap(i386_state *cpustate,int irq, int irq_gate, int trap_level)
612 {
613         /*  I386 Interrupts/Traps/Faults:
614          *
615          *  0x00    Divide by zero
616          *  0x01    Debug exception
617          *  0x02    NMI
618          *  0x03    Int3
619          *  0x04    Overflow
620          *  0x05    Array bounds check
621          *  0x06    Illegal Opcode
622          *  0x07    FPU not available
623          *  0x08    Double fault
624          *  0x09    Coprocessor segment overrun
625          *  0x0a    Invalid task state
626          *  0x0b    Segment not present
627          *  0x0c    Stack exception
628          *  0x0d    General Protection Fault
629          *  0x0e    Page fault
630          *  0x0f    Reserved
631          *  0x10    Coprocessor error
632          */
633         UINT32 v1, v2;
634         UINT32 offset, oldflags = get_flags(cpustate);
635         UINT16 segment;
636         int entry = irq * (PROTECTED_MODE ? 8 : 4);
637         int SetRPL = 0;
638         cpustate->lock = false;
639
640         if( !(PROTECTED_MODE) )
641         {
642                 /* 16-bit */
643                 PUSH16(cpustate, oldflags & 0xffff );
644                 PUSH16(cpustate, cpustate->sreg[CS].selector );
645                 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
646                         PUSH16(cpustate, cpustate->eip );
647                 else
648                         PUSH16(cpustate, cpustate->prev_eip );
649
650                 cpustate->sreg[CS].selector = READ16(cpustate, cpustate->idtr.base + entry + 2 );
651                 cpustate->eip = READ16(cpustate, cpustate->idtr.base + entry );
652
653                 cpustate->TF = 0;
654                 cpustate->IF = 0;
655         }
656         else
657         {
658                 int type;
659                 UINT16 flags;
660                 I386_SREG desc;
661                 UINT8 CPL = cpustate->CPL, DPL = 0; //, RPL = 0;
662
663                 /* 32-bit */
664                 v1 = READ32PL0(cpustate, cpustate->idtr.base + entry );
665                 v2 = READ32PL0(cpustate, cpustate->idtr.base + entry + 4 );
666                 offset = (v2 & 0xffff0000) | (v1 & 0xffff);
667                 segment = (v1 >> 16) & 0xffff;
668                 type = (v2>>8) & 0x1F;
669                 flags = (v2>>8) & 0xf0ff;
670
671                 if(trap_level == 2)
672                 {
673                         logerror("IRQ: Double fault.\n");
674                         FAULT_EXP(FAULT_DF,0);
675                 }
676                 if(trap_level >= 3)
677                 {
678                         logerror("IRQ: Triple fault. CPU reset.\n");
679                         CPU_RESET_CALL(CPU_MODEL);
680                         cpustate->shutdown = 1;
681                         return;
682                 }
683
684                 /* segment privilege checks */
685                 if(entry >= cpustate->idtr.limit)
686                 {
687                         logerror("IRQ (%08x): Vector %02xh is past IDT limit.\n",cpustate->pc,entry);
688                         FAULT_EXP(FAULT_GP,entry+2)
689                 }
690                 /* segment must be interrupt gate, trap gate, or task gate */
691                 if(type != 0x05 && type != 0x06 && type != 0x07 && type != 0x0e && type != 0x0f)
692                 {
693                         logerror("IRQ#%02x (%08x): Vector segment %04x is not an interrupt, trap or task gate.\n",irq,cpustate->pc,segment);
694                         FAULT_EXP(FAULT_GP,entry+2)
695                 }
696
697                 if(cpustate->ext == 0) // if software interrupt (caused by INT/INTO/INT3)
698                 {
699                         if(((flags >> 5) & 0x03) < CPL)
700                         {
701                                 logerror("IRQ (%08x): Software IRQ - gate DPL is less than CPL.\n",cpustate->pc);
702                                 FAULT_EXP(FAULT_GP,entry+2)
703                         }
704                         if(V8086_MODE)
705                         {
706                                 if((!cpustate->IOP1 || !cpustate->IOP2) && (cpustate->opcode != 0xcc))
707                                 {
708                                         logerror("IRQ (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
709                                         FAULT(FAULT_GP,0)
710                                 }
711
712                         }
713                 }
714
715                 if((flags & 0x0080) == 0)
716                 {
717                         logerror("IRQ: Vector segment is not present.\n");
718                         FAULT_EXP(FAULT_NP,entry+2)
719                 }
720
721                 if(type == 0x05)
722                 {
723                         /* Task gate */
724                         memset(&desc, 0, sizeof(desc));
725                         desc.selector = segment;
726                         i386_load_protected_mode_segment(cpustate,&desc,NULL);
727                         if(segment & 0x04)
728                         {
729                                 logerror("IRQ: Task gate: TSS is not in the GDT.\n");
730                                 FAULT_EXP(FAULT_TS,segment & ~0x03);
731                         }
732                         else
733                         {
734                                 if(segment > cpustate->gdtr.limit)
735                                 {
736                                         logerror("IRQ: Task gate: TSS is past GDT limit.\n");
737                                         FAULT_EXP(FAULT_TS,segment & ~0x03);
738                                 }
739                         }
740                         if((desc.flags & 0x000f) != 0x09 && (desc.flags & 0x000f) != 0x01)
741                         {
742                                 logerror("IRQ: Task gate: TSS is not an available TSS.\n");
743                                 FAULT_EXP(FAULT_TS,segment & ~0x03);
744                         }
745                         if((desc.flags & 0x0080) == 0)
746                         {
747                                 logerror("IRQ: Task gate: TSS is not present.\n");
748                                 FAULT_EXP(FAULT_NP,segment & ~0x03);
749                         }
750                         if(!(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1))
751                                 cpustate->eip = cpustate->prev_eip;
752                         if(desc.flags & 0x08)
753                                 i386_task_switch(cpustate,desc.selector,1);
754                         else
755                                 i286_task_switch(cpustate,desc.selector,1);
756                         return;
757                 }
758                 else
759                 {
760                         /* Interrupt or Trap gate */
761                         memset(&desc, 0, sizeof(desc));
762                         desc.selector = segment;
763                         i386_load_protected_mode_segment(cpustate,&desc,NULL);
764                         CPL = cpustate->CPL;  // current privilege level
765                         DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
766 //          RPL = segment & 0x03;  // requested privilege level
767
768                         if((segment & ~0x03) == 0)
769                         {
770                                 logerror("IRQ: Gate segment is null.\n");
771                                 FAULT_EXP(FAULT_GP,cpustate->ext)
772                         }
773                         if(segment & 0x04)
774                         {
775                                 if((segment & ~0x07) > cpustate->ldtr.limit)
776                                 {
777                                         logerror("IRQ: Gate segment is past LDT limit.\n");
778                                         FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
779                                 }
780                         }
781                         else
782                         {
783                                 if((segment & ~0x07) > cpustate->gdtr.limit)
784                                 {
785                                         logerror("IRQ: Gate segment is past GDT limit.\n");
786                                         FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
787                                 }
788                         }
789                         if((desc.flags & 0x0018) != 0x18)
790                         {
791                                 logerror("IRQ: Gate descriptor is not a code segment.\n");
792                                 FAULT_EXP(FAULT_GP,(segment & 0x03)+cpustate->ext)
793                         }
794                         if((desc.flags & 0x0080) == 0)
795                         {
796                                 logerror("IRQ: Gate segment is not present.\n");
797                                 FAULT_EXP(FAULT_NP,(segment & 0x03)+cpustate->ext)
798                         }
799                         if((desc.flags & 0x0004) == 0 && (DPL < CPL))
800                         {
801                                 /* IRQ to inner privilege */
802                                 I386_SREG stack;
803                                 UINT32 newESP,oldSS,oldESP;
804
805                                 if(V8086_MODE && DPL)
806                                 {
807                                         logerror("IRQ: Gate to CPL>0 from VM86 mode.\n");
808                                         FAULT_EXP(FAULT_GP,segment & ~0x03);
809                                 }
810                                 /* Check new stack segment in TSS */
811                                 memset(&stack, 0, sizeof(stack));
812                                 stack.selector = i386_get_stack_segment(cpustate,DPL);
813                                 i386_load_protected_mode_segment(cpustate,&stack,NULL);
814                                 oldSS = cpustate->sreg[SS].selector;
815                                 if(flags & 0x0008)
816                                         oldESP = REG32(ESP);
817                                 else
818                                         oldESP = REG16(SP);
819                                 if((stack.selector & ~0x03) == 0)
820                                 {
821                                         logerror("IRQ: New stack selector is null.\n");
822                                         FAULT_EXP(FAULT_GP,cpustate->ext)
823                                 }
824                                 if(stack.selector & 0x04)
825                                 {
826                                         if((stack.selector & ~0x07) > cpustate->ldtr.base)
827                                         {
828                                                 logerror("IRQ: New stack selector is past LDT limit.\n");
829                                                 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
830                                         }
831                                 }
832                                 else
833                                 {
834                                         if((stack.selector & ~0x07) > cpustate->gdtr.base)
835                                         {
836                                                 logerror("IRQ: New stack selector is past GDT limit.\n");
837                                                 FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
838                                         }
839                                 }
840                                 if((stack.selector & 0x03) != DPL)
841                                 {
842                                         logerror("IRQ: New stack selector RPL is not equal to code segment DPL.\n");
843                                         FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
844                                 }
845                                 if(((stack.flags >> 5) & 0x03) != DPL)
846                                 {
847                                         logerror("IRQ: New stack segment DPL is not equal to code segment DPL.\n");
848                                         FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext)
849                                 }
850                                 if(((stack.flags & 0x0018) != 0x10) && (stack.flags & 0x0002) != 0)
851                                 {
852                                         logerror("IRQ: New stack segment is not a writable data segment.\n");
853                                         FAULT_EXP(FAULT_TS,(stack.selector & ~0x03)+cpustate->ext) // #TS(stack selector + EXT)
854                                 }
855                                 if((stack.flags & 0x0080) == 0)
856                                 {
857                                         logerror("IRQ: New stack segment is not present.\n");
858                                         FAULT_EXP(FAULT_SS,(stack.selector & ~0x03)+cpustate->ext) // #SS(stack selector + EXT)
859                                 }
860                                 newESP = i386_get_stack_ptr(cpustate,DPL);
861                                 if(type & 0x08) // 32-bit gate
862                                 {
863                                         if(((newESP < (V8086_MODE?36:20)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?36:20))) && (stack.flags & 0x4)))
864                                         {
865                                                 logerror("IRQ: New stack has no space for return addresses.\n");
866                                                 FAULT_EXP(FAULT_SS,0)
867                                         }
868                                 }
869                                 else // 16-bit gate
870                                 {
871                                         newESP &= 0xffff;
872                                         if(((newESP < (V8086_MODE?18:10)) && !(stack.flags & 0x4)) || ((~stack.limit < (~(newESP - 1) + (V8086_MODE?18:10))) && (stack.flags & 0x4)))
873                                         {
874                                                 logerror("IRQ: New stack has no space for return addresses.\n");
875                                                 FAULT_EXP(FAULT_SS,0)
876                                         }
877                                 }
878                                 if(offset > desc.limit)
879                                 {
880                                         logerror("IRQ: New EIP is past code segment limit.\n");
881                                         FAULT_EXP(FAULT_GP,0)
882                                 }
883                                 /* change CPL before accessing the stack */
884                                 cpustate->CPL = DPL;
885                                 /* check for page fault at new stack TODO: check if stack frame crosses page boundary */
886                                 WRITE_TEST(cpustate, stack.base+newESP-1);
887                                 /* Load new stack segment descriptor */
888                                 cpustate->sreg[SS].selector = stack.selector;
889                                 i386_load_protected_mode_segment(cpustate,&cpustate->sreg[SS],NULL);
890                                 i386_set_descriptor_accessed(cpustate, stack.selector);
891                                 REG32(ESP) = newESP;
892                                 if(V8086_MODE)
893                                 {
894                                         //logerror("IRQ (%08x): Interrupt during V8086 task\n",cpustate->pc);
895                                         if(type & 0x08)
896                                         {
897                                                 PUSH32SEG(cpustate,cpustate->sreg[GS].selector & 0xffff);
898                                                 PUSH32SEG(cpustate,cpustate->sreg[FS].selector & 0xffff);
899                                                 PUSH32SEG(cpustate,cpustate->sreg[DS].selector & 0xffff);
900                                                 PUSH32SEG(cpustate,cpustate->sreg[ES].selector & 0xffff);
901                                         }
902                                         else
903                                         {
904                                                 PUSH16(cpustate,cpustate->sreg[GS].selector);
905                                                 PUSH16(cpustate,cpustate->sreg[FS].selector);
906                                                 PUSH16(cpustate,cpustate->sreg[DS].selector);
907                                                 PUSH16(cpustate,cpustate->sreg[ES].selector);
908                                         }
909                                         cpustate->sreg[GS].selector = 0;
910                                         cpustate->sreg[FS].selector = 0;
911                                         cpustate->sreg[DS].selector = 0;
912                                         cpustate->sreg[ES].selector = 0;
913                                         cpustate->VM = 0;
914                                         i386_load_segment_descriptor(cpustate,GS);
915                                         i386_load_segment_descriptor(cpustate,FS);
916                                         i386_load_segment_descriptor(cpustate,DS);
917                                         i386_load_segment_descriptor(cpustate,ES);
918                                 }
919                                 if(type & 0x08)
920                                 {
921                                         // 32-bit gate
922                                         PUSH32SEG(cpustate,oldSS);
923                                         PUSH32(cpustate,oldESP);
924                                 }
925                                 else
926                                 {
927                                         // 16-bit gate
928                                         PUSH16(cpustate,oldSS);
929                                         PUSH16(cpustate,oldESP);
930                                 }
931                                 SetRPL = 1;
932                         }
933                         else
934                         {
935                                 int stack_limit;
936                                 if((desc.flags & 0x0004) || (DPL == CPL))
937                                 {
938                                         /* IRQ to same privilege */
939                                         if(V8086_MODE && !cpustate->ext)
940                                         {
941                                                 logerror("IRQ: Gate to same privilege from VM86 mode.\n");
942                                                 FAULT_EXP(FAULT_GP,segment & ~0x03);
943                                         }
944                                         if(type == 0x0e || type == 0x0f)  // 32-bit gate
945                                                 stack_limit = 10;
946                                         else
947                                                 stack_limit = 6;
948                                         // TODO: Add check for error code (2 extra bytes)
949                                         if(REG32(ESP) < stack_limit)
950                                         {
951                                                 logerror("IRQ: Stack has no space left (needs %i bytes).\n",stack_limit);
952                                                 FAULT_EXP(FAULT_SS,0)
953                                         }
954                                         if(offset > desc.limit)
955                                         {
956                                                 logerror("IRQ: Gate segment offset is past segment limit.\n");
957                                                 FAULT_EXP(FAULT_GP,0)
958                                         }
959                                         SetRPL = 1;
960                                 }
961                                 else
962                                 {
963                                         logerror("IRQ: Gate descriptor is non-conforming, and DPL does not equal CPL.\n");
964                                         FAULT_EXP(FAULT_GP,segment)
965                                 }
966                         }
967                 }
968                 UINT32 tempSP = REG32(ESP);
969                 try
970                 {
971                         // this is ugly but the alternative is worse
972                         if(type != 0x0e && type != 0x0f)  // if not 386 interrupt or trap gate
973                         {
974                                 PUSH16(cpustate, oldflags & 0xffff );
975                                 PUSH16(cpustate, cpustate->sreg[CS].selector );
976                                 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
977                                         PUSH16(cpustate, cpustate->eip );
978                                 else
979                                         PUSH16(cpustate, cpustate->prev_eip );
980                         }
981                         else
982                         {
983                                 PUSH32(cpustate, oldflags & 0x00ffffff );
984                                 PUSH32SEG(cpustate, cpustate->sreg[CS].selector );
985                                 if(irq == 3 || irq == 4 || irq == 9 || irq_gate == 1)
986                                         PUSH32(cpustate, cpustate->eip );
987                                 else
988                                         PUSH32(cpustate, cpustate->prev_eip );
989                         }
990                 }
991                 catch(UINT64 e)
992                 {
993                         REG32(ESP) = tempSP;
994                         throw e;
995                 }
996                 if(SetRPL != 0)
997                         segment = (segment & ~0x03) | cpustate->CPL;
998                 cpustate->sreg[CS].selector = segment;
999                 cpustate->eip = offset;
1000
1001                 if(type == 0x0e || type == 0x06)
1002                         cpustate->IF = 0;
1003                 cpustate->TF = 0;
1004                 cpustate->NT = 0;
1005         }
1006
1007         i386_load_segment_descriptor(cpustate,CS);
1008         CHANGE_PC(cpustate,cpustate->eip);
1009
1010 }
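/* Worked example (added for clarity): in real mode each vector entry is 4
   bytes at idtr.base + irq*4 (offset word first, then segment word), so with
   the reset value idtr.base = 0, INT 21h fetches its target from linear
   0x84..0x87.  In protected mode each IDT entry is 8 bytes, hence
   entry = irq * 8 above. */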
1011
1012 static void i386_trap_with_error(i386_state *cpustate,int irq, int irq_gate, int trap_level, UINT32 error)
1013 {
1014         i386_trap(cpustate,irq,irq_gate,trap_level);
1015         if(irq == 8 || irq == 10 || irq == 11 || irq == 12 || irq == 13 || irq == 14)
1016         {
1017                 // for these exceptions, an error code is pushed onto the stack by the processor.
1018                         // no error code is pushed when the same vector is reached by a software interrupt.
1019                 if(PROTECTED_MODE)
1020                 {
1021                         UINT32 entry = irq * 8;
1022                         UINT32 v2,type;
1023                         v2 = READ32PL0(cpustate, cpustate->idtr.base + entry + 4 );
1024                         type = (v2>>8) & 0x1F;
1025                         if(type == 5)
1026                         {
1027                                 v2 = READ32PL0(cpustate, cpustate->idtr.base + entry);
1028                                 v2 = READ32PL0(cpustate, cpustate->gdtr.base + ((v2 >> 16) & 0xfff8) + 4);
1029                                 type = (v2>>8) & 0x1F;
1030                         }
1031                         if(type >= 9)
1032                                 PUSH32(cpustate,error);
1033                         else
1034                                 PUSH16(cpustate,error);
1035                 }
1036                 else
1037                         PUSH16(cpustate,error);
1038         }
1039 }
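/* Note (added for clarity): the vectors special-cased above are exactly the
   exceptions that carry an error code: 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS),
   13 (#GP) and 14 (#PF). */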
1040
1041
1042 static void i286_task_switch(i386_state *cpustate, UINT16 selector, UINT8 nested)
1043 {
1044         UINT32 tss;
1045         I386_SREG seg;
1046         UINT16 old_task;
1047         UINT8 ar_byte;  // access rights byte
1048
1049         /* TODO: Task State Segment privilege checks */
1050
1051         /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
1052         if(nested == 0)
1053         {
1054                 if(cpustate->task.segment & 0x0004)
1055                 {
1056                         ar_byte = READ8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1057                         WRITE8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1058                 }
1059                 else
1060                 {
1061                         ar_byte = READ8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1062                         WRITE8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1063                 }
1064         }
1065
1066         /* Save the state of the current task in the current TSS (TR register base) */
1067         tss = cpustate->task.base;
1068         WRITE16(cpustate,tss+0x0e,cpustate->eip & 0x0000ffff);
1069         WRITE16(cpustate,tss+0x10,get_flags(cpustate) & 0x0000ffff);
1070         WRITE16(cpustate,tss+0x12,REG16(AX));
1071         WRITE16(cpustate,tss+0x14,REG16(CX));
1072         WRITE16(cpustate,tss+0x16,REG16(DX));
1073         WRITE16(cpustate,tss+0x18,REG16(BX));
1074         WRITE16(cpustate,tss+0x1a,REG16(SP));
1075         WRITE16(cpustate,tss+0x1c,REG16(BP));
1076         WRITE16(cpustate,tss+0x1e,REG16(SI));
1077         WRITE16(cpustate,tss+0x20,REG16(DI));
1078         WRITE16(cpustate,tss+0x22,cpustate->sreg[ES].selector);
1079         WRITE16(cpustate,tss+0x24,cpustate->sreg[CS].selector);
1080         WRITE16(cpustate,tss+0x26,cpustate->sreg[SS].selector);
1081         WRITE16(cpustate,tss+0x28,cpustate->sreg[DS].selector);
1082
1083         old_task = cpustate->task.segment;
1084
1085         /* Load task register with the selector of the incoming task */
1086         cpustate->task.segment = selector;
1087         memset(&seg, 0, sizeof(seg));
1088         seg.selector = cpustate->task.segment;
1089         i386_load_protected_mode_segment(cpustate,&seg,NULL);
1090         cpustate->task.limit = seg.limit;
1091         cpustate->task.base = seg.base;
1092         cpustate->task.flags = seg.flags;
1093
1094         /* Set TS bit in CR0 */
1095         cpustate->cr[0] |= 0x08;
1096
1097         /* Load incoming task state from the new task's TSS */
1098         tss = cpustate->task.base;
1099         cpustate->ldtr.segment = READ16(cpustate,tss+0x2a) & 0xffff;
1100         seg.selector = cpustate->ldtr.segment;
1101         i386_load_protected_mode_segment(cpustate,&seg,NULL);
1102         cpustate->ldtr.limit = seg.limit;
1103         cpustate->ldtr.base = seg.base;
1104         cpustate->ldtr.flags = seg.flags;
1105         cpustate->eip = READ16(cpustate,tss+0x0e);
1106         set_flags(cpustate,READ16(cpustate,tss+0x10));
1107         REG16(AX) = READ16(cpustate,tss+0x12);
1108         REG16(CX) = READ16(cpustate,tss+0x14);
1109         REG16(DX) = READ16(cpustate,tss+0x16);
1110         REG16(BX) = READ16(cpustate,tss+0x18);
1111         REG16(SP) = READ16(cpustate,tss+0x1a);
1112         REG16(BP) = READ16(cpustate,tss+0x1c);
1113         REG16(SI) = READ16(cpustate,tss+0x1e);
1114         REG16(DI) = READ16(cpustate,tss+0x20);
1115         cpustate->sreg[ES].selector = READ16(cpustate,tss+0x22) & 0xffff;
1116         i386_load_segment_descriptor(cpustate, ES);
1117         cpustate->sreg[CS].selector = READ16(cpustate,tss+0x24) & 0xffff;
1118         i386_load_segment_descriptor(cpustate, CS);
1119         cpustate->sreg[SS].selector = READ16(cpustate,tss+0x26) & 0xffff;
1120         i386_load_segment_descriptor(cpustate, SS);
1121         cpustate->sreg[DS].selector = READ16(cpustate,tss+0x28) & 0xffff;
1122         i386_load_segment_descriptor(cpustate, DS);
1123
1124         /* Set the busy bit in the new task's descriptor */
1125         if(selector & 0x0004)
1126         {
1127                 ar_byte = READ8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5);
1128                 WRITE8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1129         }
1130         else
1131         {
1132                 ar_byte = READ8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5);
1133                 WRITE8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1134         }
1135
1136         /* For nested tasks, we write the outgoing task's selector to the back-link field of the new TSS,
1137            and set the NT flag in the EFLAGS register */
1138         if(nested != 0)
1139         {
1140                 WRITE16(cpustate,tss+0,old_task);
1141                 cpustate->NT = 1;
1142         }
1143         CHANGE_PC(cpustate,cpustate->eip);
1144
1145         cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1146 //  printf("286 Task Switch from selector %04x to %04x\n",old_task,selector);
1147 }
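/* 286 TSS layout used above (summary added for clarity): +0x00 back link,
   +0x02..+0x0d SP0/SS0 .. SP2/SS2, +0x0e IP, +0x10 FLAGS, +0x12..+0x20
   AX,CX,DX,BX,SP,BP,SI,DI, +0x22 ES, +0x24 CS, +0x26 SS, +0x28 DS,
   +0x2a LDT selector. */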
1148
1149 static void i386_task_switch(i386_state *cpustate, UINT16 selector, UINT8 nested)
1150 {
1151         UINT32 tss;
1152         I386_SREG seg;
1153         UINT16 old_task;
1154         UINT8 ar_byte;  // access rights byte
1155         UINT32 oldcr3 = cpustate->cr[3];
1156
1157         /* TODO: Task State Segment privilege checks */
1158
1159         /* For tasks that aren't nested, clear the busy bit in the task's descriptor */
1160         if(nested == 0)
1161         {
1162                 if(cpustate->task.segment & 0x0004)
1163                 {
1164                         ar_byte = READ8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5);
1165                         WRITE8(cpustate,cpustate->ldtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1166                 }
1167                 else
1168                 {
1169                         ar_byte = READ8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5);
1170                         WRITE8(cpustate,cpustate->gdtr.base + (cpustate->task.segment & ~0x0007) + 5,ar_byte & ~0x02);
1171                 }
1172         }
1173
1174         /* Save the state of the current task in the current TSS (TR register base) */
1175         tss = cpustate->task.base;
1176         WRITE32(cpustate,tss+0x1c,cpustate->cr[3]);  // correct?
1177         WRITE32(cpustate,tss+0x20,cpustate->eip);
1178         WRITE32(cpustate,tss+0x24,get_flags(cpustate));
1179         WRITE32(cpustate,tss+0x28,REG32(EAX));
1180         WRITE32(cpustate,tss+0x2c,REG32(ECX));
1181         WRITE32(cpustate,tss+0x30,REG32(EDX));
1182         WRITE32(cpustate,tss+0x34,REG32(EBX));
1183         WRITE32(cpustate,tss+0x38,REG32(ESP));
1184         WRITE32(cpustate,tss+0x3c,REG32(EBP));
1185         WRITE32(cpustate,tss+0x40,REG32(ESI));
1186         WRITE32(cpustate,tss+0x44,REG32(EDI));
1187         WRITE32(cpustate,tss+0x48,cpustate->sreg[ES].selector);
1188         WRITE32(cpustate,tss+0x4c,cpustate->sreg[CS].selector);
1189         WRITE32(cpustate,tss+0x50,cpustate->sreg[SS].selector);
1190         WRITE32(cpustate,tss+0x54,cpustate->sreg[DS].selector);
1191         WRITE32(cpustate,tss+0x58,cpustate->sreg[FS].selector);
1192         WRITE32(cpustate,tss+0x5c,cpustate->sreg[GS].selector);
1193
1194         old_task = cpustate->task.segment;
1195
1196         /* Load task register with the selector of the incoming task */
1197         cpustate->task.segment = selector;
1198         memset(&seg, 0, sizeof(seg));
1199         seg.selector = cpustate->task.segment;
1200         i386_load_protected_mode_segment(cpustate,&seg,NULL);
1201         cpustate->task.limit = seg.limit;
1202         cpustate->task.base = seg.base;
1203         cpustate->task.flags = seg.flags;
1204
1205         /* Set TS bit in CR0 */
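             /* TS (CR0 bit 3) causes the next FPU instruction to raise #NM, enabling lazy FPU context switching */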
1206         cpustate->cr[0] |= 0x08;
1207
1208         /* Load incoming task state from the new task's TSS */
1209         tss = cpustate->task.base;
1210         cpustate->ldtr.segment = READ32(cpustate,tss+0x60) & 0xffff;
1211         seg.selector = cpustate->ldtr.segment;
1212         i386_load_protected_mode_segment(cpustate,&seg,NULL);
1213         cpustate->ldtr.limit = seg.limit;
1214         cpustate->ldtr.base = seg.base;
1215         cpustate->ldtr.flags = seg.flags;
1216         cpustate->eip = READ32(cpustate,tss+0x20);
1217         set_flags(cpustate,READ32(cpustate,tss+0x24));
1218         REG32(EAX) = READ32(cpustate,tss+0x28);
1219         REG32(ECX) = READ32(cpustate,tss+0x2c);
1220         REG32(EDX) = READ32(cpustate,tss+0x30);
1221         REG32(EBX) = READ32(cpustate,tss+0x34);
1222         REG32(ESP) = READ32(cpustate,tss+0x38);
1223         REG32(EBP) = READ32(cpustate,tss+0x3c);
1224         REG32(ESI) = READ32(cpustate,tss+0x40);
1225         REG32(EDI) = READ32(cpustate,tss+0x44);
1226         cpustate->sreg[ES].selector = READ32(cpustate,tss+0x48) & 0xffff;
1227         i386_load_segment_descriptor(cpustate, ES);
1228         cpustate->sreg[CS].selector = READ32(cpustate,tss+0x4c) & 0xffff;
1229         i386_load_segment_descriptor(cpustate, CS);
1230         cpustate->sreg[SS].selector = READ32(cpustate,tss+0x50) & 0xffff;
1231         i386_load_segment_descriptor(cpustate, SS);
1232         cpustate->sreg[DS].selector = READ32(cpustate,tss+0x54) & 0xffff;
1233         i386_load_segment_descriptor(cpustate, DS);
1234         cpustate->sreg[FS].selector = READ32(cpustate,tss+0x58) & 0xffff;
1235         i386_load_segment_descriptor(cpustate, FS);
1236         cpustate->sreg[GS].selector = READ32(cpustate,tss+0x5c) & 0xffff;
1237         i386_load_segment_descriptor(cpustate, GS);
1238         /* For nested tasks, write the outgoing task's selector to the back-link field of the new TSS and set
1239            the NT flag in EFLAGS.  This must happen before CR3 is loaded, as the old TSS may no longer be mapped under the new task's page tables. */
1240         if(nested != 0)
1241         {
1242                 WRITE32(cpustate,tss+0,old_task);
1243                 cpustate->NT = 1;
1244         }
1245         cpustate->cr[3] = READ32(cpustate,tss+0x1c);  // CR3 (PDBR)
1246         if(oldcr3 != cpustate->cr[3])
1247                 vtlb_flush_dynamic(cpustate->vtlb);
1248
1249         /* Set the busy bit in the new task's descriptor */
1250         if(selector & 0x0004)
1251         {
1252                 ar_byte = READ8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5);
1253                 WRITE8(cpustate,cpustate->ldtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1254         }
1255         else
1256         {
1257                 ar_byte = READ8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5);
1258                 WRITE8(cpustate,cpustate->gdtr.base + (selector & ~0x0007) + 5,ar_byte | 0x02);
1259         }
1260
1261         CHANGE_PC(cpustate,cpustate->eip);
1262
1263         cpustate->CPL = (cpustate->sreg[SS].flags >> 5) & 3;
1264 //  printf("386 Task Switch from selector %04x to %04x\n",old_task,selector);
1265 }
1266
1267 static void i386_check_irq_line(i386_state *cpustate)
1268 {
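             /* A pending SMI takes priority over maskable interrupts and is not gated by the IF flag */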
1269         if(!cpustate->smm && cpustate->smi)
1270         {
1271                 pentium_smi(cpustate);
1272                 return;
1273         }
1274
1275         /* Check if the interrupts are enabled */
1276         if ( (cpustate->irq_state) && cpustate->IF )
1277         {
1278                 cpustate->cycles -= 2;
1279                 i386_trap(cpustate, cpustate->pic->get_intr_ack(), 1, 0);
1280                 cpustate->irq_state = 0;
1281         }
1282 }
1283
1284 static void i386_protected_mode_jump(i386_state *cpustate, UINT16 seg, UINT32 off, int indirect, int operand32)
1285 {
1286         I386_SREG desc;
1287         I386_CALL_GATE call_gate;
1288         UINT8 CPL,DPL,RPL;
1289         UINT8 SetRPL = 0;
1290         UINT16 segment = seg;
1291         UINT32 offset = off;
1292
1293         /* Check selector is not null */
1294         if((segment & ~0x03) == 0)
1295         {
1296                 logerror("JMP: Segment is null.\n");
1297                 FAULT(FAULT_GP,0)
1298         }
1299         /* Selector is within descriptor table limit */
1300         if((segment & 0x04) == 0)
1301         {
1302                 /* check GDT limit */
1303                 if((segment & ~0x07) > (cpustate->gdtr.limit))
1304                 {
1305                         logerror("JMP: Segment is past GDT limit.\n");
1306                         FAULT(FAULT_GP,segment & 0xfffc)
1307                 }
1308         }
1309         else
1310         {
1311                 /* check LDT limit */
1312                 if((segment & ~0x07) > (cpustate->ldtr.limit))
1313                 {
1314                         logerror("JMP: Segment is past LDT limit.\n");
1315                         FAULT(FAULT_GP,segment & 0xfffc)
1316                 }
1317         }
1318         /* Determine segment type */
1319         memset(&desc, 0, sizeof(desc));
1320         desc.selector = segment;
1321         i386_load_protected_mode_segment(cpustate,&desc,NULL);
1322         CPL = cpustate->CPL;  // current privilege level
1323         DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
1324         RPL = segment & 0x03;  // requested privilege level
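             /* Far JMP to a code segment: non-conforming requires RPL <= CPL and DPL == CPL, conforming
                only requires DPL <= CPL; CPL itself never changes on a far JMP. */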
1325         if((desc.flags & 0x0018) == 0x0018)
1326         {
1327                 /* code segment */
1328                 if((desc.flags & 0x0004) == 0)
1329                 {
1330                         /* non-conforming */
1331                         if(RPL > CPL)
1332                         {
1333                                 logerror("JMP: RPL %i is greater than CPL %i\n",RPL,CPL);
1334                                 FAULT(FAULT_GP,segment & 0xfffc)
1335                         }
1336                         if(DPL != CPL)
1337                         {
1338                                 logerror("JMP: DPL %i is not equal to CPL %i\n",DPL,CPL);
1339                                 FAULT(FAULT_GP,segment & 0xfffc)
1340                         }
1341                 }
1342                 else
1343                 {
1344                         /* conforming */
1345                         if(DPL > CPL)
1346                         {
1347                                 logerror("JMP: DPL %i is greater than CPL %i\n",DPL,CPL);
1348                                 FAULT(FAULT_GP,segment & 0xfffc)
1349                         }
1350                 }
1351                 SetRPL = 1;
1352                 if((desc.flags & 0x0080) == 0)
1353                 {
1354                         logerror("JMP: Segment is not present\n");
1355                         FAULT(FAULT_NP,segment & 0xfffc)
1356                 }
1357                 if(offset > desc.limit)
1358                 {
1359                         logerror("JMP: Offset is past segment limit\n");
1360                         FAULT(FAULT_GP,0)
1361                 }
1362         }
1363         else
1364         {
1365                 if((desc.flags & 0x0010) != 0)
1366                 {
1367                         logerror("JMP: Segment is a data segment\n");
1368                         FAULT(FAULT_GP,segment & 0xfffc)  // #GP (cannot execute code in a data segment)
1369                 }
1370                 else
1371                 {
1372                         switch(desc.flags & 0x000f)
1373                         {
1374                         case 0x01:  // 286 Available TSS
1375                         case 0x09:  // 386 Available TSS
1376                                 logerror("JMP: Available TSS at %08x\n",cpustate->pc);
1377                                 memset(&desc, 0, sizeof(desc));
1378                                 desc.selector = segment;
1379                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
1380                                 DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
1381                                 if(DPL < CPL)
1382                                 {
1383                                         logerror("JMP: TSS: DPL %i is less than CPL %i\n",DPL,CPL);
1384                                         FAULT(FAULT_GP,segment & 0xfffc)
1385                                 }
1386                                 if(DPL < RPL)
1387                                 {
1388                                         logerror("JMP: TSS: DPL %i is less than TSS RPL %i\n",DPL,RPL);
1389                                         FAULT(FAULT_GP,segment & 0xfffc)
1390                                 }
1391                                 if((desc.flags & 0x0080) == 0)
1392                                 {
1393                                         logerror("JMP: TSS: Segment is not present\n");
1394                                         FAULT(FAULT_GP,segment & 0xfffc)
1395                                 }
1396                                 if(desc.flags & 0x0008)
1397                                         i386_task_switch(cpustate,desc.selector,0);
1398                                 else
1399                                         i286_task_switch(cpustate,desc.selector,0);
1400                                 return;
1401                         case 0x04:  // 286 Call Gate
1402                         case 0x0c:  // 386 Call Gate
1403                                 //logerror("JMP: Call gate at %08x\n",cpustate->pc);
1404                                 SetRPL = 1;
1405                                 memset(&call_gate, 0, sizeof(call_gate));
1406                                 call_gate.segment = segment;
1407                                 i386_load_call_gate(cpustate,&call_gate);
1408                                 DPL = call_gate.dpl;
1409                                 if(DPL < CPL)
1410                                 {
1411                                         logerror("JMP: Call Gate: DPL %i is less than CPL %i\n",DPL,CPL);
1412                                         FAULT(FAULT_GP,segment & 0xfffc)
1413                                 }
1414                                 if(DPL < RPL)
1415                                 {
1416                                         logerror("JMP: Call Gate: DPL %i is less than RPL %i\n",DPL,RPL);
1417                                         FAULT(FAULT_GP,segment & 0xfffc)
1418                                 }
1419                                 if((desc.flags & 0x0080) == 0)
1420                                 {
1421                                         logerror("JMP: Call Gate: Segment is not present\n");
1422                                         FAULT(FAULT_NP,segment & 0xfffc)
1423                                 }
1424                                 /* Now we examine the segment that the call gate refers to */
1425                                 if(call_gate.selector == 0)
1426                                 {
1427                                         logerror("JMP: Call Gate: Gate selector is null\n");
1428                                         FAULT(FAULT_GP,0)
1429                                 }
1430                                 if(call_gate.selector & 0x04)
1431                                 {
1432                                         if((call_gate.selector & ~0x07) > cpustate->ldtr.limit)
1433                                         {
1434                                                 logerror("JMP: Call Gate: Gate Selector is past LDT segment limit\n");
1435                                                 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1436                                         }
1437                                 }
1438                                 else
1439                                 {
1440                                         if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1441                                         {
1442                                                 logerror("JMP: Call Gate: Gate Selector is past GDT segment limit\n");
1443                                                 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1444                                         }
1445                                 }
1446                                 desc.selector = call_gate.selector;
1447                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
1448                                 DPL = (desc.flags >> 5) & 0x03;
1449                                 if((desc.flags & 0x0018) != 0x18)
1450                                 {
1451                                         logerror("JMP: Call Gate: Gate does not point to a code segment\n");
1452                                         FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1453                                 }
1454                                 if((desc.flags & 0x0004) == 0)
1455                                 {  // non-conforming
1456                                         if(DPL != CPL)
1457                                         {
1458                                                 logerror("JMP: Call Gate: Gate DPL does not equal CPL\n");
1459                                                 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1460                                         }
1461                                 }
1462                                 else
1463                                 {  // conforming
1464                                         if(DPL > CPL)
1465                                         {
1466                                                 logerror("JMP: Call Gate: Gate DPL is greater than CPL\n");
1467                                                 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1468                                         }
1469                                 }
1470                                 if((desc.flags & 0x0080) == 0)
1471                                 {
1472                                         logerror("JMP: Call Gate: Gate Segment is not present\n");
1473                                         FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1474                                 }
1475                                 if(call_gate.offset > desc.limit)
1476                                 {
1477                                         logerror("JMP: Call Gate: Gate offset is past Gate segment limit\n");
1478                                         FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1479                                 }
1480                                 segment = call_gate.selector;
1481                                 offset = call_gate.offset;
1482                                 break;
1483                         case 0x05:  // Task Gate
1484                                 logerror("JMP: Task gate at %08x\n",cpustate->pc);
1485                                 memset(&call_gate, 0, sizeof(call_gate));
1486                                 call_gate.segment = segment;
1487                                 i386_load_call_gate(cpustate,&call_gate);
1488                                 DPL = call_gate.dpl;
1489                                 if(DPL < CPL)
1490                                 {
1491                                         logerror("JMP: Task Gate: Gate DPL %i is less than CPL %i\n",DPL,CPL);
1492                                         FAULT(FAULT_GP,segment & 0xfffc)
1493                                 }
1494                                 if(DPL < RPL)
1495                                 {
1496                                         logerror("JMP: Task Gate: Gate DPL %i is less than RPL %i\n",DPL,RPL);
1497                                         FAULT(FAULT_GP,segment & 0xfffc)
1498                                 }
1499                                 if(call_gate.present == 0)
1500                                 {
1501                                         logerror("JMP: Task Gate: Gate is not present.\n");
1502                                         FAULT(FAULT_GP,segment & 0xfffc)
1503                                 }
1504                                 /* Check the TSS that the task gate points to */
1505                                 desc.selector = call_gate.selector;
1506                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
1507                                 DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
1508                                 RPL = call_gate.selector & 0x03;  // requested privilege level
1509                                 if(call_gate.selector & 0x04)
1510                                 {
1511                                         logerror("JMP: Task Gate TSS: TSS must be global.\n");
1512                                         FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1513                                 }
1514                                 else
1515                                 {
1516                                         if((call_gate.selector & ~0x07) > cpustate->gdtr.limit)
1517                                         {
1518                                                 logerror("JMP: Task Gate TSS: TSS is past GDT limit.\n");
1519                                                 FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1520                                         }
1521                                 }
1522                                 if((desc.flags & 0x000f) != 0x0009 && (desc.flags & 0x000f) != 0x0001)
1523                                 {
1524                                         logerror("JMP: Task Gate TSS: Segment is not an available TSS.\n");
1525                                         FAULT(FAULT_GP,call_gate.selector & 0xfffc)
1526                                 }
1527                                 if(call_gate.present == 0)
1528                                 {
1529                                         logerror("JMP: Task Gate TSS: TSS is not present.\n");
1530                                         FAULT(FAULT_NP,call_gate.selector & 0xfffc)
1531                                 }
1532                                 if(call_gate.ar & 0x08)
1533                                         i386_task_switch(cpustate,call_gate.selector,0);
1534                                 else
1535                                         i286_task_switch(cpustate,call_gate.selector,0);
1536                                 return;
1537                         default:  // invalid segment type
1538                                 logerror("JMP: Invalid segment type (%i) to jump to.\n",desc.flags & 0x000f);
1539                                 FAULT(FAULT_GP,segment & 0xfffc)
1540                         }
1541                 }
1542         }
1543
1544         if(SetRPL != 0)
1545                 segment = (segment & ~0x03) | cpustate->CPL;
1546         if(operand32 == 0)
1547                 cpustate->eip = offset & 0x0000ffff;
1548         else
1549                 cpustate->eip = offset;
1550         cpustate->sreg[CS].selector = segment;
1551         cpustate->performed_intersegment_jump = 1;
1552         i386_load_segment_descriptor(cpustate,CS);
1553         CHANGE_PC(cpustate,cpustate->eip);
1554 }
1555
1556 static void i386_protected_mode_call(i386_state *cpustate, UINT16 seg, UINT32 off, int indirect, int operand32)
1557 {
1558         I386_SREG desc;
1559         I386_CALL_GATE gate;
1560         UINT8 SetRPL = 0;
1561         UINT8 CPL, DPL, RPL;
1562         UINT16 selector = seg;
1563         UINT32 offset = off;
1564         int x;
1565
1566         if((selector & ~0x03) == 0)
1567         {
1568                 logerror("CALL (%08x): Selector is null.\n",cpustate->pc);
1569                 FAULT(FAULT_GP,0)  // #GP(0)
1570         }
1571         if(selector & 0x04)
1572         {
1573                 if((selector & ~0x07) > cpustate->ldtr.limit)
1574                 {
1575                         logerror("CALL: Selector is past LDT limit.\n");
1576                         FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1577                 }
1578         }
1579         else
1580         {
1581                 if((selector & ~0x07) > cpustate->gdtr.limit)
1582                 {
1583                         logerror("CALL: Selector is past GDT limit.\n");
1584                         FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1585                 }
1586         }
1587
1588         /* Determine segment type */
1589         memset(&desc, 0, sizeof(desc));
1590         desc.selector = selector;
1591         i386_load_protected_mode_segment(cpustate,&desc,NULL);
1592         CPL = cpustate->CPL;  // current privilege level
1593         DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
1594         RPL = selector & 0x03;  // requested privilege level
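             /* A direct far CALL follows the same privilege rules as a far JMP (conforming: DPL <= CPL;
                non-conforming: RPL <= CPL and DPL == CPL); a privilege increase is only possible through
                a call gate, handled further below. */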
1595         if((desc.flags & 0x0018) == 0x18)  // is a code segment
1596         {
1597                 if(desc.flags & 0x0004)
1598                 {
1599                         /* conforming */
1600                         if(DPL > CPL)
1601                         {
1602                                 logerror("CALL: Code segment DPL %i is greater than CPL %i\n",DPL,CPL);
1603                                 FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1604                         }
1605                 }
1606                 else
1607                 {
1608                         /* non-conforming */
1609                         if(RPL > CPL)
1610                         {
1611                                 logerror("CALL: RPL %i is greater than CPL %i\n",RPL,CPL);
1612                                 FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1613                         }
1614                         if(DPL != CPL)
1615                         {
1616                                 logerror("CALL: Code segment DPL %i is not equal to CPL %i\n",DPL,CPL);
1617                                 FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1618                         }
1619                 }
1620                 SetRPL = 1;
1621                 if((desc.flags & 0x0080) == 0)
1622                 {
1623                         logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1624                         FAULT(FAULT_NP,selector & ~0x03)  // #NP(selector)
1625                 }
1626                 if (operand32 != 0)  // if 32-bit
1627                 {
1628                         UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
1629                         if(i386_limit_check(cpustate, SS, stkoff, 8))
1630                         {
1631                                 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1632                                 FAULT(FAULT_SS,0)  // #SS(0)
1633                         }
1634                 }
1635                 else
1636                 {
1637                         UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
1638                         if(i386_limit_check(cpustate, SS, stkoff, 4))
1639                         {
1640                                 logerror("CALL (%08x): Stack has no room for return address.\n",cpustate->pc);
1641                                 FAULT(FAULT_SS,0)  // #SS(0)
1642                         }
1643                 }
1644                 if(offset > desc.limit)
1645                 {
1646                         logerror("CALL: EIP is past segment limit.\n");
1647                         FAULT(FAULT_GP,0)  // #GP(0)
1648                 }
1649         }
1650         else
1651         {
1652                 /* special segment type */
1653                 if(desc.flags & 0x0010)
1654                 {
1655                         logerror("CALL: Segment is a data segment.\n");
1656                         FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1657                 }
1658                 else
1659                 {
1660                         switch(desc.flags & 0x000f)
1661                         {
1662                         case 0x01:  // Available 286 TSS
1663                         case 0x09:  // Available 386 TSS
1664                                 logerror("CALL: Available TSS at %08x\n",cpustate->pc);
1665                                 if(DPL < CPL)
1666                                 {
1667                                         logerror("CALL: TSS: DPL is less than CPL.\n");
1668                                         FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1669                                 }
1670                                 if(DPL < RPL)
1671                                 {
1672                                         logerror("CALL: TSS: DPL is less than RPL.\n");
1673                                         FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1674                                 }
1675                                 if(desc.flags & 0x0002)
1676                                 {
1677                                         logerror("CALL: TSS: TSS is busy.\n");
1678                                         FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1679                                 }
1680                                 if((desc.flags & 0x0080) == 0)
1681                                 {
1682                                         logerror("CALL: TSS: Segment %02x is not present.\n",selector);
1683                                         FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
1684                                 }
1685                                 if(desc.flags & 0x08)
1686                                         i386_task_switch(cpustate,desc.selector,1);
1687                                 else
1688                                         i286_task_switch(cpustate,desc.selector,1);
1689                                 return;
1690                         case 0x04:  // 286 call gate
1691                         case 0x0c:  // 386 call gate
1692                                 if((desc.flags & 0x000f) == 0x04)
1693                                         operand32 = 0;
1694                                 else
1695                                         operand32 = 1;
1696                                 memset(&gate, 0, sizeof(gate));
1697                                 gate.segment = selector;
1698                                 i386_load_call_gate(cpustate,&gate);
1699                                 DPL = gate.dpl;
1700                                 //logerror("CALL: Call gate at %08x (%i parameters)\n",cpustate->pc,gate.dword_count);
1701                                 if(DPL < CPL)
1702                                 {
1703                                         logerror("CALL: Call gate DPL %i is less than CPL %i.\n",DPL,CPL);
1704                                         FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1705                                 }
1706                                 if(DPL < RPL)
1707                                 {
1708                                         logerror("CALL: Call gate DPL %i is less than RPL %i.\n",DPL,RPL);
1709                                         FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1710                                 }
1711                                 if(gate.present == 0)
1712                                 {
1713                                         logerror("CALL: Call gate is not present.\n");
1714                                         FAULT(FAULT_NP,desc.selector & ~0x03)  // #NP(selector)
1715                                 }
1716                                 desc.selector = gate.selector;
1717                                 if((gate.selector & ~0x03) == 0)
1718                                 {
1719                                         logerror("CALL: Call gate: Segment is null.\n");
1720                                         FAULT(FAULT_GP,0)  // #GP(0)
1721                                 }
1722                                 if(desc.selector & 0x04)
1723                                 {
1724                                         if((desc.selector & ~0x07) > cpustate->ldtr.limit)
1725                                         {
1726                                                 logerror("CALL: Call gate: Segment is past LDT limit\n");
1727                                                 FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1728                                         }
1729                                 }
1730                                 else
1731                                 {
1732                                         if((desc.selector & ~0x07) > cpustate->gdtr.limit)
1733                                         {
1734                                                 logerror("CALL: Call gate: Segment is past GDT limit\n");
1735                                                 FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1736                                         }
1737                                 }
1738                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
1739                                 if((desc.flags & 0x0018) != 0x18)
1740                                 {
1741                                         logerror("CALL: Call gate: Segment is not a code segment.\n");
1742                                         FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1743                                 }
1744                                 DPL = ((desc.flags >> 5) & 0x03);
1745                                 if(DPL > CPL)
1746                                 {
1747                                         logerror("CALL: Call gate: Segment DPL %i is greater than CPL %i.\n",DPL,CPL);
1748                                         FAULT(FAULT_GP,desc.selector & ~0x03)  // #GP(selector)
1749                                 }
1750                                 if((desc.flags & 0x0080) == 0)
1751                                 {
1752                                         logerror("CALL (%08x): Code segment is not present.\n",cpustate->pc);
1753                                         FAULT(FAULT_NP,desc.selector & ~0x03)  // #NP(selector)
1754                                 }
1755                                 if(DPL < CPL && (desc.flags & 0x0004) == 0)
1756                                 {
1757                                         I386_SREG stack;
1758                                         I386_SREG temp;
1759                                         UINT32 oldSS,oldESP;
1760                                         /* more privilege */
1761                                         /* Check new SS segment for privilege level from TSS */
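                                             /* Inner-privilege transfer: take SS:ESP for the target DPL from the
                                                TSS, validate the new stack, switch to it, push the caller's SS:ESP,
                                                copy the gate's parameters across, then push the return address. */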
1762                                         memset(&stack, 0, sizeof(stack));
1763                                         stack.selector = i386_get_stack_segment(cpustate,DPL);
1764                                         i386_load_protected_mode_segment(cpustate,&stack,NULL);
1765                                         if((stack.selector & ~0x03) == 0)
1766                                         {
1767                                                 logerror("CALL: Call gate: TSS selector is null\n");
1768                                                 FAULT(FAULT_TS,0)  // #TS(0)
1769                                         }
1770                                         if(stack.selector & 0x04)
1771                                         {
1772                                                 if((stack.selector & ~0x07) > cpustate->ldtr.limit)
1773                                                 {
1774                                                         logerror("CALL: Call gate: TSS selector is past LDT limit\n");
1775                                                         FAULT(FAULT_TS,stack.selector)  // #TS(SS selector)
1776                                                 }
1777                                         }
1778                                         else
1779                                         {
1780                                                 if((stack.selector & ~0x07) > cpustate->gdtr.limit)
1781                                                 {
1782                                                         logerror("CALL: Call gate: TSS selector is past GDT limit\n");
1783                                                         FAULT(FAULT_TS,stack.selector)  // #TS(SS selector)
1784                                                 }
1785                                         }
1786                                         if((stack.selector & 0x03) != DPL)
1787                                         {
1788                                                 logerror("CALL: Call gate: Stack selector RPL does not equal code segment DPL %i\n",DPL);
1789                                                 FAULT(FAULT_TS,stack.selector)  // #TS(SS selector)
1790                                         }
1791                                         if(((stack.flags >> 5) & 0x03) != DPL)
1792                                         {
1793                                                 logerror("CALL: Call gate: Stack DPL does not equal code segment DPL %i\n",DPL);
1794                                                 FAULT(FAULT_TS,stack.selector)  // #TS(SS selector)
1795                                         }
1796                                         if((stack.flags & 0x0018) != 0x0010 || (stack.flags & 0x0002) == 0)
1797                                         {
1798                                                 logerror("CALL: Call gate: Stack segment is not a writable data segment\n");
1799                                                 FAULT(FAULT_TS,stack.selector)  // #TS(SS selector)
1800                                         }
1801                                         if((stack.flags & 0x0080) == 0)
1802                                         {
1803                                                 logerror("CALL: Call gate: Stack segment is not present\n");
1804                                                 FAULT(FAULT_SS,stack.selector)  // #SS(SS selector)
1805                                         }
1806                                         UINT32 newESP = i386_get_stack_ptr(cpustate,DPL);
1807                                         if(!stack.d)
1808                                         {
1809                                                 newESP &= 0xffff;
1810                                         }
1811                                         if(operand32 != 0)
1812                                         {
1813                                                 if(newESP < ((gate.dword_count & 0x1f) + 16))
1814                                                 {
1815                                                         logerror("CALL: Call gate: New stack has no room for 32-bit return address and parameters.\n");
1816                                                         FAULT(FAULT_SS,0) // #SS(0)
1817                                                 }
1818                                                 if(gate.offset > desc.limit)
1819                                                 {
1820                                                         logerror("CALL: Call gate: EIP is past segment limit.\n");
1821                                                         FAULT(FAULT_GP,0) // #GP(0)
1822                                                 }
1823                                         }
1824                                         else
1825                                         {
1826                                                 if(newESP < ((gate.dword_count & 0x1f) + 8))
1827                                                 {
1828                                                         logerror("CALL: Call gate: New stack has no room for 16-bit return address and parameters.\n");
1829                                                         FAULT(FAULT_SS,0) // #SS(0)
1830                                                 }
1831                                                 if((gate.offset & 0xffff) > desc.limit)
1832                                                 {
1833                                                         logerror("CALL: Call gate: IP is past segment limit.\n");
1834                                                         FAULT(FAULT_GP,0) // #GP(0)
1835                                                 }
1836                                         }
1837                                         selector = gate.selector;
1838                                         offset = gate.offset;
1839
1840                                         cpustate->CPL = (stack.flags >> 5) & 0x03;
1841                                         /* check for page fault at new stack */
1842                                         WRITE_TEST(cpustate, stack.base+newESP-1);
1843                                         /* switch to new stack */
1844                                         oldSS = cpustate->sreg[SS].selector;
1845                                         cpustate->sreg[SS].selector = i386_get_stack_segment(cpustate,cpustate->CPL);
1846                                         if(operand32 != 0)
1847                                         {
1848                                                 oldESP = REG32(ESP);
1849                                         }
1850                                         else
1851                                         {
1852                                                 oldESP = REG16(SP);
1853                                         }
1854                                         i386_load_segment_descriptor(cpustate, SS );
1855                                         REG32(ESP) = newESP;
1856
1857                                         if(operand32 != 0)
1858                                         {
1859                                                 PUSH32SEG(cpustate,oldSS);
1860                                                 PUSH32(cpustate,oldESP);
1861                                         }
1862                                         else
1863                                         {
1864                                                 PUSH16(cpustate,oldSS);
1865                                                 PUSH16(cpustate,oldESP & 0xffff);
1866                                         }
1867
1868                                         memset(&temp, 0, sizeof(temp));
1869                                         temp.selector = oldSS;
1870                                         i386_load_protected_mode_segment(cpustate,&temp,NULL);
1871                                         /* copy parameters from old stack to new stack */
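                                             /* dword_count is a 5-bit field (at most 31 entries); each entry is
                                                copied in the gate's operand size (4 bytes for a 386 gate, 2 for a
                                                286 gate) */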
1872                                         for(x=(gate.dword_count & 0x1f)-1;x>=0;x--)
1873                                         {
1874                                                 UINT32 addr = oldESP + (operand32?(x*4):(x*2));
1875                                                 addr = temp.base + (temp.d?addr:(addr&0xffff));
1876                                                 if(operand32)
1877                                                         PUSH32(cpustate,READ32(cpustate,addr));
1878                                                 else
1879                                                         PUSH16(cpustate,READ16(cpustate,addr));
1880                                         }
1881                                         SetRPL = 1;
1882                                 }
1883                                 else
1884                                 {
1885                                         /* same privilege */
1886                                         if (operand32 != 0)  // if 32-bit
1887                                         {
1888                                                 UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 8 : (REG16(SP) - 8) & 0xffff);
1889                                                 if(i386_limit_check(cpustate, SS, stkoff, 8))
1890                                                 {
1891                                                         logerror("CALL: Stack has no room for return address.\n");
1892                                                         FAULT(FAULT_SS,0) // #SS(0)
1893                                                 }
1894                                                 selector = gate.selector;
1895                                                 offset = gate.offset;
1896                                         }
1897                                         else
1898                                         {
1899                                                 UINT32 stkoff = (STACK_32BIT ? REG32(ESP) - 4 : (REG16(SP) - 4) & 0xffff);
1900                                                 if(i386_limit_check(cpustate, SS, stkoff, 4))
1901                                                 {
1902                                                         logerror("CALL: Stack has no room for return address.\n");
1903                                                         FAULT(FAULT_SS,0) // #SS(0)
1904                                                 }
1905                                                 selector = gate.selector;
1906                                                 offset = gate.offset & 0xffff;
1907                                         }
1908                                         if(offset > desc.limit)
1909                                         {
1910                                                 logerror("CALL: EIP is past segment limit.\n");
1911                                                 FAULT(FAULT_GP,0) // #GP(0)
1912                                         }
1913                                         SetRPL = 1;
1914                                 }
1915                                 break;
1916                         case 0x05:  // task gate
1917                                 logerror("CALL: Task gate at %08x\n",cpustate->pc);
1918                                 memset(&gate, 0, sizeof(gate));
1919                                 gate.segment = selector;
1920                                 i386_load_call_gate(cpustate,&gate);
1921                                 DPL = gate.dpl;
1922                                 if(DPL < CPL)
1923                                 {
1924                                         logerror("CALL: Task Gate: Gate DPL is less than CPL.\n");
1925                                         FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1926                                 }
1927                                 if(DPL < RPL)
1928                                 {
1929                                         logerror("CALL: Task Gate: Gate DPL is less than RPL.\n");
1930                                         FAULT(FAULT_TS,selector & ~0x03) // #TS(selector)
1931                                 }
1932                                 if((gate.ar & 0x0080) == 0)
1933                                 {
1934                                         logerror("CALL: Task Gate: Gate is not present.\n");
1935                                         FAULT(FAULT_NP,selector & ~0x03) // #NP(selector)
1936                                 }
1937                                 /* Check the TSS that the task gate points to */
1938                                 desc.selector = gate.selector;
1939                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
1940                                 if(gate.selector & 0x04)
1941                                 {
1942                                         logerror("CALL: Task Gate: TSS is not global.\n");
1943                                         FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1944                                 }
1945                                 else
1946                                 {
1947                                         if((gate.selector & ~0x07) > cpustate->gdtr.limit)
1948                                         {
1949                                                 logerror("CALL: Task Gate: TSS is past GDT limit.\n");
1950                                                 FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1951                                         }
1952                                 }
1953                                 if(desc.flags & 0x0002)
1954                                 {
1955                                         logerror("CALL: Task Gate: TSS is busy.\n");
1956                                         FAULT(FAULT_TS,gate.selector & ~0x03) // #TS(selector)
1957                                 }
1958                                 if((desc.flags & 0x0080) == 0)
1959                                 {
1960                                         logerror("CALL: Task Gate: TSS is not present.\n");
1961                                         FAULT(FAULT_NP,gate.selector & ~0x03) // #NP(selector)
1962                                 }
1963                                 if(desc.flags & 0x08)
1964                                         i386_task_switch(cpustate,desc.selector,1);  // with nesting
1965                                 else
1966                                         i286_task_switch(cpustate,desc.selector,1);
1967                                 return;
1968                         default:
1969                                 logerror("CALL: Invalid special segment type (%i) to call.\n",desc.flags & 0x000f);
1970                                 FAULT(FAULT_GP,selector & ~0x03)  // #GP(selector)
1971                         }
1972                 }
1973         }
1974
1975         if(SetRPL != 0)
1976                 selector = (selector & ~0x03) | cpustate->CPL;
1977
1978         UINT32 tempSP = REG32(ESP);
1979         try
1980         {
1981                 // this is ugly but the alternative is worse
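                     // the pushes below can raise a page fault, which is thrown as a C++ exception;
                     // the catch block restores ESP so the faulting CALL can be restarted cleanly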
1982                 if(operand32 == 0)
1983                 {
1984                         /* 16-bit operand size */
1985                         PUSH16(cpustate, cpustate->sreg[CS].selector );
1986                         PUSH16(cpustate, cpustate->eip & 0x0000ffff );
1987                         cpustate->sreg[CS].selector = selector;
1988                         cpustate->performed_intersegment_jump = 1;
1989                         cpustate->eip = offset;
1990                         i386_load_segment_descriptor(cpustate,CS);
1991                 }
1992                 else
1993                 {
1994                         /* 32-bit operand size */
1995                         PUSH32SEG(cpustate, cpustate->sreg[CS].selector );
1996                         PUSH32(cpustate, cpustate->eip );
1997                         cpustate->sreg[CS].selector = selector;
1998                         cpustate->performed_intersegment_jump = 1;
1999                         cpustate->eip = offset;
2000                         i386_load_segment_descriptor(cpustate, CS );
2001                 }
2002         }
2003         catch(UINT64 e)
2004         {
2005                 REG32(ESP) = tempSP;
2006                 throw e;
2007         }
2008
2009         CHANGE_PC(cpustate,cpustate->eip);
2010 }
2011
2012 static void i386_protected_mode_retf(i386_state* cpustate, UINT8 count, UINT8 operand32)
2013 {
2014         UINT32 newCS, newEIP;
2015         I386_SREG desc;
2016         UINT8 CPL, RPL, DPL;
2017
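             /* Stack image expected by a far return: IP/EIP, CS, then 'count' bytes of parameters and,
                when returning to an outer privilege level, the caller's SP/ESP and SS above those. */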
2018         UINT32 ea = i386_translate(cpustate, SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32)?8:4);
2019
2020         if(operand32 == 0)
2021         {
2022                 newEIP = READ16(cpustate, ea) & 0xffff;
2023                 newCS = READ16(cpustate, ea+2) & 0xffff;
2024         }
2025         else
2026         {
2027                 newEIP = READ32(cpustate, ea);
2028                 newCS = READ32(cpustate, ea+4) & 0xffff;
2029         }
2030
2031         memset(&desc, 0, sizeof(desc));
2032         desc.selector = newCS;
2033         i386_load_protected_mode_segment(cpustate,&desc,NULL);
2034         CPL = cpustate->CPL;  // current privilege level
2035         DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
2036         RPL = newCS & 0x03;
2037
2038         if(RPL < CPL)
2039         {
2040                 logerror("RETF (%08x): Return segment RPL is less than CPL.\n",cpustate->pc);
2041                 FAULT(FAULT_GP,newCS & ~0x03)
2042         }
2043
2044         if(RPL == CPL)
2045         {
2046                 /* same privilege level */
2047                 if((newCS & ~0x03) == 0)
2048                 {
2049                         logerror("RETF: Return segment is null.\n");
2050                         FAULT(FAULT_GP,0)
2051                 }
2052                 if(newCS & 0x04)
2053                 {
2054                         if((newCS & ~0x07) >= cpustate->ldtr.limit)
2055                         {
2056                                 logerror("RETF: Return segment is past LDT limit.\n");
2057                                 FAULT(FAULT_GP,newCS & ~0x03)
2058                         }
2059                 }
2060                 else
2061                 {
2062                         if((newCS & ~0x07) >= cpustate->gdtr.limit)
2063                         {
2064                                 logerror("RETF: Return segment is past GDT limit.\n");
2065                                 FAULT(FAULT_GP,newCS & ~0x03)
2066                         }
2067                 }
2068                 if((desc.flags & 0x0018) != 0x0018)
2069                 {
2070                         logerror("RETF: Return segment is not a code segment.\n");
2071                         FAULT(FAULT_GP,newCS & ~0x03)
2072                 }
2073                 if(desc.flags & 0x0004)
2074                 {
2075                         if(DPL > RPL)
2076                         {
2077                                 logerror("RETF: Conforming code segment DPL is greater than CS RPL.\n");
2078                                 FAULT(FAULT_GP,newCS & ~0x03)
2079                         }
2080                 }
2081                 else
2082                 {
2083                         if(DPL != RPL)
2084                         {
2085                                 logerror("RETF: Non-conforming code segment DPL does not equal CS RPL.\n");
2086                                 FAULT(FAULT_GP,newCS & ~0x03)
2087                         }
2088                 }
2089                 if((desc.flags & 0x0080) == 0)
2090                 {
2091                         logerror("RETF (%08x): Code segment is not present.\n",cpustate->pc);
2092                         FAULT(FAULT_NP,newCS & ~0x03)
2093                 }
2094                 if(newEIP > desc.limit)
2095                 {
2096                         logerror("RETF: EIP is past code segment limit.\n");
2097                         FAULT(FAULT_GP,0)
2098                 }
2099                 if(operand32 == 0)
2100                 {
2101                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2102                         if(i386_limit_check(cpustate,SS,offset,count+4) != 0)
2103                         {
2104                                 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2105                                 FAULT(FAULT_SS,0)
2106                         }
2107                 }
2108                 else
2109                 {
2110                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2111                         if(i386_limit_check(cpustate,SS,offset,count+8) != 0)
2112                         {
2113                                 logerror("RETF: ESP is past stack segment limit.\n");
2114                                 FAULT(FAULT_SS,0)
2115                         }
2116                 }
2117                 if(STACK_32BIT)
2118                         REG32(ESP) += (operand32 ? 8 : 4) + count;
2119                 else
2120                         REG16(SP) += (operand32 ? 8 : 4) + count;
2121         }
2122         else if(RPL > CPL)
2123         {
2124                 UINT32 newSS, newESP;  // when changing privilege
2125                 /* outer privilege level */
2126                 if(operand32 == 0)
2127                 {
2128                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2129                         if(i386_limit_check(cpustate,SS,offset,count+8) != 0)
2130                         {
2131                                 logerror("RETF (%08x): SP is past stack segment limit.\n",cpustate->pc);
2132                                 FAULT(FAULT_SS,0)
2133                         }
2134                 }
2135                 else
2136                 {
2137                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2138                         if(i386_limit_check(cpustate,SS,offset,count+16) != 0)
2139                         {
2140                                 logerror("RETF: ESP is past stack segment limit.\n");
2141                                 FAULT(FAULT_SS,0)
2142                         }
2143                 }
2144                 /* Check CS selector and descriptor */
2145                 if((newCS & ~0x03) == 0)
2146                 {
2147                         logerror("RETF: CS segment is null.\n");
2148                         FAULT(FAULT_GP,0)
2149                 }
2150                 if(newCS & 0x04)
2151                 {
2152                         if((newCS & ~0x07) >= cpustate->ldtr.limit)
2153                         {
2154                                 logerror("RETF: CS segment selector is past LDT limit.\n");
2155                                 FAULT(FAULT_GP,newCS & ~0x03)
2156                         }
2157                 }
2158                 else
2159                 {
2160                         if((newCS & ~0x07) >= cpustate->gdtr.limit)
2161                         {
2162                                 logerror("RETF: CS segment selector is past GDT limit.\n");
2163                                 FAULT(FAULT_GP,newCS & ~0x03)
2164                         }
2165                 }
2166                 if((desc.flags & 0x0018) != 0x0018)
2167                 {
2168                         logerror("RETF: CS segment is not a code segment.\n");
2169                         FAULT(FAULT_GP,newCS & ~0x03)
2170                 }
2171                 if(desc.flags & 0x0004)
2172                 {
2173                         if(DPL > RPL)
2174                         {
2175                                 logerror("RETF: Conforming CS segment DPL is greater than return selector RPL.\n");
2176                                 FAULT(FAULT_GP,newCS & ~0x03)
2177                         }
2178                 }
2179                 else
2180                 {
2181                         if(DPL != RPL)
2182                         {
2183                                 logerror("RETF: Non-conforming CS segment DPL is not equal to return selector RPL.\n");
2184                                 FAULT(FAULT_GP,newCS & ~0x03)
2185                         }
2186                 }
2187                 if((desc.flags & 0x0080) == 0)
2188                 {
2189                         logerror("RETF: CS segment is not present.\n");
2190                         FAULT(FAULT_NP,newCS & ~0x03)
2191                 }
2192                 if(newEIP > desc.limit)
2193                 {
2194                         logerror("RETF: EIP is past return CS segment limit.\n");
2195                         FAULT(FAULT_GP,0)
2196                 }
2197
2198                 if(operand32 == 0)
2199                 {
2200                         ea += count+4;
2201                         newESP = READ16(cpustate, ea) & 0xffff;
2202                         newSS = READ16(cpustate, ea+2) & 0xffff;
2203                 }
2204                 else
2205                 {
2206                         ea += count+8;
2207                         newESP = READ32(cpustate, ea);
2208                         newSS = READ32(cpustate, ea+4) & 0xffff;
2209                 }
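                /* The outer-level frame on the old stack is laid out as
                 * [(E)IP][CS][<count> immediate bytes][(E)SP][SS], so the saved
                 * stack pointer sits count+4 (16-bit) or count+8 (32-bit) bytes
                 * above the return address that was read earlier. */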
2210
2211                 /* Check SS selector and descriptor */
2212                 desc.selector = newSS;
2213                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2214                 DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
2215                 if((newSS & ~0x07) == 0)
2216                 {
2217                         logerror("RETF: SS segment is null.\n");
2218                         FAULT(FAULT_GP,0)
2219                 }
2220                 if(newSS & 0x04)
2221                 {
2222                         if((newSS & ~0x07) > cpustate->ldtr.limit)
2223                         {
2224                                 logerror("RETF (%08x): SS segment selector is past LDT limit.\n",cpustate->pc);
2225                                 FAULT(FAULT_GP,newSS & ~0x03)
2226                         }
2227                 }
2228                 else
2229                 {
2230                         if((newSS & ~0x07) > cpustate->gdtr.limit)
2231                         {
2232                                 logerror("RETF (%08x): SS segment selector is past GDT limit.\n",cpustate->pc);
2233                                 FAULT(FAULT_GP,newSS & ~0x03)
2234                         }
2235                 }
2236                 if((newSS & 0x03) != RPL)
2237                 {
2238                         logerror("RETF: SS segment RPL is not equal to CS segment RPL.\n");
2239                         FAULT(FAULT_GP,newSS & ~0x03)
2240                 }
2241                 if((desc.flags & 0x0018) != 0x0010 || (desc.flags & 0x0002) == 0)
2242                 {
2243                         logerror("RETF: SS segment is not a writable data segment.\n");
2244                         FAULT(FAULT_GP,newSS & ~0x03)
2245                 }
2246                 if(((desc.flags >> 5) & 0x03) != RPL)
2247                 {
2248                         logerror("RETF: SS DPL is not equal to CS segment RPL.\n");
2249                         FAULT(FAULT_GP,newSS & ~0x03)
2250                 }
2251                 if((desc.flags & 0x0080) == 0)
2252                 {
2253                         logerror("RETF: SS segment is not present.\n");
2254                         FAULT(FAULT_GP,newSS & ~0x03)
2255                 }
2256                 cpustate->CPL = newCS & 0x03;
2257
2258                 /* Load new SS:(E)SP */
2259                 if(operand32 == 0)
2260                         REG16(SP) = (newESP+count) & 0xffff;
2261                 else
2262                         REG32(ESP) = newESP+count;
2263                 cpustate->sreg[SS].selector = newSS;
2264                 i386_load_segment_descriptor(cpustate, SS );
2265
2266                 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2267                 i386_check_sreg_validity(cpustate,DS);
2268                 i386_check_sreg_validity(cpustate,ES);
2269                 i386_check_sreg_validity(cpustate,FS);
2270                 i386_check_sreg_validity(cpustate,GS);
2271         }
2272
2273         /* Load new CS:(E)IP */
2274         if(operand32 == 0)
2275                 cpustate->eip = newEIP & 0xffff;
2276         else
2277                 cpustate->eip = newEIP;
2278         cpustate->sreg[CS].selector = newCS;
2279         i386_load_segment_descriptor(cpustate, CS );
2280         CHANGE_PC(cpustate,cpustate->eip);
2281 }
2282
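/*
 * Protected-mode IRET.  The minimum stack frame is (E)IP, CS, (E)FLAGS
 * (6 bytes for a 16-bit IRET, 12 bytes for a 32-bit one); a return to an
 * outer privilege level or to virtual 8086 mode additionally pops (E)SP
 * and SS (plus ES/DS/FS/GS for the V86 case).  A nested-task return
 * (NT flag set) instead switches back to the TSS named by the back-link
 * field of the current TSS.
 */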
2283 static void i386_protected_mode_iret(i386_state* cpustate, int operand32)
2284 {
2285         UINT32 newCS, newEIP;
2286         UINT32 newSS, newESP;  // when changing privilege
2287         I386_SREG desc,stack;
2288         UINT8 CPL, RPL, DPL;
2289         UINT32 newflags;
2290         UINT8 IOPL = cpustate->IOP1 | (cpustate->IOP2 << 1);
2291
2292         CPL = cpustate->CPL;
2293         UINT32 ea = i386_translate(cpustate, SS, (STACK_32BIT)?REG32(ESP):REG16(SP), 0, (operand32)?12:6);
2294         if(operand32 == 0)
2295         {
2296                 newEIP = READ16(cpustate, ea) & 0xffff;
2297                 newCS = READ16(cpustate, ea+2) & 0xffff;
2298                 newflags = READ16(cpustate, ea+4) & 0xffff;
2299         }
2300         else
2301         {
2302                 newEIP = READ32(cpustate, ea);
2303                 newCS = READ32(cpustate, ea+4) & 0xffff;
2304                 newflags = READ32(cpustate, ea+8);
2305         }
2306
2307         if(V8086_MODE)
2308         {
2309                 UINT32 oldflags = get_flags(cpustate);
2310                 if(IOPL != 3)
2311                 {
2312                         logerror("IRET (%08x): Is in Virtual 8086 mode and IOPL != 3.\n",cpustate->pc);
2313                         FAULT(FAULT_GP,0)
2314                 }
2315                 if(operand32 == 0)
2316                 {
2317                         cpustate->eip = newEIP & 0xffff;
2318                         cpustate->sreg[CS].selector = newCS & 0xffff;
2319                         newflags &= ~(3<<12);
2320                         newflags |= (((oldflags>>12)&3)<<12);  // IOPL cannot be changed in V86 mode
2321                         set_flags(cpustate,(newflags & 0xffff) | (oldflags & ~0xffff));
2322                         REG16(SP) += 6;
2323                 }
2324                 else
2325                 {
2326                         cpustate->eip = newEIP;
2327                         cpustate->sreg[CS].selector = newCS & 0xffff;
2328                         newflags &= ~(3<<12);
2329                         newflags |= 0x20000 | (((oldflags>>12)&3)<<12);  // IOPL and VM cannot be changed in V86 mode
2330                         set_flags(cpustate,newflags);
2331                         REG32(ESP) += 12;
2332                 }
2333         }
2334         else if(NESTED_TASK)
2335         {
2336                 UINT32 task = READ32(cpustate,cpustate->task.base);
2337                 /* Task Return */
2338                 logerror("IRET (%08x): Nested task return.\n",cpustate->pc);
2339                 /* Check back-link selector in TSS */
2340                 if(task & 0x04)
2341                 {
2342                         logerror("IRET: Task return: Back-linked TSS is not in GDT.\n");
2343                         FAULT(FAULT_TS,task & ~0x03)
2344                 }
2345                 if((task & ~0x07) >= cpustate->gdtr.limit)
2346                 {
2347                         logerror("IRET: Task return: Back-linked TSS selector is past GDT limit.\n");
2348                         FAULT(FAULT_TS,task & ~0x03)
2349                 }
2350                 memset(&desc, 0, sizeof(desc));
2351                 desc.selector = task;
2352                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2353                 if((desc.flags & 0x001f) != 0x000b)
2354                 {
2355                         logerror("IRET (%08x): Task return: Back-linked TSS is not a busy TSS.\n",cpustate->pc);
2356                         FAULT(FAULT_TS,task & ~0x03)
2357                 }
2358                 if((desc.flags & 0x0080) == 0)
2359                 {
2360                         logerror("IRET: Task return: Back-linked TSS is not present.\n");
2361                         FAULT(FAULT_NP,task & ~0x03)
2362                 }
2363                 if(desc.flags & 0x08)
2364                         i386_task_switch(cpustate,desc.selector,0);
2365                 else
2366                         i286_task_switch(cpustate,desc.selector,0);
2367                 return;
2368         }
2369         else
2370         {
2371                 if(newflags & 0x00020000) // if returning to virtual 8086 mode
2372                 {
2373                         // 16-bit IRET cannot reach here: the VM bit (bit 17) lies outside the low 16 bits of EFLAGS
2374                         newESP = READ32(cpustate, ea+12);
2375                         newSS = READ32(cpustate, ea+16) & 0xffff;
2376                         /* Return to v86 mode */
2377                         //logerror("IRET (%08x): Returning to Virtual 8086 mode.\n",cpustate->pc);
2378                         if(CPL != 0)
2379                         {
2380                                 UINT32 oldflags = get_flags(cpustate);
2381                                 newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2382                                 if(CPL > IOPL)
2383                                         newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2384                         }
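                        /* Pop the 32-bit return-to-V86 frame in order:
                         * EIP, CS, EFLAGS (already applied below via set_flags),
                         * ESP, SS, then ES, DS, FS and GS. */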
2385                         set_flags(cpustate,newflags);
2386                         cpustate->eip = POP32(cpustate) & 0xffff;  // high 16 bits are ignored
2387                         cpustate->sreg[CS].selector = POP32(cpustate) & 0xffff;
2388                         POP32(cpustate);  // skip the EFLAGS image; flags were already set above
2389                         newESP = POP32(cpustate);
2390                         newSS = POP32(cpustate) & 0xffff;
2391                         cpustate->sreg[ES].selector = POP32(cpustate) & 0xffff;
2392                         cpustate->sreg[DS].selector = POP32(cpustate) & 0xffff;
2393                         cpustate->sreg[FS].selector = POP32(cpustate) & 0xffff;
2394                         cpustate->sreg[GS].selector = POP32(cpustate) & 0xffff;
2395                         REG32(ESP) = newESP;  // all 32 bits are loaded
2396                         cpustate->sreg[SS].selector = newSS;
2397                         i386_load_segment_descriptor(cpustate,ES);
2398                         i386_load_segment_descriptor(cpustate,DS);
2399                         i386_load_segment_descriptor(cpustate,FS);
2400                         i386_load_segment_descriptor(cpustate,GS);
2401                         i386_load_segment_descriptor(cpustate,SS);
2402                         cpustate->CPL = 3;  // Virtual 8086 tasks are always run at CPL 3
2403                 }
2404                 else
2405                 {
2406                         if(operand32 == 0)
2407                         {
2408                                 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2409                                 if(i386_limit_check(cpustate,SS,offset,4) != 0)
2410                                 {
2411                                         logerror("IRET: Data on stack is past SS limit.\n");
2412                                         FAULT(FAULT_SS,0)
2413                                 }
2414                         }
2415                         else
2416                         {
2417                                 UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2418                                 if(i386_limit_check(cpustate,SS,offset,8) != 0)
2419                                 {
2420                                         logerror("IRET: Data on stack is past SS limit.\n");
2421                                         FAULT(FAULT_SS,0)
2422                                 }
2423                         }
2424                         RPL = newCS & 0x03;
2425                         if(RPL < CPL)
2426                         {
2427                                 logerror("IRET (%08x): Return CS RPL is less than CPL.\n",cpustate->pc);
2428                                 FAULT(FAULT_GP,newCS & ~0x03)
2429                         }
2430                         if(RPL == CPL)
2431                         {
2432                                 /* return to same privilege level */
2433                                 if(operand32 == 0)
2434                                 {
2435                                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2436                                         if(i386_limit_check(cpustate,SS,offset,6) != 0)
2437                                         {
2438                                                 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2439                                                 FAULT(FAULT_SS,0)
2440                                         }
2441                                 }
2442                                 else
2443                                 {
2444                                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2445                                         if(i386_limit_check(cpustate,SS,offset,12) != 0)
2446                                         {
2447                                                 logerror("IRET (%08x): Data on stack is past SS limit.\n",cpustate->pc);
2448                                                 FAULT(FAULT_SS,0)
2449                                         }
2450                                 }
2451                                 if((newCS & ~0x03) == 0)
2452                                 {
2453                                         logerror("IRET: Return CS selector is null.\n");
2454                                         FAULT(FAULT_GP,0)
2455                                 }
2456                                 if(newCS & 0x04)
2457                                 {
2458                                         if((newCS & ~0x07) >= cpustate->ldtr.limit)
2459                                         {
2460                                                 logerror("IRET: Return CS selector (%04x) is past LDT limit.\n",newCS);
2461                                                 FAULT(FAULT_GP,newCS & ~0x03)
2462                                         }
2463                                 }
2464                                 else
2465                                 {
2466                                         if((newCS & ~0x07) >= cpustate->gdtr.limit)
2467                                         {
2468                                                 logerror("IRET: Return CS selector is past GDT limit.\n");
2469                                                 FAULT(FAULT_GP,newCS & ~0x03)
2470                                         }
2471                                 }
2472                                 memset(&desc, 0, sizeof(desc));
2473                                 desc.selector = newCS;
2474                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2475                                 DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
2476                                 RPL = newCS & 0x03;
2477                                 if((desc.flags & 0x0018) != 0x0018)
2478                                 {
2479                                         logerror("IRET (%08x): Return CS segment is not a code segment.\n",cpustate->pc);
2480                                         FAULT(FAULT_GP,newCS & ~0x07)
2481                                 }
2482                                 if(desc.flags & 0x0004)
2483                                 {
2484                                         if(DPL > RPL)
2485                                         {
2486                                                 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2487                                                 FAULT(FAULT_GP,newCS & ~0x03)
2488                                         }
2489                                 }
2490                                 else
2491                                 {
2492                                         if(DPL != RPL)
2493                                         {
2494                                                 logerror("IRET: Non-conforming return CS DPL is not equal to CS RPL.\n");
2495                                                 FAULT(FAULT_GP,newCS & ~0x03)
2496                                         }
2497                                 }
2498                                 if((desc.flags & 0x0080) == 0)
2499                                 {
2500                                         logerror("IRET (%08x): Return CS segment is not present.\n", cpustate->pc);
2501                                         FAULT(FAULT_NP,newCS & ~0x03)
2502                                 }
2503                                 if(newEIP > desc.limit)
2504                                 {
2505                                         logerror("IRET: Return EIP is past return CS limit.\n");
2506                                         FAULT(FAULT_GP,0)
2507                                 }
2508
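                                /* Merge flags by privilege: 0x3000 is the IOPL field
                                 * (only CPL 0 may change it) and 0x200 is IF (only
                                 * changeable when CPL <= IOPL). */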
2509                                 if(CPL != 0)
2510                                 {
2511                                         UINT32 oldflags = get_flags(cpustate);
2512                                         newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2513                                         if(CPL > IOPL)
2514                                                 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2515                                 }
2516
2517                                 if(operand32 == 0)
2518                                 {
2519                                         cpustate->eip = newEIP;
2520                                         cpustate->sreg[CS].selector = newCS;
2521                                         set_flags(cpustate,newflags);
2522                                         REG16(SP) += 6;
2523                                 }
2524                                 else
2525                                 {
2526                                         cpustate->eip = newEIP;
2527                                         cpustate->sreg[CS].selector = newCS & 0xffff;
2528                                         set_flags(cpustate,newflags);
2529                                         REG32(ESP) += 12;
2530                                 }
2531                         }
2532                         else if(RPL > CPL)
2533                         {
2534                                 /* return to outer privilege level */
2535                                 memset(&desc, 0, sizeof(desc));
2536                                 desc.selector = newCS;
2537                                 i386_load_protected_mode_segment(cpustate,&desc,NULL);
2538                                 DPL = (desc.flags >> 5) & 0x03;  // descriptor privilege level
2539                                 RPL = newCS & 0x03;
2540                                 if(operand32 == 0)
2541                                 {
2542                                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2543                                         if(i386_limit_check(cpustate,SS,offset,10) != 0)
2544                                         {
2545                                                 logerror("IRET: SP is past SS limit.\n");
2546                                                 FAULT(FAULT_SS,0)
2547                                         }
2548                                 }
2549                                 else
2550                                 {
2551                                         UINT32 offset = (STACK_32BIT ? REG32(ESP) : REG16(SP));
2552                                         if(i386_limit_check(cpustate,SS,offset,20) != 0)
2553                                         {
2554                                                 logerror("IRET: ESP is past SS limit.\n");
2555                                                 FAULT(FAULT_SS,0)
2556                                         }
2557                                 }
2558                                 /* Check CS selector and descriptor */
2559                                 if((newCS & ~0x03) == 0)
2560                                 {
2561                                         logerror("IRET: Return CS selector is null.\n");
2562                                         FAULT(FAULT_GP,0)
2563                                 }
2564                                 if(newCS & 0x04)
2565                                 {
2566                                         if((newCS & ~0x07) >= cpustate->ldtr.limit)
2567                                         {
2568                                                 logerror("IRET: Return CS selector is past LDT limit.\n");
2569                                                 FAULT(FAULT_GP,newCS & ~0x03);
2570                                         }
2571                                 }
2572                                 else
2573                                 {
2574                                         if((newCS & ~0x07) >= cpustate->gdtr.limit)
2575                                         {
2576                                                 logerror("IRET: Return CS selector is past GDT limit.\n");
2577                                                 FAULT(FAULT_GP,newCS & ~0x03);
2578                                         }
2579                                 }
2580                                 if((desc.flags & 0x0018) != 0x0018)
2581                                 {
2582                                         logerror("IRET: Return CS segment is not a code segment.\n");
2583                                         FAULT(FAULT_GP,newCS & ~0x03)
2584                                 }
2585                                 if(desc.flags & 0x0004)
2586                                 {
2587                                         if(DPL > RPL)
2588                                         {
2589                                                 logerror("IRET: Conforming return CS DPL is greater than CS RPL.\n");
2590                                                 FAULT(FAULT_GP,newCS & ~0x03)
2591                                         }
2592                                 }
2593                                 else
2594                                 {
2595                                         if(DPL != RPL)
2596                                         {
2597                                                 logerror("IRET: Non-conforming return CS DPL does not equal CS RPL.\n");
2598                                                 FAULT(FAULT_GP,newCS & ~0x03)
2599                                         }
2600                                 }
2601                                 if((desc.flags & 0x0080) == 0)
2602                                 {
2603                                         logerror("IRET: Return CS segment is not present.\n");
2604                                         FAULT(FAULT_NP,newCS & ~0x03)
2605                                 }
2606
2607                                 /* Check SS selector and descriptor */
2608                                 if(operand32 == 0)
2609                                 {
2610                                         newESP = READ16(cpustate, ea+6) & 0xffff;
2611                                         newSS = READ16(cpustate, ea+8) & 0xffff;
2612                                 }
2613                                 else
2614                                 {
2615                                         newESP = READ32(cpustate, ea+12);
2616                                         newSS = READ32(cpustate, ea+16) & 0xffff;
2617                                 }
2618                                 memset(&stack, 0, sizeof(stack));
2619                                 stack.selector = newSS;
2620                                 i386_load_protected_mode_segment(cpustate,&stack,NULL);
2621                                 DPL = (stack.flags >> 5) & 0x03;
2622                                 if((newSS & ~0x03) == 0)
2623                                 {
2624                                         logerror("IRET: Return SS selector is null.\n");
2625                                         FAULT(FAULT_GP,0)
2626                                 }
2627                                 if(newSS & 0x04)
2628                                 {
2629                                         if((newSS & ~0x07) >= cpustate->ldtr.limit)
2630                                         {
2631                                                 logerror("IRET: Return SS selector is past LDT limit.\n");
2632                                                 FAULT(FAULT_GP,newSS & ~0x03);
2633                                         }
2634                                 }
2635                                 else
2636                                 {
2637                                         if((newSS & ~0x07) >= cpustate->gdtr.limit)
2638                                         {
2639                                                 logerror("IRET: Return SS selector is past GDT limit.\n");
2640                                                 FAULT(FAULT_GP,newSS & ~0x03);
2641                                         }
2642                                 }
2643                                 if((newSS & 0x03) != RPL)
2644                                 {
2645                                         logerror("IRET: Return SS RPL is not equal to return CS RPL.\n");
2646                                         FAULT(FAULT_GP,newSS & ~0x03)
2647                                 }
2648                                 if((stack.flags & 0x0018) != 0x0010)
2649                                 {
2650                                         logerror("IRET: Return SS segment is not a data segment.\n");
2651                                         FAULT(FAULT_GP,newSS & ~0x03)
2652                                 }
2653                                 if((stack.flags & 0x0002) == 0)
2654                                 {
2655                                         logerror("IRET: Return SS segment is not writable.\n");
2656                                         FAULT(FAULT_GP,newSS & ~0x03)
2657                                 }
2658                                 if(DPL != RPL)
2659                                 {
2660                                         logerror("IRET: Return SS DPL does not equal SS RPL.\n");
2661                                         FAULT(FAULT_GP,newSS & ~0x03)
2662                                 }
2663                                 if((stack.flags & 0x0080) == 0)
2664                                 {
2665                                         logerror("IRET: Return SS segment is not present.\n");
2666                                         FAULT(FAULT_NP,newSS & ~0x03)
2667                                 }
2668                                 if(newEIP > desc.limit)
2669                                 {
2670                                         logerror("IRET: EIP is past return CS limit.\n");
2671                                         FAULT(FAULT_GP,0)
2672                                 }
2673
2674 //              if(operand32 == 0)
2675 //                  REG16(SP) += 10;
2676 //              else
2677 //                  REG32(ESP) += 20;
2678
2679                                 // IOPL can only change if CPL is zero
2680                                 if(CPL != 0)
2681                                 {
2682                                         UINT32 oldflags = get_flags(cpustate);
2683                                         newflags = (newflags & ~0x00003000) | (oldflags & 0x00003000);
2684                                         if(CPL > IOPL)
2685                                                 newflags = (newflags & ~0x200 ) | (oldflags & 0x200);
2686                                 }
2687
2688                                 if(operand32 == 0)
2689                                 {
2690                                         cpustate->eip = newEIP & 0xffff;
2691                                         cpustate->sreg[CS].selector = newCS;
2692                                         set_flags(cpustate,newflags);
2693                                         REG16(SP) = newESP & 0xffff;
2694                                         cpustate->sreg[SS].selector = newSS;
2695                                 }
2696                                 else
2697                                 {
2698                                         cpustate->eip = newEIP;
2699                                         cpustate->sreg[CS].selector = newCS & 0xffff;
2700                                         set_flags(cpustate,newflags);
2701                                         REG32(ESP) = newESP;
2702                                         cpustate->sreg[SS].selector = newSS & 0xffff;
2703                                 }
2704                                 cpustate->CPL = newCS & 0x03;
2705                                 i386_load_segment_descriptor(cpustate,SS);
2706
2707                                 /* Check that DS, ES, FS and GS are valid for the new privilege level */
2708                                 i386_check_sreg_validity(cpustate,DS);
2709                                 i386_check_sreg_validity(cpustate,ES);
2710                                 i386_check_sreg_validity(cpustate,FS);
2711                                 i386_check_sreg_validity(cpustate,GS);
2712                         }
2713                 }
2714         }
2715
2716         i386_load_segment_descriptor(cpustate,CS);
2717         CHANGE_PC(cpustate,cpustate->eip);
2718 }
2719
2720 #include "cycles.h"
2721
2722 static UINT8 cycle_table_rm[X86_NUM_CPUS][CYCLES_NUM_OPCODES];
2723 static UINT8 cycle_table_pm[X86_NUM_CPUS][CYCLES_NUM_OPCODES];
2724
2725 #define CYCLES_NUM(x)   (cpustate->cycles -= (x))
2726
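/*
 * CYCLES() charges an opcode's cost against the current timeslice, using the
 * real-mode or protected-mode column of the per-model cycle table selected at
 * init time.  Opcode handlers invoke it roughly like this (illustrative):
 *
 *     CYCLES(cpustate, CYCLES_MOV_REG_REG);
 */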
2727 INLINE void CYCLES(i386_state *cpustate,int x)
2728 {
2729         if (PROTECTED_MODE)
2730         {
2731                 cpustate->cycles -= cpustate->cycle_table_pm[x];
2732         }
2733         else
2734         {
2735                 cpustate->cycles -= cpustate->cycle_table_rm[x];
2736         }
2737 }
2738
2739 INLINE void CYCLES_RM(i386_state *cpustate,int modrm, int r, int m)
2740 {
2741         if (modrm >= 0xc0)
2742         {
2743                 if (PROTECTED_MODE)
2744                 {
2745                         cpustate->cycles -= cpustate->cycle_table_pm[r];
2746                 }
2747                 else
2748                 {
2749                         cpustate->cycles -= cpustate->cycle_table_rm[r];
2750                 }
2751         }
2752         else
2753         {
2754                 if (PROTECTED_MODE)
2755                 {
2756                         cpustate->cycles -= cpustate->cycle_table_pm[m];
2757                 }
2758                 else
2759                 {
2760                         cpustate->cycles -= cpustate->cycle_table_rm[m];
2761                 }
2762         }
2763 }
2764
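/* Expand x86_cycle_table (from cycles.h) into flat per-CPU lookup tables:
 * each table entry supplies a (real-mode, protected-mode) cycle pair for
 * every supported CPU model. */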
2765 static void build_cycle_table(void)
2766 {
2767         int i, j;
2768         for (j=0; j < X86_NUM_CPUS; j++)
2769         {
2770 //              cycle_table_rm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2771 //              cycle_table_pm[j] = (UINT8 *)malloc(CYCLES_NUM_OPCODES);
2772
2773                 for (i=0; i < sizeof(x86_cycle_table)/sizeof(X86_CYCLE_TABLE); i++)
2774                 {
2775                         int opcode = x86_cycle_table[i].op;
2776                         cycle_table_rm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][0];
2777                         cycle_table_pm[j][opcode] = x86_cycle_table[i].cpu_cycles[j][1];
2778                 }
2779         }
2780 }
2781
2782 static void report_invalid_opcode(i386_state *cpustate)
2783 {
2784 #ifndef DEBUG_MISSING_OPCODE
2785         logerror("i386: Invalid opcode %02X at %08X %s\n", cpustate->opcode, cpustate->pc - 1, cpustate->lock ? "with lock" : "");
2786 #else
2787         logerror("i386: Invalid opcode");
2788         for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2789                 logerror(" %02X", cpustate->opcode_bytes[a]);
2790         logerror(" at %08X\n", cpustate->opcode_pc);
2791 #endif
2792 }
2793
2794 static void report_invalid_modrm(i386_state *cpustate, const char* opcode, UINT8 modrm)
2795 {
2796 #ifndef DEBUG_MISSING_OPCODE
2797         logerror("i386: Invalid %s modrm %01X at %08X\n", opcode, modrm, cpustate->pc - 2);
2798 #else
2799         logerror("i386: Invalid %s modrm %01X", opcode, modrm);
2800         for (int a = 0; a < cpustate->opcode_bytes_length; a++)
2801                 logerror(" %02X", cpustate->opcode_bytes[a]);
2802         logerror(" at %08X\n", cpustate->opcode_pc);
2803 #endif
2804         i386_trap(cpustate, 6, 0, 0);
2805 }
2806
2807 /* Forward declarations */
2808 static void I386OP(decode_opcode)(i386_state *cpustate);
2809 static void I386OP(decode_two_byte)(i386_state *cpustate);
2810 static void I386OP(decode_three_byte38)(i386_state *cpustate);
2811 static void I386OP(decode_three_byte3a)(i386_state *cpustate);
2812 static void I386OP(decode_three_byte66)(i386_state *cpustate);
2813 static void I386OP(decode_three_bytef2)(i386_state *cpustate);
2814 static void I386OP(decode_three_bytef3)(i386_state *cpustate);
2815 static void I386OP(decode_four_byte3866)(i386_state *cpustate);
2816 static void I386OP(decode_four_byte3a66)(i386_state *cpustate);
2817 static void I386OP(decode_four_byte38f2)(i386_state *cpustate);
2818 static void I386OP(decode_four_byte3af2)(i386_state *cpustate);
2819 static void I386OP(decode_four_byte38f3)(i386_state *cpustate);
2820
2821
2822
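/* The opcode handler implementations and the x86_opcode_table are #included
 * into this translation unit so they can share the I386OP() name macro and
 * the static helpers defined above. */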
2823 #include "i386ops.c"
2824 #include "i386op16.c"
2825 #include "i386op32.c"
2826 #include "i486ops.c"
2827 #include "pentops.c"
2828 #include "x87ops.c"
2829 #include "i386ops.h"
2830
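/* One-byte opcode dispatch.  Escape bytes chain through the decoders below
 * (0f selects the two-byte tables, 0f 38 / 0f 3a the three-byte tables, and
 * the 66/f2/f3-prefixed forms select their own tables); in every case
 * operand_size picks the 16-bit or 32-bit handler column. */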
2831 static void I386OP(decode_opcode)(i386_state *cpustate)
2832 {
2833         cpustate->opcode = FETCH(cpustate);
2834
2835         if(cpustate->lock && !cpustate->lock_table[0][cpustate->opcode])
2836                 return I386OP(invalid)(cpustate);
2837
2838         if( cpustate->operand_size )
2839                 cpustate->opcode_table1_32[cpustate->opcode](cpustate);
2840         else
2841                 cpustate->opcode_table1_16[cpustate->opcode](cpustate);
2842 }
2843
2844 /* Two-byte opcode 0f xx */
2845 static void I386OP(decode_two_byte)(i386_state *cpustate)
2846 {
2847         cpustate->opcode = FETCH(cpustate);
2848
2849         if(cpustate->lock && !cpustate->lock_table[1][cpustate->opcode])
2850                 return I386OP(invalid)(cpustate);
2851
2852         if( cpustate->operand_size )
2853                 cpustate->opcode_table2_32[cpustate->opcode](cpustate);
2854         else
2855                 cpustate->opcode_table2_16[cpustate->opcode](cpustate);
2856 }
2857
2858 /* Three-byte opcode 0f 38 xx */
2859 static void I386OP(decode_three_byte38)(i386_state *cpustate)
2860 {
2861         cpustate->opcode = FETCH(cpustate);
2862
2863         if (cpustate->operand_size)
2864                 cpustate->opcode_table338_32[cpustate->opcode](cpustate);
2865         else
2866                 cpustate->opcode_table338_16[cpustate->opcode](cpustate);
2867 }
2868
2869 /* Three-byte opcode 0f 3a xx */
2870 static void I386OP(decode_three_byte3a)(i386_state *cpustate)
2871 {
2872         cpustate->opcode = FETCH(cpustate);
2873
2874         if (cpustate->operand_size)
2875                 cpustate->opcode_table33a_32[cpustate->opcode](cpustate);
2876         else
2877                 cpustate->opcode_table33a_16[cpustate->opcode](cpustate);
2878 }
2879
2880 /* Three-byte opcode prefix 66 0f xx */
2881 static void I386OP(decode_three_byte66)(i386_state *cpustate)
2882 {
2883         cpustate->opcode = FETCH(cpustate);
2884         if( cpustate->operand_size )
2885                 cpustate->opcode_table366_32[cpustate->opcode](cpustate);
2886         else
2887                 cpustate->opcode_table366_16[cpustate->opcode](cpustate);
2888 }
2889
2890 /* Three-byte opcode prefix f2 0f xx */
2891 static void I386OP(decode_three_bytef2)(i386_state *cpustate)
2892 {
2893         cpustate->opcode = FETCH(cpustate);
2894         if( cpustate->operand_size )
2895                 cpustate->opcode_table3f2_32[cpustate->opcode](cpustate);
2896         else
2897                 cpustate->opcode_table3f2_16[cpustate->opcode](cpustate);
2898 }
2899
2900 /* Three-byte opcode prefix f3 0f */
2901 static void I386OP(decode_three_bytef3)(i386_state *cpustate)
2902 {
2903         cpustate->opcode = FETCH(cpustate);
2904         if( cpustate->operand_size )
2905                 cpustate->opcode_table3f3_32[cpustate->opcode](cpustate);
2906         else
2907                 cpustate->opcode_table3f3_16[cpustate->opcode](cpustate);
2908 }
2909
2910 /* Four-byte opcode prefix 66 0f 38 xx */
2911 static void I386OP(decode_four_byte3866)(i386_state *cpustate)
2912 {
2913         cpustate->opcode = FETCH(cpustate);
2914         if (cpustate->operand_size)
2915                 cpustate->opcode_table46638_32[cpustate->opcode](cpustate);
2916         else
2917                 cpustate->opcode_table46638_16[cpustate->opcode](cpustate);
2918 }
2919
2920 /* Four-byte opcode prefix 66 0f 3a xx */
2921 static void I386OP(decode_four_byte3a66)(i386_state *cpustate)
2922 {
2923         cpustate->opcode = FETCH(cpustate);
2924         if (cpustate->operand_size)
2925                 cpustate->opcode_table4663a_32[cpustate->opcode](cpustate);
2926         else
2927                 cpustate->opcode_table4663a_16[cpustate->opcode](cpustate);
2928 }
2929
2930 /* Four-byte opcode prefix f2 0f 38 xx */
2931 static void I386OP(decode_four_byte38f2)(i386_state *cpustate)
2932 {
2933         cpustate->opcode = FETCH(cpustate);
2934         if (cpustate->operand_size)
2935                 cpustate->opcode_table4f238_32[cpustate->opcode](cpustate);
2936         else
2937                 cpustate->opcode_table4f238_16[cpustate->opcode](cpustate);
2938 }
2939
2940 /* Four-byte opcode prefix f2 0f 3a xx */
2941 static void I386OP(decode_four_byte3af2)(i386_state *cpustate)
2942 {
2943         cpustate->opcode = FETCH(cpustate);
2944         if (cpustate->operand_size)
2945                 cpustate->opcode_table4f23a_32[cpustate->opcode](cpustate);
2946         else
2947                 cpustate->opcode_table4f23a_16[cpustate->opcode](cpustate);
2948 }
2949
2950 /* Four-byte opcode prefix f3 0f 38 xx */
2951 static void I386OP(decode_four_byte38f3)(i386_state *cpustate)
2952 {
2953         cpustate->opcode = FETCH(cpustate);
2954         if (cpustate->operand_size)
2955                 cpustate->opcode_table4f338_32[cpustate->opcode](cpustate);
2956         else
2957                 cpustate->opcode_table4f338_16[cpustate->opcode](cpustate);
2958 }
2959
2960
2961 /*************************************************************************/
2962
2963 static void i386_postload(i386_state *cpustate)
2964 {
2965         int i;
2966         for (i = 0; i < 6; i++)
2967                 i386_load_segment_descriptor(cpustate,i);
2968         CHANGE_PC(cpustate,cpustate->eip);
2969 }
2970
2971 static i386_state *i386_common_init(int tlbsize)
2972 {
2973         int i, j;
2974         static const int regs8[8] = {AL,CL,DL,BL,AH,CH,DH,BH};
2975         static const int regs16[8] = {AX,CX,DX,BX,SP,BP,SI,DI};
2976         static const int regs32[8] = {EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI};
2977         i386_state *cpustate = (i386_state *)calloc(1, sizeof(i386_state));
2978
2979         assert((sizeof(XMM_REG)/sizeof(double)) == 2);
2980
2981         build_cycle_table();
2982
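        /* PF reflects even parity of the low result byte: table[i] is 1 when i
         * has an even number of set bits (e.g. 0x03 -> 1, 0x07 -> 0). */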
2983         for( i=0; i < 256; i++ ) {
2984                 int c=0;
2985                 for( j=0; j < 8; j++ ) {
2986                         if( i & (1 << j) )
2987                                 c++;
2988                 }
2989                 i386_parity_table[i] = ~(c & 0x1) & 0x1;
2990         }
2991
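        /* Pre-decode every possible ModRM byte: bits 5-3 select the reg operand
         * and bits 2-0 the r/m operand, each resolved here to byte/word/dword
         * register indices. */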
2992         for( i=0; i < 256; i++ ) {
2993                 i386_MODRM_table[i].reg.b = regs8[(i >> 3) & 0x7];
2994                 i386_MODRM_table[i].reg.w = regs16[(i >> 3) & 0x7];
2995                 i386_MODRM_table[i].reg.d = regs32[(i >> 3) & 0x7];
2996
2997                 i386_MODRM_table[i].rm.b = regs8[i & 0x7];
2998                 i386_MODRM_table[i].rm.w = regs16[i & 0x7];
2999                 i386_MODRM_table[i].rm.d = regs32[i & 0x7];
3000         }
3001
3002         cpustate->vtlb = vtlb_alloc(cpustate, AS_PROGRAM, 0, tlbsize);
3003         cpustate->smi = false;
3004         cpustate->lock = false;
3005
3006 //      i386_interface *intf = (i386_interface *) device->static_config();
3007 //
3008 //      if (intf != NULL)
3009 //              cpustate->smiact.resolve(intf->smiact, *device);
3010 //      else
3011 //              memset(&cpustate->smiact, 0, sizeof(cpustate->smiact));
3012
3013         zero_state(cpustate);
3014
3015         return cpustate;
3016 }
3017
3018 CPU_INIT( i386 )
3019 {
3020         i386_state *cpustate = i386_common_init(32);
3021         build_opcode_table(cpustate, OP_I386);
3022         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I386];
3023         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I386];
3024         return cpustate;
3025 }
3026
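/* Populate the dispatch tables from x86_opcode_table, keeping only entries
 * whose feature flags intersect the requested mask (the 386 init above passes
 * OP_I386; later models are expected to pass wider masks), so newer opcodes
 * appear only on the CPUs that support them. */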
3027 static void build_opcode_table(i386_state *cpustate, UINT32 features)
3028 {
3029         int i;
3030         for (i=0; i < 256; i++)
3031         {
3032                 cpustate->opcode_table1_16[i] = I386OP(invalid);
3033                 cpustate->opcode_table1_32[i] = I386OP(invalid);
3034                 cpustate->opcode_table2_16[i] = I386OP(invalid);
3035                 cpustate->opcode_table2_32[i] = I386OP(invalid);
3036                 cpustate->opcode_table366_16[i] = I386OP(invalid);
3037                 cpustate->opcode_table366_32[i] = I386OP(invalid);
3038                 cpustate->opcode_table3f2_16[i] = I386OP(invalid);
3039                 cpustate->opcode_table3f2_32[i] = I386OP(invalid);
3040                 cpustate->opcode_table3f3_16[i] = I386OP(invalid);
3041                 cpustate->opcode_table3f3_32[i] = I386OP(invalid);
3042                 cpustate->lock_table[0][i] = false;
3043                 cpustate->lock_table[1][i] = false;
3044         }
3045
3046         for (i=0; i < sizeof(x86_opcode_table)/sizeof(X86_OPCODE); i++)
3047         {
3048                 const X86_OPCODE *op = &x86_opcode_table[i];
3049
3050                 if ((op->flags & features))
3051                 {
3052                         if (op->flags & OP_2BYTE)
3053                         {
3054                                 cpustate->opcode_table2_32[op->opcode] = op->handler32;
3055                                 cpustate->opcode_table2_16[op->opcode] = op->handler16;
3056                                 cpustate->opcode_table366_32[op->opcode] = op->handler32;
3057                                 cpustate->opcode_table366_16[op->opcode] = op->handler16;
3058                                 cpustate->lock_table[1][op->opcode] = op->lockable;
3059                         }
3060                         else if (op->flags & OP_3BYTE66)
3061                         {
3062                                 cpustate->opcode_table366_32[op->opcode] = op->handler32;
3063                                 cpustate->opcode_table366_16[op->opcode] = op->handler16;
3064                         }
3065                         else if (op->flags & OP_3BYTEF2)
3066                         {
3067                                 cpustate->opcode_table3f2_32[op->opcode] = op->handler32;
3068                                 cpustate->opcode_table3f2_16[op->opcode] = op->handler16;
3069                         }
3070                         else if (op->flags & OP_3BYTEF3)
3071                         {
3072                                 cpustate->opcode_table3f3_32[op->opcode] = op->handler32;
3073                                 cpustate->opcode_table3f3_16[op->opcode] = op->handler16;
3074                         }
3075                         else if (op->flags & OP_3BYTE38)
3076                         {
3077                                 cpustate->opcode_table338_32[op->opcode] = op->handler32;
3078                                 cpustate->opcode_table338_16[op->opcode] = op->handler16;
3079                         }
3080                         else if (op->flags & OP_3BYTE3A)
3081                         {
3082                                 cpustate->opcode_table33a_32[op->opcode] = op->handler32;
3083                                 cpustate->opcode_table33a_16[op->opcode] = op->handler16;
3084                         }
3085                         else if (op->flags & OP_4BYTE3866)
3086                         {
3087                                 cpustate->opcode_table46638_32[op->opcode] = op->handler32;
3088                                 cpustate->opcode_table46638_16[op->opcode] = op->handler16;
3089                         }
3090                         else if (op->flags & OP_4BYTE3A66)
3091                         {
3092                                 cpustate->opcode_table4663a_32[op->opcode] = op->handler32;
3093                                 cpustate->opcode_table4663a_16[op->opcode] = op->handler16;
3094                         }
3095                         else if (op->flags & OP_4BYTE38F2)
3096                         {
3097                                 cpustate->opcode_table4f238_32[op->opcode] = op->handler32;
3098                                 cpustate->opcode_table4f238_16[op->opcode] = op->handler16;
3099                         }
3100                         else if (op->flags & OP_4BYTE3AF2)
3101                         {
3102                                 cpustate->opcode_table4f23a_32[op->opcode] = op->handler32;
3103                                 cpustate->opcode_table4f23a_16[op->opcode] = op->handler16;
3104                         }
3105                         else if (op->flags & OP_4BYTE38F3)
3106                         {
3107                                 cpustate->opcode_table4f338_32[op->opcode] = op->handler32;
3108                                 cpustate->opcode_table4f338_16[op->opcode] = op->handler16;
3109                         }
3110                         else
3111                         {
3112                                 cpustate->opcode_table1_32[op->opcode] = op->handler32;
3113                                 cpustate->opcode_table1_16[op->opcode] = op->handler16;
3114                                 cpustate->lock_table[0][op->opcode] = op->lockable;
3115                         }
3116                 }
3117         }
3118 }
3119
3120 static void zero_state(i386_state *cpustate)
3121 {
3122         memset( &cpustate->reg, 0, sizeof(cpustate->reg) );
3123         memset( cpustate->sreg, 0, sizeof(cpustate->sreg) );
3124         cpustate->eip = 0;
3125         cpustate->pc = 0;
3126         cpustate->prev_eip = 0;
3127         cpustate->eflags = 0;
3128         cpustate->eflags_mask = 0;
3129         cpustate->CF = 0;
3130         cpustate->DF = 0;
3131         cpustate->SF = 0;
3132         cpustate->OF = 0;
3133         cpustate->ZF = 0;
3134         cpustate->PF = 0;
3135         cpustate->AF = 0;
3136         cpustate->IF = 0;
3137         cpustate->TF = 0;
3138         cpustate->IOP1 = 0;
3139         cpustate->IOP2 = 0;
3140         cpustate->NT = 0;
3141         cpustate->RF = 0;
3142         cpustate->VM = 0;
3143         cpustate->AC = 0;
3144         cpustate->VIF = 0;
3145         cpustate->VIP = 0;
3146         cpustate->ID = 0;
3147         cpustate->CPL = 0;
3148         cpustate->performed_intersegment_jump = 0;
3149         cpustate->delayed_interrupt_enable = 0;
3150         memset( cpustate->cr, 0, sizeof(cpustate->cr) );
3151         memset( cpustate->dr, 0, sizeof(cpustate->dr) );
3152         memset( cpustate->tr, 0, sizeof(cpustate->tr) );
3153         memset( &cpustate->gdtr, 0, sizeof(cpustate->gdtr) );
3154         memset( &cpustate->idtr, 0, sizeof(cpustate->idtr) );
3155         memset( &cpustate->task, 0, sizeof(cpustate->task) );
3156         memset( &cpustate->ldtr, 0, sizeof(cpustate->ldtr) );
3157         cpustate->ext = 0;
3158         cpustate->halted = 0;
3159         cpustate->busreq = 0;
3160         cpustate->shutdown = 0;
3161         cpustate->operand_size = 0;
3162         cpustate->xmm_operand_size = 0;
3163         cpustate->address_size = 0;
3164         cpustate->operand_prefix = 0;
3165         cpustate->address_prefix = 0;
3166         cpustate->segment_prefix = 0;
3167         cpustate->segment_override = 0;
3168 //      cpustate->cycles = 0;
3169 //      cpustate->base_cycles = 0;
3170         cpustate->opcode = 0;
3171         cpustate->irq_state = 0;
3172         cpustate->a20_mask = 0;
3173         cpustate->cpuid_max_input_value_eax = 0;
3174         cpustate->cpuid_id0 = 0;
3175         cpustate->cpuid_id1 = 0;
3176         cpustate->cpuid_id2 = 0;
3177         cpustate->cpu_version = 0;
3178         cpustate->feature_flags = 0;
3179         cpustate->tsc = 0;
3180         cpustate->perfctr[0] = cpustate->perfctr[1] = 0;
3181         memset( cpustate->x87_reg, 0, sizeof(cpustate->x87_reg) );
3182         cpustate->x87_cw = 0;
3183         cpustate->x87_sw = 0;
3184         cpustate->x87_tw = 0;
3185         cpustate->x87_data_ptr = 0;
3186         cpustate->x87_inst_ptr = 0;
3187         cpustate->x87_opcode = 0;
3188         memset( cpustate->sse_reg, 0, sizeof(cpustate->sse_reg) );
3189         cpustate->mxcsr = 0;
3190         cpustate->smm = false;
3191         cpustate->smi = false;
3192         cpustate->smi_latched = false;
3193         cpustate->nmi_masked = false;
3194         cpustate->nmi_latched = false;
3195         cpustate->smbase = 0;
3196 #ifdef DEBUG_MISSING_OPCODE
3197         memset( cpustate->opcode_bytes, 0, sizeof(cpustate->opcode_bytes) );
3198         cpustate->opcode_pc = 0;
3199         cpustate->opcode_bytes_length = 0;
3200 #endif
3201 }
3202
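/* Reset state: CS starts with base 0xffff0000 and selector 0xf000, so with
 * EIP = 0xfff0 the first fetch comes from the usual reset vector at
 * 0xfffffff0; the base drops to the real-mode selector<<4 form once CS is
 * reloaded.  EDX receives the family/model/stepping reset signature. */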
3203 static CPU_RESET( i386 )
3204 {
3205         zero_state(cpustate);
3206         vtlb_flush_dynamic(cpustate->vtlb);
3207
3208         cpustate->sreg[CS].selector = 0xf000;
3209         cpustate->sreg[CS].base     = 0xffff0000;
3210         cpustate->sreg[CS].limit    = 0xffff;
3211         cpustate->sreg[CS].flags    = 0x93;
3212         cpustate->sreg[CS].valid    = true;
3213
3214         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3215         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3216         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3217         cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid = true;
3218
3219         cpustate->idtr.base = 0;
3220         cpustate->idtr.limit = 0x3ff;
3221         cpustate->smm = false;
3222         cpustate->smi_latched = false;
3223         cpustate->nmi_masked = false;
3224         cpustate->nmi_latched = false;
3225
3226         cpustate->a20_mask = ~0;
3227
3228         cpustate->cr[0] = 0x7fffffe0; // reserved bits set to 1
3229         cpustate->eflags = 0;
3230         cpustate->eflags_mask = 0x00037fd7;
3231         cpustate->eip = 0xfff0;
3232
3233         // [11:8] Family
3234         // [ 7:4] Model
3235         // [ 3:0] Stepping ID
3236         // Family 3 (386), Model 0 (DX), Stepping 8 (D1)
3237         REG32(EAX) = 0;
3238         REG32(EDX) = (3 << 8) | (0 << 4) | (8);
3239
3240         cpustate->CPL = 0;
3241
3242         CHANGE_PC(cpustate,cpustate->eip);
3243 }
3244
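/* System Management Interrupt entry: the register file is dumped to the SMRAM
 * state-save area at smbase+0xfe00, then the core continues in the SMM
 * environment with CS based at smbase, EIP = 0x8000, 4GB "big real mode"
 * segments, and NMIs masked for the duration of SMM. */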
3245 static void pentium_smi(i386_state *cpustate)
3246 {
3247         UINT32 smram_state = cpustate->smbase + 0xfe00;
3248         UINT32 old_cr0 = cpustate->cr[0];
3249         UINT32 old_flags = get_flags(cpustate);
3250
3251         if(cpustate->smm)
3252                 return;
3253
3254         cpustate->cr[0] &= ~(0x8000000d);
3255         set_flags(cpustate, 2);
3256 //      if(!cpustate->smiact.isnull())
3257 //              cpustate->smiact(true);
3258         cpustate->smm = true;
3259         cpustate->smi_latched = false;
3260
3261         // save state
3262         WRITE32(cpustate, cpustate->cr[4], smram_state+SMRAM_IP5_CR4);
3263         WRITE32(cpustate, cpustate->sreg[ES].limit, smram_state+SMRAM_IP5_ESLIM);
3264         WRITE32(cpustate, cpustate->sreg[ES].base, smram_state+SMRAM_IP5_ESBASE);
3265         WRITE32(cpustate, cpustate->sreg[ES].flags, smram_state+SMRAM_IP5_ESACC);
3266         WRITE32(cpustate, cpustate->sreg[CS].limit, smram_state+SMRAM_IP5_CSLIM);
3267         WRITE32(cpustate, cpustate->sreg[CS].base, smram_state+SMRAM_IP5_CSBASE);
3268         WRITE32(cpustate, cpustate->sreg[CS].flags, smram_state+SMRAM_IP5_CSACC);
3269         WRITE32(cpustate, cpustate->sreg[SS].limit, smram_state+SMRAM_IP5_SSLIM);
3270         WRITE32(cpustate, cpustate->sreg[SS].base, smram_state+SMRAM_IP5_SSBASE);
3271         WRITE32(cpustate, cpustate->sreg[SS].flags, smram_state+SMRAM_IP5_SSACC);
3272         WRITE32(cpustate, cpustate->sreg[DS].limit, smram_state+SMRAM_IP5_DSLIM);
3273         WRITE32(cpustate, cpustate->sreg[DS].base, smram_state+SMRAM_IP5_DSBASE);
3274         WRITE32(cpustate, cpustate->sreg[DS].flags, smram_state+SMRAM_IP5_DSACC);
3275         WRITE32(cpustate, cpustate->sreg[FS].limit, smram_state+SMRAM_IP5_FSLIM);
3276         WRITE32(cpustate, cpustate->sreg[FS].base, smram_state+SMRAM_IP5_FSBASE);
3277         WRITE32(cpustate, cpustate->sreg[FS].flags, smram_state+SMRAM_IP5_FSACC);
3278         WRITE32(cpustate, cpustate->sreg[GS].limit, smram_state+SMRAM_IP5_GSLIM);
3279         WRITE32(cpustate, cpustate->sreg[GS].base, smram_state+SMRAM_IP5_GSBASE);
3280         WRITE32(cpustate, cpustate->sreg[GS].flags, smram_state+SMRAM_IP5_GSACC);
3281         WRITE32(cpustate, cpustate->ldtr.flags, smram_state+SMRAM_IP5_LDTACC);
3282         WRITE32(cpustate, cpustate->ldtr.limit, smram_state+SMRAM_IP5_LDTLIM);
3283         WRITE32(cpustate, cpustate->ldtr.base, smram_state+SMRAM_IP5_LDTBASE);
3284         WRITE32(cpustate, cpustate->gdtr.limit, smram_state+SMRAM_IP5_GDTLIM);
3285         WRITE32(cpustate, cpustate->gdtr.base, smram_state+SMRAM_IP5_GDTBASE);
3286         WRITE32(cpustate, cpustate->idtr.limit, smram_state+SMRAM_IP5_IDTLIM);
3287         WRITE32(cpustate, cpustate->idtr.base, smram_state+SMRAM_IP5_IDTBASE);
3288         WRITE32(cpustate, cpustate->task.limit, smram_state+SMRAM_IP5_TRLIM);
3289         WRITE32(cpustate, cpustate->task.base, smram_state+SMRAM_IP5_TRBASE);
3290         WRITE32(cpustate, cpustate->task.flags, smram_state+SMRAM_IP5_TRACC);
3291
3292         WRITE32(cpustate, cpustate->sreg[ES].selector, smram_state+SMRAM_ES);
3293         WRITE32(cpustate, cpustate->sreg[CS].selector, smram_state+SMRAM_CS);
3294         WRITE32(cpustate, cpustate->sreg[SS].selector, smram_state+SMRAM_SS);
3295         WRITE32(cpustate, cpustate->sreg[DS].selector, smram_state+SMRAM_DS);
3296         WRITE32(cpustate, cpustate->sreg[FS].selector, smram_state+SMRAM_FS);
3297         WRITE32(cpustate, cpustate->sreg[GS].selector, smram_state+SMRAM_GS);
3298         WRITE32(cpustate, cpustate->ldtr.segment, smram_state+SMRAM_LDTR);
3299         WRITE32(cpustate, cpustate->task.segment, smram_state+SMRAM_TR);
3300
3301         WRITE32(cpustate, cpustate->dr[7], smram_state+SMRAM_DR7);
3302         WRITE32(cpustate, cpustate->dr[6], smram_state+SMRAM_DR6);
3303         WRITE32(cpustate, REG32(EAX), smram_state+SMRAM_EAX);
3304         WRITE32(cpustate, REG32(ECX), smram_state+SMRAM_ECX);
3305         WRITE32(cpustate, REG32(EDX), smram_state+SMRAM_EDX);
3306         WRITE32(cpustate, REG32(EBX), smram_state+SMRAM_EBX);
3307         WRITE32(cpustate, REG32(ESP), smram_state+SMRAM_ESP);
3308         WRITE32(cpustate, REG32(EBP), smram_state+SMRAM_EBP);
3309         WRITE32(cpustate, REG32(ESI), smram_state+SMRAM_ESI);
3310         WRITE32(cpustate, REG32(EDI), smram_state+SMRAM_EDI);
3311         WRITE32(cpustate, cpustate->eip, smram_state+SMRAM_EIP);
3312         WRITE32(cpustate, old_flags, smram_state+SMRAM_EFLAGS);
3313         WRITE32(cpustate, cpustate->cr[3], smram_state+SMRAM_CR3);
3314         WRITE32(cpustate, old_cr0, smram_state+SMRAM_CR0);
3315
3316         cpustate->sreg[DS].selector = cpustate->sreg[ES].selector = cpustate->sreg[FS].selector = cpustate->sreg[GS].selector = cpustate->sreg[SS].selector = 0;
3317         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3318         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffffffff;
3319         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x8093;
3320         cpustate->sreg[DS].valid = cpustate->sreg[ES].valid = cpustate->sreg[FS].valid = cpustate->sreg[GS].valid = cpustate->sreg[SS].valid = true;
3321         cpustate->sreg[CS].selector = 0x3000; // Pentium only; on the Pentium Pro the selector is smbase >> 4
3322         cpustate->sreg[CS].base = cpustate->smbase;
3323         cpustate->sreg[CS].limit = 0xffffffff;
3324         cpustate->sreg[CS].flags = 0x8093;
3325         cpustate->sreg[CS].valid = true;
3326         cpustate->cr[4] = 0;
3327         cpustate->dr[7] = 0x400;
3328         cpustate->eip = 0x8000;
3329
3330         cpustate->nmi_masked = true;
3331         CHANGE_PC(cpustate,cpustate->eip);
3332 }
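
/*
 * A minimal sketch (illustrative only, never referenced by the core): the SMM
 * entry above loads CS.base with cpustate->smbase and EIP with 0x8000, so the
 * SMI handler begins executing at SMBASE + 0x8000 (0x38000 for the default
 * SMBASE of 0x30000 set at reset).  The helper name below is hypothetical.
 */
static UINT32 smm_entry_point(i386_state *cpustate)
{
        return cpustate->smbase + 0x8000;
}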
3333
3334 static void i386_set_irq_line(i386_state *cpustate,int irqline, int state)
3335 {
3336         int first_cycles = cpustate->cycles;
3337
3338         if (state != CLEAR_LINE && cpustate->halted)
3339         {
3340                 cpustate->halted = 0;
3341         }
3342
3343         if ( irqline == INPUT_LINE_NMI )
3344         {
3345                 /* NMI (this handling is probably not 100% correct) */
3346                 if(cpustate->nmi_masked)
3347                 {
3348                         cpustate->nmi_latched = true;
3349                         return;
3350                 }
3351                 if ( state )
3352                         i386_trap(cpustate,2, 1, 0);
3353         }
3354         else
3355         {
3356                 cpustate->irq_state = state;
3357         }
3358         cpustate->extra_cycles += first_cycles - cpustate->cycles;
3359         cpustate->cycles = first_cycles;
3360 }
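
/*
 * Usage sketch (hypothetical caller, not part of the core): pulsing the NMI
 * input through the routine above.  ASSERT_LINE/CLEAR_LINE are the usual MAME
 * line-state constants.  While NMIs are masked (i.e. inside SMM) the assertion
 * is only latched here and is presumably delivered once the core clears the
 * mask again elsewhere in this file.
 */
static void example_pulse_nmi(i386_state *cpustate)
{
        i386_set_irq_line(cpustate, INPUT_LINE_NMI, ASSERT_LINE);
        i386_set_irq_line(cpustate, INPUT_LINE_NMI, CLEAR_LINE);
}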
3361
3362 static void i386_set_a20_line(i386_state *cpustate,int state)
3363 {
3364         if (state)
3365         {
3366                 cpustate->a20_mask = ~0;
3367         }
3368         else
3369         {
3370                 cpustate->a20_mask = ~(1 << 20);
3371         }
3372         // TODO: how do A20M and the TLB interact?
3373         vtlb_flush_dynamic(cpustate->vtlb);
3374 }
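
/*
 * Illustration (a sketch, assuming a20_mask is ANDed into every physical
 * address, as CPU_TRANSLATE below does): with the A20 gate disabled, an
 * access at the 1MB boundary wraps back to address 0, mimicking 8086
 * wrap-around behaviour.
 */
static UINT32 example_a20_wrap(void)
{
        UINT32 mask = ~(1 << 20);        /* A20 gate disabled */
        return 0x00100000 & mask;        /* -> 0x00000000 */
}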
3375
3376 static CPU_EXECUTE( i386 )
3377 {
3378         CHANGE_PC(cpustate,cpustate->eip);
3379
3380         if (cpustate->halted || cpustate->busreq)
3381         {
3382 #ifdef SINGLE_MODE_DMA
3383                 if(cpustate->dma != NULL) {
3384                         cpustate->dma->do_dma();
3385                 }
3386 #endif
3387                 if (cycles == -1) {
3388                         int passed_cycles = max(1, cpustate->extra_cycles);
3389                         // this is the main CPU; cpustate->cycles is not used
3390                         /*cpustate->cycles = */cpustate->extra_cycles = 0;
3391                         cpustate->tsc += passed_cycles;
3392 #ifdef USE_DEBUGGER
3393                         cpustate->total_cycles += passed_cycles;
3394 #endif
3395                         return passed_cycles;
3396                 } else {
3397                         cpustate->cycles += cycles;
3398                         cpustate->base_cycles = cpustate->cycles;
3399
3400                         /* adjust for any interrupts that came in */
3401                         cpustate->cycles -= cpustate->extra_cycles;
3402                         cpustate->extra_cycles = 0;
3403
3404                         /* if busreq is raised, spin the CPU for the remaining clocks */
3405                         if (cpustate->cycles > 0) {
3406                                 cpustate->cycles = 0;
3407                         }
3408                         int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3409                         cpustate->tsc += passed_cycles;
3410 #ifdef USE_DEBUGGER
3411                         cpustate->total_cycles += passed_cycles;
3412 #endif
3413                         return passed_cycles;
3414                 }
3415         }
3416
3417         if (cycles == -1) {
3418                 cpustate->cycles = 1;
3419         } else {
3420                 cpustate->cycles += cycles;
3421         }
3422         cpustate->base_cycles = cpustate->cycles;
3423
3424         /* adjust for any interrupts that came in */
3425 #ifdef USE_DEBUGGER
3426         cpustate->total_cycles += cpustate->extra_cycles;
3427 #endif
3428         cpustate->cycles -= cpustate->extra_cycles;
3429         cpustate->extra_cycles = 0;
3430
3431         while( cpustate->cycles > 0 && !cpustate->busreq )
3432         {
3433 #ifdef USE_DEBUGGER
3434                 bool now_debugging = cpustate->debugger->now_debugging;
3435                 if(now_debugging) {
3436                         cpustate->debugger->check_break_points(cpustate->pc);
3437                         if(cpustate->debugger->now_suspended) {
3438                                 cpustate->emu->mute_sound();
3439                                 cpustate->debugger->now_waiting = true;
3440                                 while(cpustate->debugger->now_debugging && cpustate->debugger->now_suspended) {
3441                                         cpustate->emu->sleep(10);
3442                                 }
3443                                 cpustate->debugger->now_waiting = false;
3444                         }
3445                         if(cpustate->debugger->now_debugging) {
3446                                 cpustate->program = cpustate->io = cpustate->debugger;
3447                         } else {
3448                                 now_debugging = false;
3449                         }
3450                         int first_cycles = cpustate->cycles;
3451                         i386_check_irq_line(cpustate);
3452                         cpustate->operand_size = cpustate->sreg[CS].d;
3453                         cpustate->xmm_operand_size = 0;
3454                         cpustate->address_size = cpustate->sreg[CS].d;
3455                         cpustate->operand_prefix = 0;
3456                         cpustate->address_prefix = 0;
3457
3458                         cpustate->ext = 1;
3459                         int old_tf = cpustate->TF;
3460
3461                         cpustate->debugger->add_cpu_trace(cpustate->pc);
3462                         cpustate->segment_prefix = 0;
3463                         cpustate->prev_eip = cpustate->eip;
3464                         cpustate->prev_pc = cpustate->pc;
3465
3466                         if(cpustate->delayed_interrupt_enable != 0)
3467                         {
3468                                 cpustate->IF = 1;
3469                                 cpustate->delayed_interrupt_enable = 0;
3470                         }
3471 #ifdef DEBUG_MISSING_OPCODE
3472                         cpustate->opcode_bytes_length = 0;
3473                         cpustate->opcode_pc = cpustate->pc;
3474 #endif
3475                         try
3476                         {
3477                                 I386OP(decode_opcode)(cpustate);
3478                                 if(cpustate->TF && old_tf)
3479                                 {
3480                                         cpustate->prev_eip = cpustate->eip;
3481                                         cpustate->ext = 1;
3482                                         i386_trap(cpustate,1,0,0);
3483                                 }
3484                                 if(cpustate->lock && (cpustate->opcode != 0xf0))
3485                                         cpustate->lock = false;
3486                         }
3487                         catch(UINT64 e)
3488                         {
3489                                 cpustate->ext = 1;
3490                                 i386_trap_with_error(cpustate,e&0xffffffff,0,0,e>>32);
3491                         }
3492 #ifdef SINGLE_MODE_DMA
3493                         if(cpustate->dma != NULL) {
3494                                 cpustate->dma->do_dma();
3495                         }
3496 #endif
3497                         /* adjust for any interrupts that came in */
3498                         cpustate->cycles -= cpustate->extra_cycles;
3499                         cpustate->extra_cycles = 0;
3500                         cpustate->total_cycles += first_cycles - cpustate->cycles;
3501                         
3502                         if(now_debugging) {
3503                                 if(!cpustate->debugger->now_going) {
3504                                         cpustate->debugger->now_suspended = true;
3505                                 }
3506                                 cpustate->program = cpustate->program_stored;
3507                                 cpustate->io = cpustate->io_stored;
3508                         }
3509                 } else {
3510                         int first_cycles = cpustate->cycles;
3511 #endif
3512                         i386_check_irq_line(cpustate);
3513                         cpustate->operand_size = cpustate->sreg[CS].d;
3514                         cpustate->xmm_operand_size = 0;
3515                         cpustate->address_size = cpustate->sreg[CS].d;
3516                         cpustate->operand_prefix = 0;
3517                         cpustate->address_prefix = 0;
3518
3519                         cpustate->ext = 1;
3520                         int old_tf = cpustate->TF;
3521
3522 #ifdef USE_DEBUGGER
3523                         cpustate->debugger->add_cpu_trace(cpustate->pc);
3524 #endif
3525                         cpustate->segment_prefix = 0;
3526                         cpustate->prev_eip = cpustate->eip;
3527                         cpustate->prev_pc = cpustate->pc;
3528
3529                         if(cpustate->delayed_interrupt_enable != 0)
3530                         {
3531                                 cpustate->IF = 1;
3532                                 cpustate->delayed_interrupt_enable = 0;
3533                         }
3534 #ifdef DEBUG_MISSING_OPCODE
3535                         cpustate->opcode_bytes_length = 0;
3536                         cpustate->opcode_pc = cpustate->pc;
3537 #endif
3538                         try
3539                         {
3540                                 I386OP(decode_opcode)(cpustate);
3541                                 if(cpustate->TF && old_tf)
3542                                 {
3543                                         cpustate->prev_eip = cpustate->eip;
3544                                         cpustate->ext = 1;
3545                                         i386_trap(cpustate,1,0,0);
3546                                 }
3547                                 if(cpustate->lock && (cpustate->opcode != 0xf0))
3548                                         cpustate->lock = false;
3549                         }
3550                         catch(UINT64 e)
3551                         {
3552                                 cpustate->ext = 1;
3553                                 i386_trap_with_error(cpustate,e&0xffffffff,0,0,e>>32);
3554                         }
3555 #ifdef SINGLE_MODE_DMA
3556                         if(cpustate->dma != NULL) {
3557                                 cpustate->dma->do_dma();
3558                         }
3559 #endif
3560                         /* adjust for any interrupts that came in */
3561                         cpustate->cycles -= cpustate->extra_cycles;
3562                         cpustate->extra_cycles = 0;
3563 #ifdef USE_DEBUGGER
3564                         cpustate->total_cycles += first_cycles - cpustate->cycles;
3565                 }
3566 #endif
3567         }
3568
3569         /* if busreq is raised, spin the CPU for the remaining clocks */
3570         if (cpustate->cycles > 0 && cpustate->busreq) {
3571 #ifdef USE_DEBUGGER
3572                 cpustate->total_cycles += cpustate->cycles;
3573 #endif
3574                 cpustate->cycles = 0;
3575         }
3576         int passed_cycles = cpustate->base_cycles - cpustate->cycles;
3577         cpustate->tsc += passed_cycles;
3578         return passed_cycles;
3579 }
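
/*
 * Worked example of the cycle accounting above (made-up numbers, not called
 * by the core): the scheduler's budget minus any extra_cycles already charged
 * for interrupts is what the decode loop receives, and the value returned
 * (and added to the TSC) is base_cycles minus whatever remains.
 */
static int example_passed_cycles(void)
{
        int base_cycles = 100;           /* budget handed in by the scheduler */
        int cycles = base_cycles - 12;   /* 12 extra_cycles were charged for an IRQ */
        cycles -= 85;                    /* the decode loop burned 85 more cycles */
        return base_cycles - cycles;     /* 100 - 3 = 97 cycles reported */
}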
3580
3581 /*************************************************************************/
3582
3583 static CPU_TRANSLATE( i386 )
3584 {
3585         i386_state *cpustate = (i386_state *)cpudevice;
3586         int ret = TRUE;
3587         if(space == AS_PROGRAM)
3588                 ret = i386_translate_address(cpustate, intention, address, NULL);
3589         *address &= cpustate->a20_mask;
3590         return ret;
3591 }
3592
3593 /*****************************************************************************/
3594 /* Intel 486 */
3595
3596
3597 static CPU_INIT( i486 )
3598 {
3599         i386_state *cpustate = i386_common_init(32);
3600         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486);
3601         build_x87_opcode_table(cpustate);
3602         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_I486];
3603         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_I486];
3604         return cpustate;
3605 }
3606
3607 static CPU_RESET( i486 )
3608 {
3609         zero_state(cpustate);
3610         vtlb_flush_dynamic(cpustate->vtlb);
3611
3612         cpustate->sreg[CS].selector = 0xf000;
3613         cpustate->sreg[CS].base     = 0xffff0000;
3614         cpustate->sreg[CS].limit    = 0xffff;
3615         cpustate->sreg[CS].flags    = 0x0093;
3616
3617         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3618         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3619         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3620
3621         cpustate->idtr.base = 0;
3622         cpustate->idtr.limit = 0x3ff;
3623
3624         cpustate->a20_mask = ~0;
3625
3626         cpustate->cr[0] = 0x00000010;
3627         cpustate->eflags = 0;
3628         cpustate->eflags_mask = 0x00077fd7;
3629         cpustate->eip = 0xfff0;
3630         cpustate->smm = false;
3631         cpustate->smi_latched = false;
3632         cpustate->nmi_masked = false;
3633         cpustate->nmi_latched = false;
3634
3635         x87_reset(cpustate);
3636
3637         // [11:8] Family
3638         // [ 7:4] Model
3639         // [ 3:0] Stepping ID
3640         // Family 4 (486), Model 0/1 (DX), Stepping 3
3641         REG32(EAX) = 0;
3642         REG32(EDX) = (4 << 8) | (0 << 4) | (3);
3643
3644         CHANGE_PC(cpustate,cpustate->eip);
3645 }
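
/*
 * Worked example (not referenced by the core): decoding the version dword the
 * reset code above stores in EDX, using the field layout documented there.
 */
static void example_decode_486_signature(void)
{
        UINT32 ver = (4 << 8) | (0 << 4) | (3);   /* 0x0403, as written to EDX above */
        int family   = (ver >> 8) & 0x0f;         /* 4 -> 486 */
        int model    = (ver >> 4) & 0x0f;         /* 0 -> DX  */
        int stepping =  ver       & 0x0f;         /* 3        */
        (void)family; (void)model; (void)stepping;
}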
3646
3647 /*****************************************************************************/
3648 /* Pentium */
3649
3650
3651 static CPU_INIT( pentium )
3652 {
3653         // 64 dtlb small, 8 dtlb large, 32 itlb
3654         i386_state *cpustate = i386_common_init(96);
3655         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM);
3656         build_x87_opcode_table(cpustate);
3657         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];
3658         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];
3659         return cpustate;
3660 }
3661
3662 static CPU_RESET( pentium )
3663 {
3664         zero_state(cpustate);
3665         vtlb_flush_dynamic(cpustate->vtlb);
3666
3667         cpustate->sreg[CS].selector = 0xf000;
3668         cpustate->sreg[CS].base     = 0xffff0000;
3669         cpustate->sreg[CS].limit    = 0xffff;
3670         cpustate->sreg[CS].flags    = 0x0093;
3671
3672         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3673         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3674         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3675
3676         cpustate->idtr.base = 0;
3677         cpustate->idtr.limit = 0x3ff;
3678
3679         cpustate->a20_mask = ~0;
3680
3681         cpustate->cr[0] = 0x00000010;
3682         cpustate->eflags = 0x00200000;
3683         cpustate->eflags_mask = 0x003f7fd7;
3684         cpustate->eip = 0xfff0;
3685         cpustate->mxcsr = 0x1f80;
3686         cpustate->smm = false;
3687         cpustate->smi_latched = false;
3688         cpustate->smbase = 0x30000;
3689         cpustate->nmi_masked = false;
3690         cpustate->nmi_latched = false;
3691
3692         x87_reset(cpustate);
3693
3694         // [11:8] Family
3695         // [ 7:4] Model
3696         // [ 3:0] Stepping ID
3697         // Family 5 (Pentium), Model 2 (75 - 200MHz), Stepping 5
3698         REG32(EAX) = 0;
3699         REG32(EDX) = (5 << 8) | (2 << 4) | (5);
3700
3701         cpustate->cpuid_id0 = 0x756e6547;   // Genu
3702         cpustate->cpuid_id1 = 0x49656e69;   // ineI
3703         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
3704
3705         cpustate->cpuid_max_input_value_eax = 0x01;
3706         cpustate->cpu_version = REG32(EDX);
3707
3708         // [ 0:0] FPU on chip
3709         // [ 2:2] I/O breakpoints
3710         // [ 4:4] Time Stamp Counter
3711         // [ 5:5] Pentium CPU style model specific registers
3712         // [ 7:7] Machine Check Exception
3713         // [ 8:8] CMPXCHG8B instruction
3714         cpustate->feature_flags = 0x000001bf;       // bits 1 (VME) and 3 (PSE) are also set
3715
3716         CHANGE_PC(cpustate,cpustate->eip);
3717 }
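
/*
 * Sketch (assumes the CPUID handler elsewhere in this file returns these
 * dwords in the conventional EBX, EDX, ECX order): the cpuid_id0/1/2 values
 * set above are simply the vendor string "GenuineIntel" packed as
 * little-endian dwords.
 */
static void example_vendor_string(i386_state *cpustate)
{
        char vendor[13];
        UINT32 id[3];
        int i;
        id[0] = cpustate->cpuid_id0;   /* "Genu" */
        id[1] = cpustate->cpuid_id1;   /* "ineI" */
        id[2] = cpustate->cpuid_id2;   /* "ntel" */
        for (i = 0; i < 12; i++)
                vendor[i] = (char)(id[i >> 2] >> ((i & 3) * 8));
        vendor[12] = '\0';             /* -> "GenuineIntel" */
        (void)vendor;
}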
3718
3719 /*****************************************************************************/
3720 /* Cyrix MediaGX */
3721
3722
3723 static CPU_INIT( mediagx )
3724 {
3725         // probably 32 unified
3726         i386_state *cpustate = i386_common_init(32);
3727         build_x87_opcode_table(cpustate);
3728         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_CYRIX);
3729         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_MEDIAGX];
3730         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_MEDIAGX];
3731         return cpustate;
3732 }
3733
3734 static CPU_RESET( mediagx )
3735 {
3736         zero_state(cpustate);
3737         vtlb_flush_dynamic(cpustate->vtlb);
3738
3739         cpustate->sreg[CS].selector = 0xf000;
3740         cpustate->sreg[CS].base     = 0xffff0000;
3741         cpustate->sreg[CS].limit    = 0xffff;
3742         cpustate->sreg[CS].flags    = 0x0093;
3743
3744         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3745         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3746         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3747
3748         cpustate->idtr.base = 0;
3749         cpustate->idtr.limit = 0x3ff;
3750
3751         cpustate->a20_mask = ~0;
3752
3753         cpustate->cr[0] = 0x00000010;
3754         cpustate->eflags = 0x00200000;
3755         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3756         cpustate->eip = 0xfff0;
3757         cpustate->smm = false;
3758         cpustate->smi_latched = false;
3759         cpustate->nmi_masked = false;
3760         cpustate->nmi_latched = false;
3761
3762         x87_reset(cpustate);
3763
3764         // [11:8] Family
3765         // [ 7:4] Model
3766         // [ 3:0] Stepping ID
3767         // Family 4, Model 4 (MediaGX)
3768         REG32(EAX) = 0;
3769         REG32(EDX) = (4 << 8) | (4 << 4) | (1); /* TODO: is this correct? */
3770
3771         cpustate->cpuid_id0 = 0x69727943;   // Cyri
3772         cpustate->cpuid_id1 = 0x736e4978;   // xIns
3773         cpustate->cpuid_id2 = 0x6d616574;   // tead
3774
3775         cpustate->cpuid_max_input_value_eax = 0x01;
3776         cpustate->cpu_version = REG32(EDX);
3777
3778         // [ 0:0] FPU on chip
3779         cpustate->feature_flags = 0x00000001;
3780
3781         CHANGE_PC(cpustate,cpustate->eip);
3782 }
3783
3784 /*****************************************************************************/
3785 /* Intel Pentium Pro */
3786
3787 static CPU_INIT( pentium_pro )
3788 {
3789         // 64 dtlb small, 32 itlb
3790         i386_state *cpustate = i386_common_init(96);
3791         build_x87_opcode_table(cpustate);
3792         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO);
3793         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3794         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3795         return cpustate;
3796 }
3797
3798 static CPU_RESET( pentium_pro )
3799 {
3800         zero_state(cpustate);
3801         vtlb_flush_dynamic(cpustate->vtlb);
3802
3803         cpustate->sreg[CS].selector = 0xf000;
3804         cpustate->sreg[CS].base     = 0xffff0000;
3805         cpustate->sreg[CS].limit    = 0xffff;
3806         cpustate->sreg[CS].flags    = 0x0093;
3807
3808         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3809         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3810         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3811
3812         cpustate->idtr.base = 0;
3813         cpustate->idtr.limit = 0x3ff;
3814
3815         cpustate->a20_mask = ~0;
3816
3817         cpustate->cr[0] = 0x60000010;
3818         cpustate->eflags = 0x00200000;
3819         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3820         cpustate->eip = 0xfff0;
3821         cpustate->mxcsr = 0x1f80;
3822         cpustate->smm = false;
3823         cpustate->smi_latched = false;
3824         cpustate->smbase = 0x30000;
3825         cpustate->nmi_masked = false;
3826         cpustate->nmi_latched = false;
3827
3828         x87_reset(cpustate);
3829
3830         // [11:8] Family
3831         // [ 7:4] Model
3832         // [ 3:0] Stepping ID
3833         // Family 6, Model 1 (Pentium Pro)
3834         REG32(EAX) = 0;
3835         REG32(EDX) = (6 << 8) | (1 << 4) | (1); /* TODO: is this correct? */
3836
3837         cpustate->cpuid_id0 = 0x756e6547;   // Genu
3838         cpustate->cpuid_id1 = 0x49656e69;   // ineI
3839         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
3840
3841         cpustate->cpuid_max_input_value_eax = 0x02;
3842         cpustate->cpu_version = REG32(EDX);
3843
3844         // [ 0:0] FPU on chip
3845         // [ 2:2] I/O breakpoints
3846         // [ 4:4] Time Stamp Counter
3847         // [ 5:5] Pentium CPU style model specific registers
3848         // [ 7:7] Machine Check Exception
3849         // [ 8:8] CMPXCHG8B instruction
3850         // [15:15] CMOV and FCMOV
3851         // No MMX
3852         cpustate->feature_flags = 0x000081bf;
3853
3854         CHANGE_PC(cpustate,cpustate->eip);
3855 }
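
/*
 * Sketch (illustrative, not called by the core): how the feature dword set
 * above answers the CPUID.01h:EDX capability tests a guest would make.
 */
static void example_ppro_features(i386_state *cpustate)
{
        int has_cmov = (cpustate->feature_flags >> 15) & 1;   /* 1: CMOV/FCMOV present */
        int has_mmx  = (cpustate->feature_flags >> 23) & 1;   /* 0: no MMX on the Pentium Pro */
        (void)has_cmov; (void)has_mmx;
}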
3856
3857 /*****************************************************************************/
3858 /* Intel Pentium MMX */
3859
3860 static CPU_INIT( pentium_mmx )
3861 {
3862         // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3863         i386_state *cpustate = i386_common_init(96);
3864         build_x87_opcode_table(cpustate);
3865         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_MMX);
3866         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3867         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3868         return cpustate;
3869 }
3870
3871 static CPU_RESET( pentium_mmx )
3872 {
3873         zero_state(cpustate);
3874         vtlb_flush_dynamic(cpustate->vtlb);
3875
3876         cpustate->sreg[CS].selector = 0xf000;
3877         cpustate->sreg[CS].base     = 0xffff0000;
3878         cpustate->sreg[CS].limit    = 0xffff;
3879         cpustate->sreg[CS].flags    = 0x0093;
3880
3881         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3882         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3883         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3884
3885         cpustate->idtr.base = 0;
3886         cpustate->idtr.limit = 0x3ff;
3887
3888         cpustate->a20_mask = ~0;
3889
3890         cpustate->cr[0] = 0x60000010;
3891         cpustate->eflags = 0x00200000;
3892         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3893         cpustate->eip = 0xfff0;
3894         cpustate->mxcsr = 0x1f80;
3895         cpustate->smm = false;
3896         cpustate->smi_latched = false;
3897         cpustate->smbase = 0x30000;
3898         cpustate->nmi_masked = false;
3899         cpustate->nmi_latched = false;
3900
3901         x87_reset(cpustate);
3902
3903         // [11:8] Family
3904         // [ 7:4] Model
3905         // [ 3:0] Stepping ID
3906         // Family 5, Model 4 (P55C)
3907         REG32(EAX) = 0;
3908         REG32(EDX) = (5 << 8) | (4 << 4) | (1);
3909
3910         cpustate->cpuid_id0 = 0x756e6547;   // Genu
3911         cpustate->cpuid_id1 = 0x49656e69;   // ineI
3912         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
3913
3914         cpustate->cpuid_max_input_value_eax = 0x01;
3915         cpustate->cpu_version = REG32(EDX);
3916
3917         // [ 0:0] FPU on chip
3918         // [ 2:2] I/O breakpoints
3919         // [ 4:4] Time Stamp Counter
3920         // [ 5:5] Pentium CPU style model specific registers
3921         // [ 7:7] Machine Check Exception
3922         // [ 8:8] CMPXCHG8B instruction
3923         // [23:23] MMX instructions
3924         cpustate->feature_flags = 0x008001bf;
3925
3926         CHANGE_PC(cpustate,cpustate->eip);
3927 }
3928
3929 /*****************************************************************************/
3930 /* Intel Pentium II */
3931
3932 static CPU_INIT( pentium2 )
3933 {
3934         // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
3935         i386_state *cpustate = i386_common_init(96);
3936         build_x87_opcode_table(cpustate);
3937         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX);
3938         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3939         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
3940         return cpustate;
3941 }
3942
3943 static CPU_RESET( pentium2 )
3944 {
3945         zero_state(cpustate);
3946         vtlb_flush_dynamic(cpustate->vtlb);
3947
3948         cpustate->sreg[CS].selector = 0xf000;
3949         cpustate->sreg[CS].base     = 0xffff0000;
3950         cpustate->sreg[CS].limit    = 0xffff;
3951         cpustate->sreg[CS].flags    = 0x0093;
3952
3953         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
3954         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
3955         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
3956
3957         cpustate->idtr.base = 0;
3958         cpustate->idtr.limit = 0x3ff;
3959
3960         cpustate->a20_mask = ~0;
3961
3962         cpustate->cr[0] = 0x60000010;
3963         cpustate->eflags = 0x00200000;
3964         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
3965         cpustate->eip = 0xfff0;
3966         cpustate->mxcsr = 0x1f80;
3967         cpustate->smm = false;
3968         cpustate->smi_latched = false;
3969         cpustate->smbase = 0x30000;
3970         cpustate->nmi_masked = false;
3971         cpustate->nmi_latched = false;
3972
3973         x87_reset(cpustate);
3974
3975         // [11:8] Family
3976         // [ 7:4] Model
3977         // [ 3:0] Stepping ID
3978         // Family 6, Model 3 (Pentium II / Klamath)
3979         REG32(EAX) = 0;
3980         REG32(EDX) = (6 << 8) | (3 << 4) | (1); /* TODO: is this correct? */
3981
3982         cpustate->cpuid_id0 = 0x756e6547;   // Genu
3983         cpustate->cpuid_id1 = 0x49656e69;   // ineI
3984         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
3985
3986         cpustate->cpuid_max_input_value_eax = 0x02;
3987         cpustate->cpu_version = REG32(EDX);
3988
3989         // [ 0:0] FPU on chip
3990         cpustate->feature_flags = 0x008081bf;       // TODO: enable relevant flags here
3991
3992         CHANGE_PC(cpustate,cpustate->eip);
3993 }
3994
3995 /*****************************************************************************/
3996 /* Intel Pentium III */
3997
3998 static CPU_INIT( pentium3 )
3999 {
4000         // 64 dtlb small, 8 dtlb large, 32 itlb small, 2 itlb large
4001         i386_state *cpustate = i386_common_init(96);
4002         build_x87_opcode_table(cpustate);
4003         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE);
4004         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
4005         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
4006         return cpustate;
4007 }
4008
4009 static CPU_RESET( pentium3 )
4010 {
4011         zero_state(cpustate);
4012         vtlb_flush_dynamic(cpustate->vtlb);
4013
4014         cpustate->sreg[CS].selector = 0xf000;
4015         cpustate->sreg[CS].base     = 0xffff0000;
4016         cpustate->sreg[CS].limit    = 0xffff;
4017         cpustate->sreg[CS].flags    = 0x0093;
4018
4019         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
4020         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
4021         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
4022
4023         cpustate->idtr.base = 0;
4024         cpustate->idtr.limit = 0x3ff;
4025
4026         cpustate->a20_mask = ~0;
4027
4028         cpustate->cr[0] = 0x60000010;
4029         cpustate->eflags = 0x00200000;
4030         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
4031         cpustate->eip = 0xfff0;
4032         cpustate->mxcsr = 0x1f80;
4033         cpustate->smm = false;
4034         cpustate->smi_latched = false;
4035         cpustate->smbase = 0x30000;
4036         cpustate->nmi_masked = false;
4037         cpustate->nmi_latched = false;
4038
4039         x87_reset(cpustate);
4040
4041         // [11:8] Family
4042         // [ 7:4] Model
4043         // [ 3:0] Stepping ID
4044         // Family 6, Model 8 (Pentium III / Coppermine)
4045         REG32(EAX) = 0;
4046         REG32(EDX) = (6 << 8) | (8 << 4) | (10);
4047
4048         cpustate->cpuid_id0 = 0x756e6547;   // Genu
4049         cpustate->cpuid_id1 = 0x49656e69;   // ineI
4050         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
4051
4052         cpustate->cpuid_max_input_value_eax = 0x03;
4053         cpustate->cpu_version = REG32(EDX);
4054
4055         // [ 0:0] FPU on chip
4056         // [ 4:4] Time Stamp Counter
4057         // [13:13] PTE Global Bit
4058         cpustate->feature_flags = 0x00002011;       // TODO: enable relevant flags here
4059
4060         CHANGE_PC(cpustate,cpustate->eip);
4061 }
4062
4063 /*****************************************************************************/
4064 /* Intel Pentium 4 */
4065
4066 static CPU_INIT( pentium4 )
4067 {
4068         // 128 dtlb, 64 itlb
4069         i386_state *cpustate = i386_common_init(196);
4070         build_x87_opcode_table(cpustate);
4071         build_opcode_table(cpustate, OP_I386 | OP_FPU | OP_I486 | OP_PENTIUM | OP_PPRO | OP_MMX | OP_SSE | OP_SSE2);
4072         cpustate->cycle_table_rm = cycle_table_rm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
4073         cpustate->cycle_table_pm = cycle_table_pm[CPU_CYCLES_PENTIUM];  // TODO: generate own cycle tables
4074         return cpustate;
4075 }
4076
4077 static CPU_RESET( pentium4 )
4078 {
4079         zero_state(cpustate);
4080         vtlb_flush_dynamic(cpustate->vtlb);
4081
4082         cpustate->sreg[CS].selector = 0xf000;
4083         cpustate->sreg[CS].base     = 0xffff0000;
4084         cpustate->sreg[CS].limit    = 0xffff;
4085         cpustate->sreg[CS].flags    = 0x0093;
4086
4087         cpustate->sreg[DS].base = cpustate->sreg[ES].base = cpustate->sreg[FS].base = cpustate->sreg[GS].base = cpustate->sreg[SS].base = 0x00000000;
4088         cpustate->sreg[DS].limit = cpustate->sreg[ES].limit = cpustate->sreg[FS].limit = cpustate->sreg[GS].limit = cpustate->sreg[SS].limit = 0xffff;
4089         cpustate->sreg[DS].flags = cpustate->sreg[ES].flags = cpustate->sreg[FS].flags = cpustate->sreg[GS].flags = cpustate->sreg[SS].flags = 0x0093;
4090
4091         cpustate->idtr.base = 0;
4092         cpustate->idtr.limit = 0x3ff;
4093
4094         cpustate->a20_mask = ~0;
4095
4096         cpustate->cr[0] = 0x60000010;
4097         cpustate->eflags = 0x00200000;
4098         cpustate->eflags_mask = 0x00277fd7; /* TODO: is this correct? */
4099         cpustate->eip = 0xfff0;
4100         cpustate->mxcsr = 0x1f80;
4101         cpustate->smm = false;
4102         cpustate->smi_latched = false;
4103         cpustate->smbase = 0x30000;
4104         cpustate->nmi_masked = false;
4105         cpustate->nmi_latched = false;
4106
4107         x87_reset(cpustate);
4108
4109         // [27:20] Extended family
4110         // [19:16] Extended model
4111         // [13:12] Type
4112         // [11: 8] Family
4113         // [ 7: 4] Model
4114         // [ 3: 0] Stepping ID
4115         // Family 15, Model 0 (Pentium 4 / Willamette)
4116         REG32(EAX) = 0;
4117         REG32(EDX) = (0 << 20) | (0xf << 8) | (0 << 4) | (1);
4118
4119         cpustate->cpuid_id0 = 0x756e6547;   // Genu
4120         cpustate->cpuid_id1 = 0x49656e69;   // ineI
4121         cpustate->cpuid_id2 = 0x6c65746e;   // ntel
4122
4123         cpustate->cpuid_max_input_value_eax = 0x02;
4124         cpustate->cpu_version = REG32(EDX);
4125
4126         // [ 0:0] FPU on chip
4127         cpustate->feature_flags = 0x00000001;       // TODO: enable relevant flags here
4128
4129         CHANGE_PC(cpustate,cpustate->eip);
4130 }
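
/*
 * Worked example (not referenced by the core) of the extended-family encoding
 * documented above: because the base family field here is 0xF, the displayed
 * family is the base plus the extended family, so the dword written to EDX
 * decodes to family 15, model 0, stepping 1.
 */
static void example_decode_p4_signature(void)
{
        UINT32 ver = (0 << 20) | (0xf << 8) | (0 << 4) | (1);   /* as written to EDX above */
        int ext_family = (ver >> 20) & 0xff;                    /* 0 */
        int family     = ((ver >> 8) & 0x0f) + ext_family;      /* 15 (Pentium 4) */
        int model      = (ver >> 4) & 0x0f;                     /* 0 (Willamette) */
        int stepping   =  ver       & 0x0f;                     /* 1 */
        (void)family; (void)model; (void)stepping;
}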
4131