1 /****************************************************************************
2 ****************************************************************************
4 *** This header was automatically generated from a Linux kernel header
5 *** of the same name, to make information necessary for userspace to
6 *** call into the kernel available to libc. It contains only constants,
7 *** structures, and macros generated from the original header, and thus,
8 *** contains no copyrightable information.
10 ****************************************************************************
11 ****************************************************************************/
12 #ifndef __ASM_I386_PROCESSOR_H
13 #define __ASM_I386_PROCESSOR_H
16 #include <asm/math_emu.h>
17 #include <asm/segment.h>
19 #include <asm/types.h>
20 #include <asm/sigcontext.h>
21 #include <asm/cpufeature.h>
23 #include <asm/system.h>
24 #include <linux/cache.h>
25 #include <linux/threads.h>
26 #include <asm/percpu.h>
27 #include <linux/cpumask.h>
28 #include <linux/init.h>
29 #include <asm/processor-flags.h>
/* Segment-descriptor helpers: a GDT/LDT descriptor is stored as two raw
 * 32-bit words, ->a (low) and ->b (high); both tests operate on the raw words. */
35 #define desc_empty(desc) (!((desc)->a | (desc)->b))
37 #define desc_equal(desc1, desc2) (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
/* Return the current instruction pointer: loads the address of the local
 * asm label "1:" (the instruction right after the mov) into pc. */
39 #define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
/* NOTE(review): fragment of what appears to be struct cpuinfo_x86 -- the
 * "struct cpuinfo_x86 {" opening and several members (the embedded line
 * numbers jump from 29 to 51) were lost when this header was generated.
 * Do not edit field order/types here without consulting the full header. */
51 unsigned long x86_capability[NCAPINTS];
52 char x86_vendor_id[16];
53 char x86_model_id[64];
55 int x86_cache_alignment;
/* BogoMIPS-style calibration value used for delay loops. */
61 unsigned long loops_per_jiffy;
62 unsigned char x86_max_cores;
/* CLFLUSH granularity in bytes, as reported by CPUID -- TODO confirm. */
64 unsigned short x86_clflush_size;
/* Cache-line aligned so per-CPU copies do not false-share. */
65 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
/* CPU vendor identifiers stored in cpuinfo. The numeric values are ABI
 * (compared against detection tables elsewhere) and must not be renumbered.
 * Note the gap at 6: this generated copy omits one vendor between CENTAUR
 * and TRANSMETA, but NUM (= highest + 1) still accounts for it. */
67 #define X86_VENDOR_INTEL 0
68 #define X86_VENDOR_CYRIX 1
69 #define X86_VENDOR_AMD 2
70 #define X86_VENDOR_UMC 3
71 #define X86_VENDOR_NEXGEN 4
72 #define X86_VENDOR_CENTAUR 5
73 #define X86_VENDOR_TRANSMETA 7
74 #define X86_VENDOR_NSC 8
/* Number of vendor slots (one past the highest assigned id). */
75 #define X86_VENDOR_NUM 9
76 #define X86_VENDOR_UNKNOWN 0xff
/* Uniprocessor build: every "per-CPU" cpuinfo query aliases the single
 * boot_cpu_data instance (the cpu argument is intentionally ignored). */
78 #define cpu_data(cpu) boot_cpu_data
79 #define current_cpu_data boot_cpu_data
/* Switch page tables: CR3 takes the PHYSICAL address of the page directory. */
81 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
/* User address space ends where the kernel mapping begins (PAGE_OFFSET). */
83 #define TASK_SIZE (PAGE_OFFSET)
/* Default mmap search base: one third into the user address space. */
85 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
87 #define HAVE_ARCH_PICK_MMAP_LAYOUT
/* I/O permission bitmap in the TSS: one bit per x86 I/O port (64K ports). */
89 #define IO_BITMAP_BITS 65536
90 #define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
91 #define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
92 #define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
/* Sentinel offsets past the TSS limit: any io_bitmap_base beyond the segment
 * limit makes the CPU fault on port access instead of consulting the bitmap.
 * The _LAZY value additionally marks "bitmap exists but not yet copied in". */
93 #define INVALID_IO_BITMAP_OFFSET 0x8000
94 #define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
/* NOTE(review): FPU state containers, heavily truncated by the header
 * generator -- the member lists of the fsave/fxsave structs and the opening
 * of the enclosing union (original lines 97-107, 109-121, 125-132, 136-138)
 * are missing. Kept byte-identical; do not reconstruct from memory. */
/* Legacy FSAVE-format FPU state (original member list elided). */
96 struct i387_fsave_struct {
/* FXSAVE-format state; the hardware requires 16-byte alignment. */
108 struct i387_fxsave_struct {
122 } __attribute__ ((aligned (16)));
/* State for the in-kernel math emulator (see <asm/math_emu.h>). */
124 struct i387_soft_struct {
133 unsigned char ftop, changed, lookahead, no_update, rm, alimit;
135 unsigned long entry_eip;
/* These three members are presumably the arms of union i387_union --
 * the union's opening line is not present in this generated copy. */
139 struct i387_fsave_struct fsave;
140 struct i387_fxsave_struct fxsave;
141 struct i387_soft_struct soft;
148 struct thread_struct;
/* NOTE(review): hardware TSS layout fragment -- the opening
 * "struct i386_hw_tss {" and several esp/pointer members are missing from
 * this generated copy. Field layout mirrors the CPU-defined 32-bit TSS:
 * each 16-bit selector is padded to 32 bits by a __*h filler half. */
151 unsigned short back_link,__blh;
/* Ring-0/1/2 stack selectors (esp0/esp1/esp2 members not visible here). */
153 unsigned short ss0,__ss0h;
155 unsigned short ss1,__ss1h;
157 unsigned short ss2,__ss2h;
160 unsigned long eflags;
161 unsigned long eax,ecx,edx,ebx;
166 unsigned short es, __esh;
167 unsigned short cs, __csh;
168 unsigned short ss, __ssh;
169 unsigned short ds, __dsh;
170 unsigned short fs, __fsh;
171 unsigned short gs, __gsh;
172 unsigned short ldt, __ldth;
/* io_bitmap_base: offset from TSS base to the I/O permission bitmap;
 * packed because the CPU consumes this layout directly. */
173 unsigned short trace, io_bitmap_base;
174 } __attribute__((packed));
/* Software tss_struct wrapping the hardware part (its opening line is
 * likewise missing here). */
177 struct i386_hw_tss x86_tss;
/* +1 long: trailing all-ones word terminates the bitmap for the CPU scan. */
179 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
181 unsigned long io_bitmap_max;
182 struct thread_struct *io_bitmap_owner;
/* Padding -- presumably sizes the struct to a cache-line multiple; verify. */
184 unsigned long __cacheline_filler[35];
/* Per-CPU trampoline stack used by sysenter/doublefault paths -- confirm. */
186 unsigned long stack[64];
187 } __attribute__((packed));
/* task_struct alignment needed so the FXSAVE area inside is 16-byte aligned. */
189 #define ARCH_MIN_TASKALIGN 16
/* Per-thread architectural state. NOTE(review): several members (eip/esp/fs/gs,
 * original lines 192-216) are missing from this generated copy, as is the
 * closing brace -- the INIT_THREAD initializer below references the absent
 * .esp0/.fs fields, confirming the truncation. */
191 struct thread_struct {
/* Cached TLS GDT entries, reloaded on context switch. */
193 struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
195 unsigned long sysenter_cs;
/* Hardware debug registers DR0-DR7. */
201 unsigned long debugreg[8];
/* Fault state: faulting address (cr2), trap number, and error code. */
203 unsigned long cr2, trap_no, error_code;
205 union i387_union i387;
/* Virtual-8086 mode state. */
207 struct vm86_struct __user * vm86_info;
208 unsigned long screen_bitmap;
209 unsigned long v86flags, v86mask, saved_esp0;
210 unsigned int saved_fs, saved_gs;
/* Kernel copy of the thread's I/O permission bitmap (NULL if none). */
212 unsigned long *io_bitmap_ptr;
215 unsigned long io_bitmap_max;
/* Initializer for the init task's thread_struct: ring-0 stack at the top of
 * init_stack, no vm86 state, no I/O bitmap. */
218 #define INIT_THREAD { .esp0 = sizeof(init_stack) + (long)&init_stack, .vm86_info = NULL, .sysenter_cs = __KERNEL_CS, .io_bitmap_ptr = NULL, .fs = __KERNEL_PERCPU, }
/* Initial TSS: bitmap base beyond the TSS limit (all port access faults),
 * and the bitmap itself all-ones (= all ports denied if ever consulted). */
220 #define INIT_TSS { .x86_tss = { .esp0 = sizeof(init_stack) + (long)&init_stack, .ss0 = __KERNEL_DS, .ss1 = __KERNEL_CS, .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, }, .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, }
/* Launch a user thread: clear %gs and %fs, point all data segments at the
 * user data selector, and set the user entry point and stack pointer. */
222 #define start_thread(regs, new_eip, new_esp) do { __asm__("movl %0,%%gs": :"r" (0)); regs->xfs = 0; set_fs(USER_DS); regs->xds = __USER_DS; regs->xes = __USER_DS; regs->xss = __USER_DS; regs->xcs = __USER_CS; regs->eip = new_eip; regs->esp = new_esp; } while (0)
227 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
/* Top of a task's kernel stack, given its thread_info pointer. */
228 #define KSTK_TOP(info) ({ unsigned long *__ptr = (unsigned long *)(info); (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); })
/* User-mode register frame saved at the top of the kernel stack; the -8
 * skips the two words the entry code leaves above pt_regs -- TODO confirm
 * against the entry-asm layout. */
230 #define task_pt_regs(task) ({ struct pt_regs *__regs__; __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); __regs__ - 1; })
/* Last user-mode EIP/ESP of a (stopped) task, read from its saved frame. */
232 #define KSTK_EIP(task) (task_pt_regs(task)->eip)
233 #define KSTK_ESP(task) (task_pt_regs(task)->esp)
/* Intel CPU microcode update file format. NOTE(review): several header
 * fields (version, revision, date, signature, checksum -- original lines
 * 236-242) and the struct-closing braces are missing from this generated
 * copy; kept byte-identical. */
235 struct microcode_header {
/* Size of the microcode data block; totalsize covers header + data + ext table. */
243 unsigned int datasize;
244 unsigned int totalsize;
245 unsigned int reserved[3];
/* struct microcode: header followed by the raw update bits.
 * [0] is the pre-C99 flexible-array idiom; generated code, left as-is. */
249 struct microcode_header hdr;
250 unsigned int bits[0];
253 typedef struct microcode microcode_t;
254 typedef struct microcode_header microcode_header_t;
/* Extended signature table: lets one update blob match several CPU
 * signatures (member lists partially elided by the generator). */
256 struct extended_signature {
262 struct extended_sigtable {
265 unsigned int reserved[3];
266 struct extended_signature sigs[0];
/* Busy-wait hint: "rep; nop" (= PAUSE) tells the CPU we are spinning. */
269 #define cpu_relax() rep_nop()
/* Non-paravirt build: the native implementations are used directly. */
270 #define paravirt_enabled() 0
271 #define __cpuid native_cpuid
/* Note the argument order swap: callers say (var, reg) / (value, reg),
 * the native helpers take the register number first. */
272 #define get_debugreg(var, register) (var) = native_get_debugreg(register)
273 #define set_debugreg(value, register) native_set_debugreg(register, value)
274 #define set_iopl_mask native_set_iopl_mask
/* Multi-byte NOP sequences used by the alternatives/patching machinery.
 * The byte values are exact instruction encodings (e.g. 0x90 = nop,
 * 0x8d = lea-based nops, 0x0f 0x1f = long nop) and MUST NOT be altered.
 * GENERIC_*: safe on any x86; K8_*/K7_*: AMD-tuned; P6_*: Intel long nops. */
275 #define GENERIC_NOP1 ".byte 0x90\n"
276 #define GENERIC_NOP2 ".byte 0x89,0xf6\n"
277 #define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
278 #define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
/* Longer generic nops are composed from shorter ones. */
279 #define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
280 #define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
281 #define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
282 #define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
283 #define K8_NOP1 GENERIC_NOP1
/* K8 prefers operand-size-prefixed nops (0x66 0x90). */
284 #define K8_NOP2 ".byte 0x66,0x90\n"
285 #define K8_NOP3 ".byte 0x66,0x66,0x90\n"
286 #define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
287 #define K8_NOP5 K8_NOP3 K8_NOP2
288 #define K8_NOP6 K8_NOP3 K8_NOP3
289 #define K8_NOP7 K8_NOP4 K8_NOP3
290 #define K8_NOP8 K8_NOP4 K8_NOP4
291 #define K7_NOP1 GENERIC_NOP1
292 #define K7_NOP2 ".byte 0x8b,0xc0\n"
293 #define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
294 #define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
/* ASM_NOP1 is defined below; fine for macros, which expand at use. */
295 #define K7_NOP5 K7_NOP4 ASM_NOP1
296 #define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
297 #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
298 #define K7_NOP8 K7_NOP7 ASM_NOP1
299 #define P6_NOP1 GENERIC_NOP1
300 #define P6_NOP2 ".byte 0x66,0x90\n"
/* 0x0f 0x1f /0 is the architecturally recommended multi-byte NOP. */
301 #define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
302 #define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
303 #define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
304 #define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
305 #define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
306 #define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
/* Default NOP family for this build: the generic sequences. */
307 #define ASM_NOP1 GENERIC_NOP1
308 #define ASM_NOP2 GENERIC_NOP2
309 #define ASM_NOP3 GENERIC_NOP3
310 #define ASM_NOP4 GENERIC_NOP4
311 #define ASM_NOP5 GENERIC_NOP5
312 #define ASM_NOP6 GENERIC_NOP6
313 #define ASM_NOP7 GENERIC_NOP7
314 #define ASM_NOP8 GENERIC_NOP8
/* Longest single NOP sequence available above. */
#define ASM_NOP_MAX 8
/* Architecture-provided prefetch hints (consumed by <linux/prefetch.h>).
 * Fix: ARCH_HAS_PREFETCH was defined twice in the generated header; the
 * redundant duplicate has been removed. */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
/* Prefetch a lock for write, since taking it will dirty the cache line. */
#define spin_lock_prefetch(x) prefetchw(x)
/* Cache line size as detected at boot (see x86_cache_alignment above). */
#define cache_line_size() (boot_cpu_data.x86_cache_alignment)