1 /* $Id: winfixup.S,v 1.29 2000/03/26 09:13:48 davem Exp $
3 * winfixup.S: Handle cases where user stack pointer is found to be bogus.
5 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
11 #include <asm/ptrace.h>
12 #include <asm/processor.h>
13 #include <asm/spitfire.h>
14 #include <asm/asm_offsets.h>
19 /* Here are the rules, pay attention.
21 * The kernel is disallowed from touching user space while
22 * the trap level is greater than zero, except for from within
23 * the window spill/fill handlers. This must be followed
24 * so that we can easily detect the case where we tried to
25 * spill/fill with a bogus (or unmapped) user stack pointer.
27 * These are laid out in a special way for cache reasons,
30 .globl fill_fixup, spill_fixup
/* fill_fixup: a register-window FILL trap on the user's stack faulted.
 * %g1 holds the trapped level's TSTATE; %g4/%g5 carry the fault
 * code/address (they are written to thread_fault_code/_address below).
 * NOTE(review): the entry label itself is not visible in this excerpt.
 */
33 andcc %g1, TSTATE_PRIV, %g0 ! Did the fault come from privileged mode?
34 or %g4, FAULT_CODE_WINFIXUP, %g4 ! Tag the fault as a window fixup.
35 be,pt %xcc, window_scheisse_from_user_common ! User mode: common path.
36 and %g1, TSTATE_CWP, %g1 ! (delay slot) Extract trap-time CWP.
38 /* This is the extremely complex case, but it does happen from
39 * time to time if things are just right. Essentially the restore
40 * done in rtrap right before going back to user mode, with tl=1
41 * and that level's trap stack registers all setup, took a fill trap,
42 * the user stack was not mapped in the tlb, and tlb miss occurred,
43 * the pte found was not valid, and a simple ref bit watch update
44 * could not satisfy the miss, so we got here.
46 * We must carefully unwind the state so we get back to tl=0, preserve
47 * all the register values we were going to give to the user. Luckily
48 * most things are where they need to be, we also have the address
49 * which triggered the fault handy as well.
51 * Also note that we must preserve %l5 and %l6. If the user was
52 * returning from a system call, we must make it look this way
53 * after we process the fill fault on the user's stack.
55 * First, get into the window where the original restore was executed.
58 rdpr %wstate, %g2 ! Grab user mode wstate.
59 wrpr %g1, %cwp ! Get into the right window.
60 sll %g2, 3, %g2 ! NORMAL-->OTHER
62 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
63 wrpr %g2, 0x0, %wstate ! This must be consistent.
64 wrpr %g0, 0x0, %otherwin ! We know this.
65 mov PRIMARY_CONTEXT, %g1 ! Change contexts...
66 stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus.
67 flush %g6 ! Flush instruction buffers
68 rdpr %pstate, %l1 ! Prepare to change globals.
69 mov %g6, %o7 ! Get current.
71 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
72 stb %g4, [%g6 + AOFF_task_thread + AOFF_thread_fault_code] ! Record fault code for the C handler.
73 stx %g5, [%g6 + AOFF_task_thread + AOFF_thread_fault_address] ! Record faulting address.
74 wrpr %g0, 0x0, %tl ! Out of trap levels.
75 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate ! Switch to alternate globals, enable ints.
76 sethi %uhi(PAGE_OFFSET), %g4 ! Prepare page_offset global reg
78 sllx %g4, 32, %g4 ! and finish it...
80 /* This is the same as below, except we handle this a bit special
81 * since we must preserve %l5 and %l6, see comment above.
84 add %sp, PTREGS_OFF, %o0 ! %o0 = pt_regs (NOTE(review): the matching call is not visible in this excerpt).
86 nop ! yes, nop is correct
88 /* Be very careful about usage of the alternate globals here.
89 * You cannot touch %g4/%g5 as that has the fault information
90 * should this be from usermode. Also be careful for the case
91 * where we get here from the save instruction in etrap.S when
92 * coming from either user or kernel (does not matter which, it
93 * is the same problem in both cases). Essentially this means
94 * do not touch %g7 or %g2 so we handle the two cases fine.
/* spill_fixup: a window SPILL to the user stack faulted; stash the
 * live window into the thread's reg_window[] buffer instead, then
 * fall through to the common fault handling below.
 */
97 ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1 ! Load thread flags.
98 andcc %g1, SPARC_FLAG_32BIT, %g0 ! Is this a 32-bit task?
99 ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1 ! %g1 = windows already saved.
/* NOTE(review): the computation of the per-slot base in %g3 (and the
 * branch selecting the 32-bit path at 1:) is not visible in this excerpt.
 */
103 stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs] ! Remember this window's stack pointer.
! 64-bit task: save the full window as 8-byte registers.
107 stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
108 stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
110 stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
111 stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
112 stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
113 stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
114 stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
115 stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
116 stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
117 stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
119 stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
120 stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
121 stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
122 stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
123 stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
125 stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
! 32-bit task: save the window as 4-byte words.
126 1: stw %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
128 stw %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x04]
129 stw %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
130 stw %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x0c]
131 stw %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
132 stw %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x14]
133 stw %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
134 stw %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x1c]
135 stw %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
137 stw %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x24]
138 stw %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
139 stw %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x2c]
140 stw %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
141 stw %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x34]
142 stw %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
143 stw %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x3c]
146 stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved] ! Write back w_saved (NOTE(review): the increment of %g1 is not visible in this excerpt).
148 andcc %g1, TSTATE_PRIV, %g0 ! Privileged? (NOTE(review): the tstate reload into %g1 is not visible here.)
150 and %g1, TSTATE_CWP, %g1 ! Extract trap-time CWP.
151 be,pn %xcc, window_scheisse_from_user_common ! From user: common path.
152 mov FAULT_CODE_WRITE | FAULT_CODE_DTLB | FAULT_CODE_WINFIXUP, %g4 ! (delay slot) Fault code for a spill write fault.
/* Common user-mode path: record the fault info in the thread struct
 * and hand off to the C page-fault handler, then leave via rtrap.
 */
155 window_scheisse_from_user_common:
156 stb %g4, [%g6 + AOFF_task_thread + AOFF_thread_fault_code] ! Save fault code for do_sparc64_fault.
157 stx %g5, [%g6 + AOFF_task_thread + AOFF_thread_fault_address] ! Save faulting address.
161 call do_sparc64_fault
162 add %sp, PTREGS_OFF, %o0 ! (delay slot) %o0 = pt_regs.
163 ba,a,pt %xcc, rtrap_clr_l6 ! Done; return from trap via rtrap_clr_l6.
165 .globl winfix_mna, fill_fixup_mna, spill_fixup_mna
/* Memory-address-not-aligned (mna) variants of the window fixup paths.
 * Same unwind sequence as fill_fixup above, but the final call goes to
 * mem_address_unaligned instead of the page-fault handler.
 * NOTE(review): the winfix_mna/fill_fixup_mna labels themselves are not
 * visible in this excerpt.
 */
173 andcc %g1, TSTATE_PRIV, %g0 ! Did the fault come from privileged mode?
174 be,pt %xcc, window_mna_from_user_common ! User mode: common path.
175 and %g1, TSTATE_CWP, %g1 ! (delay slot) Extract trap-time CWP.
177 /* Please, see fill_fixup commentary about why we must preserve
178 * %l5 and %l6 to preserve absolute correct semantics.
180 rdpr %wstate, %g2 ! Grab user mode wstate.
181 wrpr %g1, %cwp ! Get into the right window.
182 sll %g2, 3, %g2 ! NORMAL-->OTHER
183 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
185 wrpr %g2, 0x0, %wstate ! This must be consistent.
186 wrpr %g0, 0x0, %otherwin ! We know this.
187 mov PRIMARY_CONTEXT, %g1 ! Change contexts...
188 stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus.
189 flush %g6 ! Flush instruction buffers
190 rdpr %pstate, %l1 ! Prepare to change globals.
191 mov %g4, %o2 ! Setup args for
192 mov %g5, %o1 ! final call to mem_address_unaligned.
193 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
195 mov %g6, %o7 ! Stash away current.
196 wrpr %g0, 0x0, %tl ! Out of trap levels.
197 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate ! Switch to alternate globals, enable ints.
198 sethi %uhi(PAGE_OFFSET), %g4 ! Set page_offset global reg.
199 mov %o7, %g6 ! Get current back.
200 sllx %g4, 32, %g4 ! Finish it.
201 call mem_address_unaligned
202 add %sp, PTREGS_OFF, %o0 ! (delay slot) %o0 = pt_regs.
205 nop ! yes, the nop is correct
/* spill_fixup_mna: misaligned spill to the user stack; stash the live
 * window into the thread's reg_window[] buffer (same scheme as
 * spill_fixup above).  NOTE(review): the label itself is not visible
 * in this excerpt.
 */
207 ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1 ! Load thread flags.
208 andcc %g1, SPARC_FLAG_32BIT, %g0 ! Is this a 32-bit task?
209 ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1 ! %g1 = windows already saved.
/* NOTE(review): the %g3 slot-base computation and the branch to the
 * 32-bit path at 1: are not visible in this excerpt.
 */
212 stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs] ! Remember this window's stack pointer.
! 64-bit task: save the full window as 8-byte registers.
217 stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
218 stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
219 stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
220 stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
221 stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
223 stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
224 stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
225 stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
226 stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
227 stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
228 stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
229 stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
230 stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
232 stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
233 stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
234 stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
! 32-bit task: each std stores an even/odd register pair (two 32-bit words).
237 1: std %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
238 std %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
239 std %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
241 std %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
242 std %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
243 std %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
244 std %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
245 std %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
247 2: stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved] ! Write back w_saved (NOTE(review): increment of %g1 not visible in this excerpt).
250 andcc %g1, TSTATE_PRIV, %g0 ! Privileged? (NOTE(review): the tstate reload into %g1 is not visible here.)
252 be,pn %xcc, window_mna_from_user_common ! From user: common path.
253 and %g1, TSTATE_CWP, %g1 ! (delay slot) Extract trap-time CWP.
/* Common user-mode path for the mna case: hand off to the C
 * unaligned-access handler.
 */
255 window_mna_from_user_common:
259 109: or %g7, %lo(109b), %g7 ! %g7 = address of 109 (NOTE(review): the matching sethi %hi(109b) is not visible in this excerpt).
262 call mem_address_unaligned
263 add %sp, PTREGS_OFF, %o0 ! (delay slot) %o0 = pt_regs.
267 /* These are only needed for 64-bit mode processes which
268 * put their stack pointer into the VPTE area and there
269 * happens to be a VPTE tlb entry mapped there during
270 * a spill/fill trap to that stack frame.
272 .globl winfix_dax, fill_fixup_dax, spill_fixup_dax
/* Data-access-exception (dax) variants of the window fixup paths.
 * Same unwind sequence as fill_fixup above, but the final call goes to
 * spitfire_data_access_exception.  NOTE(review): the
 * winfix_dax/fill_fixup_dax labels themselves are not visible in this
 * excerpt.
 */
280 andcc %g1, TSTATE_PRIV, %g0 ! Did the fault come from privileged mode?
281 be,pt %xcc, window_dax_from_user_common ! User mode: common path.
282 and %g1, TSTATE_CWP, %g1 ! (delay slot) Extract trap-time CWP.
284 /* Please, see fill_fixup commentary about why we must preserve
285 * %l5 and %l6 to preserve absolute correct semantics.
287 rdpr %wstate, %g2 ! Grab user mode wstate.
288 wrpr %g1, %cwp ! Get into the right window.
289 sll %g2, 3, %g2 ! NORMAL-->OTHER
290 wrpr %g0, 0x0, %canrestore ! Standard etrap stuff.
292 wrpr %g2, 0x0, %wstate ! This must be consistent.
293 wrpr %g0, 0x0, %otherwin ! We know this.
294 mov PRIMARY_CONTEXT, %g1 ! Change contexts...
295 stxa %g0, [%g1] ASI_DMMU ! Back into the nucleus.
296 flush %g6 ! Flush instruction buffers
297 rdpr %pstate, %l1 ! Prepare to change globals.
298 mov %g4, %o1 ! Setup args for
299 mov %g5, %o2 ! final call to spitfire_data_access_exception.
300 andn %l1, PSTATE_MM, %l1 ! We want to be in RMO
302 mov %g6, %o7 ! Stash away current.
303 wrpr %g0, 0x0, %tl ! Out of trap levels.
304 wrpr %l1, (PSTATE_IE | PSTATE_AG | PSTATE_RMO), %pstate ! Switch to alternate globals, enable ints.
305 sethi %uhi(PAGE_OFFSET), %g4 ! Set page_offset global reg.
306 mov %o7, %g6 ! Get current back.
307 sllx %g4, 32, %g4 ! Finish it.
308 call spitfire_data_access_exception
309 add %sp, PTREGS_OFF, %o0 ! (delay slot) %o0 = pt_regs.
312 nop ! yes, the nop is correct
/* spill_fixup_dax: spill to the user stack took a data access
 * exception; stash the live window into the thread's reg_window[]
 * buffer (same scheme as spill_fixup above).  NOTE(review): the label
 * itself is not visible in this excerpt.
 */
314 ldub [%g6 + AOFF_task_thread + AOFF_thread_flags], %g1 ! Load thread flags.
315 andcc %g1, SPARC_FLAG_32BIT, %g0 ! Is this a 32-bit task?
316 ldub [%g6 + AOFF_task_thread + AOFF_thread_w_saved], %g1 ! %g1 = windows already saved.
/* NOTE(review): the %g3 slot-base computation and the branch to the
 * 32-bit path at 1: are not visible in this excerpt.
 */
319 stx %sp, [%g3 + AOFF_task_thread + AOFF_thread_rwbuf_stkptrs] ! Remember this window's stack pointer.
! 64-bit task: save the full window as 8-byte registers.
324 stx %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
325 stx %l1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
326 stx %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
327 stx %l3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
328 stx %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
330 stx %l5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
331 stx %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
332 stx %l7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
333 stx %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x40]
334 stx %i1, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x48]
335 stx %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x50]
336 stx %i3, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x58]
337 stx %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x60]
339 stx %i5, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x68]
340 stx %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x70]
341 stx %i7, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x78]
! 32-bit task: each std stores an even/odd register pair (two 32-bit words).
344 1: std %l0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x00]
345 std %l2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x08]
346 std %l4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x10]
348 std %l6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x18]
349 std %i0, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x20]
350 std %i2, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x28]
351 std %i4, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x30]
352 std %i6, [%g3 + AOFF_task_thread + AOFF_thread_reg_window + 0x38]
354 2: stb %g1, [%g6 + AOFF_task_thread + AOFF_thread_w_saved] ! Write back w_saved (NOTE(review): increment of %g1 not visible in this excerpt).
357 andcc %g1, TSTATE_PRIV, %g0 ! Privileged? (NOTE(review): the tstate reload into %g1 is not visible here.)
359 be,pn %xcc, window_dax_from_user_common ! From user: common path.
360 and %g1, TSTATE_CWP, %g1 ! (delay slot) Extract trap-time CWP.
/* Common user-mode path for the dax case: hand off to the C
 * data-access-exception handler.
 */
362 window_dax_from_user_common:
366 109: or %g7, %lo(109b), %g7 ! %g7 = address of 109 (NOTE(review): the matching sethi %hi(109b) is not visible in this excerpt).
369 call spitfire_data_access_exception
370 add %sp, PTREGS_OFF, %o0 ! (delay slot) %o0 = pt_regs.