/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing ? */
	if (!cpu_isset(smp_processor_id(), cpu_callin_map))
		play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	clear_thread_flag(TIF_USEDMSA);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	preempt_disable();

	if (is_msa_enabled())
		save_msa(p);
	else if (is_fpu_owner())
		save_fp(p);

	preempt_enable();

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}
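
/*
 * Illustration (not part of the original file): for a kernel thread,
 * copy_thread() above parks the entry point in reg16 ($s0) and its
 * argument in reg17 ($s1). ret_from_kernel_thread then behaves roughly
 * like the following C sketch (the real code is entry assembly):
 *
 *	int (*fn)(void *) = (int (*)(void *))current->thread.reg16;
 *	void *fn_arg = (void *)current->thread.reg17;
 *
 *	fn(fn_arg);
 */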

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
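
/*
 * Worked example (illustrative values): a MIPS j/jal instruction only
 * encodes a 26-bit word index, so the jump target inherits the top four
 * bits of the PC. With pc == 0x80123454 and target == 0x0048d12,
 * J_TARGET(pc, target) == (0x80000000 | (0x0048d12 << 2)) == 0x80123448.
 */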

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
	}

	mmi.halfword[0] = ip->halfword[1];
	mmi.halfword[1] = ip->halfword[0];
	return (mmi.mm_m_format.opcode == mm_pool32b_op &&
		mmi.mm_m_format.rd > 9 &&
		mmi.mm_m_format.base == 29 &&
		mmi.mm_m_format.func == mm_swm32_func) ||
	       (mmi.i_format.opcode == mm_sw32_op &&
		mmi.i_format.rs == 29 &&
		mmi.i_format.rt == 31);
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	       ip->i_format.rs == 29 &&
	       ip->i_format.rt == 31;
#endif
}
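
/*
 * Example (classic MIPS, illustrative): "sw $ra, 28($sp)" decodes to
 * i_format.opcode == sw_op, rs == 29 ($sp), rt == 31 ($ra) and
 * simmediate == 28, so is_ra_save_ins() accepts it as the prologue
 * instruction that spills the return address.
 */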

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16, jrc, jalr16, jalrs16
	 * jal
	 * jalr/jr, jalr.hb/jr.hb, jalrs, jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
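
/*
 * Example (classic MIPS, illustrative): "jal <target>" decodes to
 * j_format.opcode == jal_op, while "jr $ra" decodes to r_format.opcode
 * == spec_op with r_format.func == jr_op; is_jump_ins() accepts both,
 * which ends the prologue scan in get_frame_info() below.
 */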

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			(mmi.mm16_r3_format.simmediate & mm_addiusp_func)) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}

	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;

	return 0;
#endif
}
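
/*
 * Example (classic MIPS, illustrative): "addiu $sp, $sp, -64" has
 * opcode == addiu_op and rs == rt == 29, so is_sp_move_ins() reports a
 * stack adjustment; get_frame_info() below then recovers the frame
 * size of 64 bytes from the negated simmediate field.
 */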

static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
	union mips_instruction *ip = info->func;
#endif
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {
		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip)) {
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0])) {
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func) {
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
				info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
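
/*
 * Worked example for the microMIPS ADDIUSP decode above (illustrative
 * values): with the 9-bit immediate field holding 0x1f8 (i.e. -8,
 * meaning "addiusp -32"), tmp == (0x1f8 << 2) == 0x7e0. Bit 8 of tmp
 * is set, so the 0xfe00 sign-extension mask is OR-ed in, giving 0xffe0
 * == -32 as a signed short, and frame_size becomes 32.
 */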

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif

	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
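
/*
 * Worked example (hypothetical prologue): if __schedule() begins with
 *
 *	addiu	$sp, $sp, -64
 *	sw	$ra, 28($sp)
 *
 * then frame_info_init() records frame_size == 64 and pc_offset ==
 * 28 / sizeof(long) == 7 on a 32-bit kernel, and thread_saved_pc()
 * reads the saved $ra back out of ((unsigned long *)t->reg29)[7].
 */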

#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
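
/*
 * Usage sketch (illustrative, modelled on get_wchan() below): printing
 * a blocked task's backtrace one frame at a time.
 *
 *	unsigned long page = (unsigned long)task_stack_page(task);
 *	unsigned long sp = task->thread.reg29 + schedule_mfi.frame_size;
 *	unsigned long ra = 0;
 *	unsigned long pc = thread_saved_pc(task);
 *
 *	while (pc) {
 *		printk("%pS\n", (void *)pc);
 *		pc = unwind_stack_by_address(page, &sp, pc, &ra);
 *	}
 */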

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned to an 8-byte
 * boundary for the 32-bit ABIs and a 16-byte boundary for the 64-bit
 * ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}
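
/*
 * Worked example (illustrative): with 4 KiB pages, ~PAGE_MASK == 0xfff,
 * so the randomization above lowers sp by at most 4095 bytes; the final
 * "& ALMASK" then rounds back down to the ABI-required alignment.
 */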

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	smp_call_function(arch_dump_stack, NULL, 1);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}
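
/*
 * Userspace view (illustrative): mips_get_process_fp_mode() backs the
 * PR_GET_FP_MODE prctl, so a thread can test for the 64-bit FP register
 * mode with e.g.
 *
 *	int mode = prctl(PR_GET_FP_MODE);
 *	int has_fr = !!(mode & PR_FP_MODE_FR);
 */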

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a task's time slice. Pretty much
	 * all of the mode switch overhead can thus be confined to cases where
	 * mode switches are actually occurring. That is, to here. However for
	 * the thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);

	return 0;
}
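
/*
 * Userspace sketch (illustrative): asking the kernel to switch every
 * thread of the calling process to FR=1 with the hybrid FRE layout.
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE))
 *		perror("PR_SET_FP_MODE");
 *
 * An -EOPNOTSUPP return from mips_set_process_fp_mode() shows up to the
 * caller as errno == EOPNOTSUPP.
 */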