/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
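/*
 * Note: with CONFIG_CC_STACKPROTECTOR on this kernel there is a single,
 * global stack canary (__stack_chk_guard) shared by every task rather
 * than a per-task value.
 */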
/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
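/*
 * pm_power_off and arm_pm_restart are optional hooks that platform or
 * firmware support code (e.g. the PSCI driver) may install;
 * machine_power_off() and machine_restart() below fall back to generic
 * behaviour when they are left NULL.
 */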
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks
         */
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        cpu_do_idle();
        local_irq_enable();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
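/*
 * arch_cpu_idle_enter()/exit() notify the idle notifier chain (provided
 * elsewhere in this tree) so that interested drivers can react to the
 * CPU entering and leaving the idle loop.
 */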
void arch_cpu_idle_enter(void)
{
        idle_notifier_call_chain(IDLE_START);
}

void arch_cpu_idle_exit(void)
{
        idle_notifier_call_chain(IDLE_END);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
        disable_nonboot_cpus();
}
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}
/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /*
         * UpdateCapsule() depends on the system being reset via
         * ResetSystem().
         */
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_reboot(reboot_mode, NULL);

        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
        else
                do_kernel_restart(cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}
/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
        int     i, j;
        int     nlines;
        u32     *p;

        /*
         * don't attempt to dump non-kernel addresses or
         * values that are probably just small negative numbers
         */
        if (addr < KIMAGE_VADDR || addr > -256UL)
                return;

        printk("\n%s: %#lx:\n", name, addr);

        /*
         * round address down to a 32 bit boundary
         * and always dump a multiple of 32 bytes
         */
        p = (u32 *)(addr & ~(sizeof(u32) - 1));
        nbytes += (addr & (sizeof(u32) - 1));
        nlines = (nbytes + 31) / 32;

        for (i = 0; i < nlines; i++) {
                /*
                 * just display low 16 bits of address to keep
                 * each line of the dump < 80 characters
                 */
                printk("%04lx ", (unsigned long)p & 0xffff);
                for (j = 0; j < 8; j++) {
                        u32     data;
                        if (probe_kernel_address(p, data)) {
                                pr_cont(" ********");
                        } else {
                                pr_cont(" %08x", data);
                        }
                        ++p;
                }
                pr_cont("\n");
        }
}
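/*
 * Dump the memory around the PC, LR and SP values captured in the
 * exception frame; this runs with the address limit raised to KERNEL_DS
 * so show_data() can probe kernel addresses.
 */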
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
        mm_segment_t fs;

        fs = get_fs();
        set_fs(KERNEL_DS);
        show_data(regs->pc - nbytes, nbytes * 2, "PC");
        show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
        show_data(regs->sp - nbytes, nbytes * 2, "SP");
        set_fs(fs);
}
void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("LR is at %s\n", lr);
        printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
               regs->pc, lr, regs->pstate);
        printk("sp : %016llx\n", sp);
        for (i = top_reg; i >= 0; i--) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                if (i % 2 == 0)
                        printk("\n");
        }
        if (!user_mode(regs))
                show_extra_register_data(regs, 64);
        printk("\n");
}
void show_regs(struct pt_regs * regs)
{
        __show_regs(regs);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}
static void tls_thread_flush(void)
{
        asm ("msr tpidr_el0, xzr");

        if (is_compat_task()) {
                current->thread.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                asm ("msr tpidrro_el0, xzr");
        }
}
void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
}
void release_thread(struct task_struct *dead_task)
{
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;
        return 0;
}
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
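/*
 * ret_from_fork is the assembly entry point (entry.S) that every new
 * task starts at: after schedule_tail() it either jumps to the kernel
 * thread function stashed in cpu_context.x19 (with x20 as its argument)
 * or, for user tasks, returns to userspace through the child's pt_regs
 * set up by copy_thread() below.
 */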
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;

                /*
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
                asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));

                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
                        /* 16-byte aligned stack mandatory on AArch64 */
                        else if (stack_start & 15)
                                return -EINVAL;
                        else
                                childregs->sp = stack_start;
                }

                /*
                 * If a TLS pointer was passed to clone (4th argument), use it
                 * for the new thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        p->thread.tp_value = childregs->regs[3];
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
                    cpus_have_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;
                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;

        ptrace_hw_copy_thread(p);

        return 0;
}
static void tls_thread_switch(struct task_struct *next)
{
        unsigned long tpidr, tpidrro;

        asm("mrs %0, tpidr_el0" : "=r" (tpidr));
        *task_user_tls(current) = tpidr;

        tpidr = *task_user_tls(next);
        tpidrro = is_compat_thread(task_thread_info(next)) ?
                  next->thread.tp_value : 0;

        asm(
        "       msr     tpidr_el0, %0\n"
        "       msr     tpidrro_el0, %1"
        : : "r" (tpidr), "r" (tpidrro));
}
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
        if (IS_ENABLED(CONFIG_ARM64_UAO)) {
                if (task_thread_info(next)->addr_limit == KERNEL_DS)
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
                else
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
        }
}
struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
        uao_thread_switch(next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         */
        dsb(ish);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}
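/*
 * get_wchan() reports where a sleeping task is blocked: it unwinds from
 * the task's saved fp/sp/pc, skipping scheduler internals, and gives up
 * after 16 frames or if the frame leaves the task's stack.
 */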
unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = p->curr_ret_stack;
#endif
        stack_page = (unsigned long)task_stack_page(p);
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
                    unwind_frame(p, &frame))
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
        } while (count ++ < 16);
        return 0;
}
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}
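/*
 * arch_align_stack() shifts the initial user stack down by a random
 * sub-page offset (then re-aligns to 16 bytes), while randomize_base()
 * below picks a page-aligned value in the STACK_RND_MASK-page window
 * above the given base for arch_randomize_brk().
 */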
static unsigned long randomize_base(unsigned long base)
{
        unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
        return randomize_range(base, range_end, 0) ? : base;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        return randomize_base(mm->brk);
}