From 48dd80cb1343a0e535cc6065f078dfde9b60f5ba Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Thu, 3 Nov 2016 20:23:09 +0000
Subject: [PATCH] BACKPORT: arm64: move sp_el0 and tpidr_el1 into
 cpu_suspend_ctx

When returning from idle, we rely on the fact that thread_info lives at
the end of the kernel stack, and restore this by masking the saved stack
pointer. Subsequent patches will sever the relationship between the
stack and thread_info, and to cater for this we must save/restore
sp_el0 explicitly, storing it in cpu_suspend_ctx.

As cpu_suspend_ctx must be doubleword aligned, this leaves us with an
extra slot in cpu_suspend_ctx. We can use this to save/restore tpidr_el1
in the same way, which simplifies the code, avoiding pointer chasing on
the restore path (as we no longer need to load thread_info::cpu followed
by the relevant slot in __per_cpu_offset based on this).

This patch stashes both registers in cpu_suspend_ctx.

Signed-off-by: Mark Rutland
Tested-by: Laura Abbott
Cc: James Morse
Cc: Lorenzo Pieralisi
Cc: Will Deacon
Signed-off-by: Catalin Marinas

This is a modification of Mark Rutland's original patch. The differences
from the original patch are as follows:

- NR_CTX_REGS is set to 13 instead of 12.
- x13 and x14 are used as temporary registers to hold sp_el0 and
  tpidr_el1 instead of x11 and x12.
- The values are temporarily stashed at offsets 88 and 96 of
  cpu_suspend_ctx instead of 80 and 88.

The original patch would not apply cleanly and these changes were made
to resolve this.

Bug: 38331309
Change-Id: I4e72aebd51e99d3767487383c14a1ba784312bf1
(cherry picked from commit 623b476fc815464a0241ea7483da7b3580b7d8ac)
Signed-off-by: Zubin Mithra
---
 arch/arm64/include/asm/suspend.h |  2 +-
 arch/arm64/kernel/sleep.S        |  3 ---
 arch/arm64/kernel/suspend.c      |  6 ------
 arch/arm64/mm/proc.S             | 10 ++++++++--
 4 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 59a5b0f1e81c..4d19a03d316e 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 13
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e33fe33876ab..f586f7c875e2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -173,9 +173,6 @@ ENTRY(cpu_resume)
 	/* load physical address of identity map page table in x1 */
 	adrp	x1, idmap_pg_dir
 	mov	sp, x2
-	/* save thread_info */
-	and	x2, x2, #~(THREAD_SIZE - 1)
-	msr	sp_el0, x2
 	/*
 	 * cpu_do_resume expects x0 to contain context physical address
 	 * pointer and x1 to contain physical address of 1:1 page tables
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index f42b8b8f1d0a..e7a96462ca2d 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -96,12 +96,6 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		cpu_uninstall_idmap();
 
 		/*
-		 * Restore per-cpu offset before any kernel
-		 * subsystem relying on it has a chance to run.
-		 */
-		set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
-		/*
 		 * PSTATE was not saved over suspend/resume, re-enable any
 		 * detected features that might not have been set correctly.
 		 */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 85a542b21575..3b3a4710dcd6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -71,12 +71,15 @@ ENTRY(cpu_do_suspend)
 	mrs	x10, mdscr_el1
 	mrs	x11, oslsr_el1
 	mrs	x12, sctlr_el1
+	mrs	x13, tpidr_el1
+	mrs	x14, sp_el0
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
 	stp	x6, x7, [x0, #32]
 	stp	x8, x9, [x0, #48]
 	stp	x10, x11, [x0, #64]
-	str	x12, [x0, #80]
+	stp	x12, x13, [x0, #80]
+	str	x14, [x0, #96]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -99,7 +102,8 @@ ENTRY(cpu_do_resume)
 	ldp	x6, x7, [x0, #32]
 	ldp	x8, x9, [x0, #48]
 	ldp	x10, x11, [x0, #64]
-	ldr	x12, [x0, #80]
+	ldp	x12, x13, [x0, #80]
+	ldr	x14, [x0, #96]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -111,6 +115,8 @@ ENTRY(cpu_do_resume)
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
+	msr	tpidr_el1, x13
+	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
-- 
2.11.0
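
Reviewer note (not part of the patch): a minimal C sketch of the context
layout implied by the save/restore order in cpu_do_suspend/cpu_do_resume
above, assuming cpu_suspend_ctx keeps the mainline shape of a ctx_regs[]
array followed by a saved stack pointer. The slot annotations for offsets
24..40 are not visible in the hunks above and are left unnamed. With
NR_CTX_REGS raised to 13, the register area is 104 bytes and the trailing
sp slot brings the struct to 112 bytes, which keeps the size a multiple of
16 and accounts for the "extra slot" noted in the commit message.

    #include <linux/types.h>

    #define NR_CTX_REGS 13

    /* Sketch only; see arch/arm64/include/asm/suspend.h for the real thing. */
    struct cpu_suspend_ctx {
    	u64 ctx_regs[NR_CTX_REGS];	/* 13 * 8 = 104 bytes */
    	u64 sp;				/* total 112 bytes, multiple of 16 */
    } __aligned(16);

    /*
     * ctx_regs[] usage by byte offset, as seen in the hunks above:
     *   [x0, #0]   tpidr_el0
     *   [x0, #8]   tpidrro_el0
     *   [x0, #16]  contextidr_el1
     *   [x0, #24]..[x0, #40]  other sysregs (not shown in the hunks above)
     *   [x0, #48]  tcr_el1
     *   [x0, #56]  vbar_el1
     *   [x0, #64]  mdscr_el1
     *   [x0, #72]  oslsr_el1 (restored via oslar_el1)
     *   [x0, #80]  sctlr_el1
     *   [x0, #88]  tpidr_el1  (new in this patch)
     *   [x0, #96]  sp_el0     (new in this patch)
     */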