/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
	.macro	save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro	enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

	/*
	 * Save/restore interrupts.
	 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
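
/*
 * Illustrative use of the two macros above (not part of the original
 * header): toggle single-step handling for the current task around an
 * exception path. The registers chosen here are arbitrary.
 *
 *	ldr	x1, [x0, #TSK_TI_FLAGS]		// x0 = current task_struct
 *	disable_step_tsk x1, x2
 *	...
 *	enable_step_tsk x1, x2			// daif must be masked here
 */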
/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint	#16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm
/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
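
/*
 * Illustrative use of USER() (not part of the original header): wrap a
 * single unprivileged access so that a fault continues at the supplied
 * label instead of being fatal. The label and registers are arbitrary.
 *
 *	USER(9998f, ldtr w2, [x1])	// load from a user pointer in x1
 *	...
 * 9998:				// fault fixup path
 */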
lr	.req	x30		// link register
/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif
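
/*
 * Illustrative use (not part of the original header): byte-swap a value
 * that is stored little-endian in memory, but only on big-endian kernels.
 *
 * CPU_BE(	rev	w0, w0	)
 */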
/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
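
/*
 * Illustrative use (not part of the original header): build a 64-bit
 * value in x0 from two zero-extended 32-bit halves, low half in x1 and
 * high half in x2 on little-endian kernels; the argument order is
 * interpreted in reverse on big-endian builds.
 *
 *	regs_to_64	x0, x1, x2
 */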
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
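
/*
 * Illustrative uses (not part of the original header; the 32-bit symbol
 * name is hypothetical):
 *
 *	adr_l	x0, init_task			// x0 = &init_task
 *	ldr_l	w1, some_u32_symbol, x2		// w1 = some_u32_symbol
 *	str_l	w1, some_u32_symbol, x2		// some_u32_symbol = w1
 */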
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
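
/*
 * Illustrative use (not part of the original header; the per-cpu symbol
 * names are hypothetical):
 *
 *	adr_this_cpu	x0, some_pcpu_buffer, x1	// x0 = this CPU's copy
 *	ldr_this_cpu	x2, some_pcpu_counter, x3	// x2 = this CPU's value
 */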
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm
/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
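
/*
 * Worked example (illustrative, not part of the original header): if
 * CTR_EL0.DminLine (bits [19:16]) reads 4, the line size is
 * 4 bytes/word << 4 = 64 bytes, which is what dcache_line_size leaves in
 * \reg. The I-cache variants use IminLine in bits [3:0] instead.
 */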
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
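
/*
 * Illustrative use (not part of the original header): cap the IPS field
 * of a TCR_EL1 value held in x10 while configuring translation at boot,
 * using x5/x6 as scratch registers.
 *
 *	tcr_compute_pa_size	x10, #TCR_IPS_SHIFT, x5, x6
 */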
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
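
/*
 * Illustrative use (not part of the original header): clean and
 * invalidate a buffer to the point of coherency. x0 holds the start
 * address and x1 the size in bytes; both are corrupted, as are x2/x3.
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */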
/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f				// Skip if no PMU present
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
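
/*
 * Illustrative use (not part of the original header): copy one page from
 * the address in x1 to the address in x0. The source must be page
 * aligned, since the loop stops when \src reaches the next page boundary;
 * both pointers are advanced by PAGE_SIZE and x2-x9 are clobbered.
 *
 *	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */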
/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
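
/*
 * Illustrative expansions (not part of the original header):
 *
 *	mov_q	x0, 0x1234		// fits in 32 bits: movz + movk
 *	mov_q	x1, 0xffff0000deadbeef	// needs all four 16-bit chunks:
 *					// movz + movk + movk + movk
 */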
/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm
/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *	ttbr:	Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm
/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm
/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of
 * 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm
/*
 * frame_push - Push @regcount callee saved registers to the stack,
 *              starting at x19, as well as x29/x30, and set x29 to
 *              the new value of sp. Add @extra bytes of stack space
 *              for locals.
 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

/*
 * frame_pop  - Pop the callee saved registers from the stack that were
 *              pushed in the most recent call to frame_push, as well
 *              as x29/x30 and any extra stack space that may have been
 *              allocated.
 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
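
/*
 * Illustrative use (not part of the original header; the function name is
 * hypothetical): save x19-x22 plus 16 bytes of local storage on entry and
 * restore everything before returning.
 *
 * ENTRY(example_func)
 *	frame_push	4, 16
 *	...
 *	frame_pop
 *	ret
 * ENDPROC(example_func)
 */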
/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if
 * CONFIG_PREEMPTION is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1 and a reschedule is also
 *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
 *   trigger a reschedule. If it is not the case, yielding is pointless.
 * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
 *   code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */
	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPTION
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm
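
/*
 * Illustrative use (not part of the original header; registers and labels
 * are arbitrary): yield periodically from a long-running NEON loop,
 * spilling and reloading live vector state around the yield.
 *
 * 0:	...					// process one block
 *	if_will_cond_yield_neon
 *	st1	{v0.16b-v3.16b}, [x19]		// pre-yield patchup
 *	do_cond_yield_neon
 *	ld1	{v0.16b-v3.16b}, [x19]		// post-yield patchup
 *	endif_yield_neon
 *	cbnz	w21, 0b
 */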
#endif	/* __ASM_ASSEMBLER_H */