improves system performance, but it may also
expose users to several CPU vulnerabilities.
Equivalent to: nopti [X86,PPC]
- kpti=0 [ARM64]
+ if nokaslr then kpti=0 [ARM64]
nospectre_v1 [X86,PPC]
nobp=0 [S390]
nospectre_v2 [X86,PPC,S390,ARM64]
0000000000000000 0000ffffffffffff 256TB user
ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map
[ffff600000000000 ffff7fffffffffff] 32TB [kasan shadow region]
- ffff800000000000 ffff800007ffffff 128MB bpf jit region
- ffff800008000000 ffff80000fffffff 128MB modules
- ffff800010000000 fffffbffefffffff 124TB vmalloc
+ ffff800000000000 ffff800007ffffff 128MB modules
+ ffff800008000000 fffffbffefffffff 124TB vmalloc
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
0000000000000000 000fffffffffffff 4PB user
fff0000000000000 ffff7fffffffffff ~4PB kernel logical memory map
[fffd800000000000 ffff7fffffffffff] 512TB [kasan shadow region]
- ffff800000000000 ffff800007ffffff 128MB bpf jit region
- ffff800008000000 ffff80000fffffff 128MB modules
- ffff800010000000 fffffbffefffffff 124TB vmalloc
+ ffff800000000000 ffff800007ffffff 128MB modules
+ ffff800008000000 fffffbffefffffff 124TB vmalloc
fffffbfff0000000 fffffbfffdffffff 224MB fixed mappings (top down)
fffffbfffe000000 fffffbfffe7fffff 8MB [guard region]
fffffbfffe800000 fffffbffff7fffff 16MB PCI I/O space
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A57 | #1319537 | ARM64_ERRATUM_1319367 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A57 | #1742098 | ARM64_ERRATUM_1742098 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #853709 | N/A |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A72 | #1319367 | ARM64_ERRATUM_1319367 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A72 | #1655431 | ARM64_ERRATUM_1742098 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2441009 | ARM64_ERRATUM_2441009 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
| alpha: | TODO |
| arc: | ok |
| arm: | TODO |
- | arm64: | TODO |
+ | arm64: | ok |
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
(*) dma_wmb();
(*) dma_rmb();
+ (*) dma_mb();
These are for use with consistent memory to guarantee the ordering
of writes or reads of shared memory accessible to both the CPU and a
DMA capable device.
The dma_rmb() allows us to guarantee the device has released ownership
before we read the data from the descriptor, and the dma_wmb() allows
us to guarantee the data is written to the descriptor before the device
- can see it now has ownership. Note that, when using writel(), a prior
- wmb() is not needed to guarantee that the cache coherent memory writes
- have completed before writing to the MMIO region. The cheaper
- writel_relaxed() does not provide this guarantee and must not be used
- here.
+ can see it now has ownership. The dma_mb() implies both a dma_rmb() and
+ a dma_wmb(). Note that, when using writel(), a prior wmb() is not needed
+ to guarantee that the cache coherent memory writes have completed before
+ writing to the MMIO region. The cheaper writel_relaxed() does not provide
+ this guarantee and must not be used here.
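For illustration, a minimal driver-style sketch in the spirit of the
memory-barriers.txt descriptor example; the descriptor layout, DEVICE_OWN
flag and doorbell register are hypothetical:

	if (desc->status != DEVICE_OWN) {
		/* do not read data until we own descriptor */
		dma_rmb();

		/* read/modify data */
		read_data = desc->data;
		desc->data = write_data;

		/* flush modifications before status update */
		dma_wmb();

		/* assign ownership */
		desc->status = DEVICE_OWN;

		/* notify device of new descriptor; writel() orders the
		 * coherent writes above before the MMIO write */
		writel(DESC_NOTIFY, doorbell);
	}
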
See the subsection "Kernel I/O barrier effects" for more information on
relaxed I/O accessors and the Documentation/core-api/dma-api.rst file for
more information on consistent memory.
config TRACE_IRQFLAGS_SUPPORT
bool
+config TRACE_IRQFLAGS_NMI_SUPPORT
+ bool
+
#
# An arch should select this if it provides all these things:
#
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
void __arm_iomem_set_ro(void __iomem *ptr, size_t size);
-extern void __iounmap(volatile void __iomem *addr);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
-extern void (*arch_iounmap)(volatile void __iomem *);
/*
* Bad read/write accesses...
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
-void iounmap(volatile void __iomem *iomem_cookie);
+void iounmap(volatile void __iomem *io_addr);
#define iounmap iounmap
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size);
__builtin_return_address(0));
}
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
struct static_vm *svm;
vunmap(addr);
}
-
-void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-
-void iounmap(volatile void __iomem *cookie)
-{
- arch_iounmap(cookie);
-}
EXPORT_SYMBOL(iounmap);
#if defined(CONFIG_PCI) || IS_ENABLED(CONFIG_PCMCIA)
return (void *)phys_addr;
}
-void __iounmap(volatile void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(__iounmap);
-
-void (*arch_iounmap)(volatile void __iomem *);
-
-void iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *io_addr)
{
}
EXPORT_SYMBOL(iounmap);
select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_NO_INSTR
+ select ARCH_WANTS_THP_SWAP if ARM64_4K_PAGES
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_IDLE_POLL_SETUP
+ select GENERIC_IOREMAP
select GENERIC_IRQ_IPI
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
+ select HAVE_IOREMAP_PROT
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KVM
select HAVE_NMI
select THREAD_INFO_IN_TASK
select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
select TRACE_IRQFLAGS_SUPPORT
+ select TRACE_IRQFLAGS_NMI_SUPPORT
help
ARM 64-bit (AArch64) Linux support.
If unsure, say Y.
+config ARM64_ERRATUM_1742098
+ bool "Cortex-A57/A72: 1742098: ELR recorded incorrectly on interrupt taken between cryptographic instructions in a sequence"
+ depends on COMPAT
+ default y
+ help
+ This option removes the AES hwcap for aarch32 user-space to
+ workaround erratum 1742098 on Cortex-A57 and Cortex-A72.
+
+ Affected parts may corrupt the AES state if an interrupt is
+ taken between a pair of AES instructions. These instructions
+ are only present if the cryptography extensions are present.
+ All software should have a fallback implementation for CPUs
+ that don't implement the cryptography extensions.
+
+ If unsure, say Y.
+
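As an illustration of the user-space fallback this help text refers to, a
hedged sketch for 32-bit ARM that probes the AES hwcap before choosing an
implementation; aes_encrypt_ce() and aes_encrypt_generic() are hypothetical
routine names:

	#include <stdbool.h>
	#include <sys/auxv.h>		/* getauxval() */
	#include <asm/hwcap.h>		/* HWCAP2_AES on 32-bit ARM */

	static bool have_hw_aes(void)
	{
		return (getauxval(AT_HWCAP2) & HWCAP2_AES) != 0;
	}

	void aes_encrypt_block(void *dst, const void *src, const void *key)
	{
		if (have_hw_aes())
			aes_encrypt_ce(dst, src, key);		/* AESE/AESMC path */
		else
			aes_encrypt_generic(dst, src, key);	/* portable fallback */
	}

On parts affected by erratum 1742098 the kernel now clears the AES hwcap for
compat tasks, so code like this transparently takes the generic path.
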
config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
If unsure, say Y.
+config ARM64_ERRATUM_2441009
+ bool "Cortex-A510: 2441009: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
+ default y
+ select ARM64_WORKAROUND_REPEAT_TLBI
+ help
+ This option adds a workaround for ARM Cortex-A510 erratum #2441009.
+
+ Under very rare circumstances, affected Cortex-A510 CPUs
+ may not handle a race between a break-before-make sequence on one
+ CPU, and another CPU accessing the same page. This could allow a
+ store to a page that has been unmapped.
+
+ Work around this by adding the affected CPUs to the list that needs
+ TLB sequences to be done twice.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_2064142
bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
depends on CORESIGHT_TRBE
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo Image.zst
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
$(obj)/Image.lzo: $(obj)/Image FORCE
$(call if_changed,lzo)
+
+$(obj)/Image.zst: $(obj)/Image FORCE
+ $(call if_changed,zstd)
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
+#include <linux/bits.h>
+#include <asm/gpr-num.h>
+
#define EX_TYPE_NONE 0
-#define EX_TYPE_FIXUP 1
-#define EX_TYPE_BPF 2
-#define EX_TYPE_UACCESS_ERR_ZERO 3
+#define EX_TYPE_BPF 1
+#define EX_TYPE_UACCESS_ERR_ZERO 2
+#define EX_TYPE_KACCESS_ERR_ZERO 3
#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
+/* Data fields for EX_TYPE_UACCESS_ERR_ZERO */
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+/* Data fields for EX_TYPE_LOAD_UNALIGNED_ZEROPAD */
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR GENMASK(9, 5)
+
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.short (data); \
.popsection;
+#define EX_DATA_REG(reg, gpr) \
+ (.L__gpr_num_##gpr << EX_DATA_REG_##reg##_SHIFT)
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __ASM_EXTABLE_RAW(insn, fixup, \
+ EX_TYPE_UACCESS_ERR_ZERO, \
+ ( \
+ EX_DATA_REG(ERR, err) | \
+ EX_DATA_REG(ZERO, zero) \
+ ))
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
+
+#define _ASM_EXTABLE_UACCESS(insn, fixup) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
/*
- * Create an exception table entry for `insn`, which will branch to `fixup`
+ * Create an exception table entry for uaccess `insn`, which will branch to `fixup`
* when an unhandled fault is taken.
*/
- .macro _asm_extable, insn, fixup
- __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .macro _asm_extable_uaccess, insn, fixup
+ _ASM_EXTABLE_UACCESS(\insn, \fixup)
.endm
/*
* Create an exception table entry for `insn` if `fixup` is provided. Otherwise
* do nothing.
*/
- .macro _cond_extable, insn, fixup
- .ifnc \fixup,
- _asm_extable \insn, \fixup
+ .macro _cond_uaccess_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable_uaccess \insn, \fixup
.endif
.endm
#else /* __ASSEMBLY__ */
-#include <linux/bits.h>
#include <linux/stringify.h>
-#include <asm/gpr-num.h>
-
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".align 2\n" \
".short (" data ")\n" \
".popsection\n"
-#define _ASM_EXTABLE(insn, fixup) \
- __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
-
-#define EX_DATA_REG_ERR_SHIFT 0
-#define EX_DATA_REG_ERR GENMASK(4, 0)
-#define EX_DATA_REG_ZERO_SHIFT 5
-#define EX_DATA_REG_ZERO GENMASK(9, 5)
-
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
EX_DATA_REG(ZERO, zero) \
")")
+#define _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_KACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, wzr)
-#define EX_DATA_REG_DATA_SHIFT 0
-#define EX_DATA_REG_DATA GENMASK(4, 0)
-#define EX_DATA_REG_ADDR_SHIFT 5
-#define EX_DATA_REG_ADDR GENMASK(9, 5)
+#define _ASM_EXTABLE_UACCESS(insn, fixup) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr)
+
+#define _ASM_EXTABLE_KACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_KACCESS_ERR_ZERO(insn, fixup, err, wzr)
#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
__DEFINE_ASM_GPR_NUMS \
#define USER(l, x...) \
9999: x; \
- _asm_extable 9999b, l
+ _asm_extable_uaccess 9999b, l
/*
* Generate the assembly for LDTR/STTR with exception table entries.
8889: ldtr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
+ _asm_extable_uaccess 8888b, \l;
+ _asm_extable_uaccess 8889b, \l;
.endm
.macro user_stp l, reg1, reg2, addr, post_inc
8889: sttr \reg2, [\addr, #8];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
- _asm_extable 8889b,\l;
+ _asm_extable_uaccess 8888b, \l;
+ _asm_extable_uaccess 8889b, \l;
.endm
.macro user_ldst l, inst, reg, addr, post_inc
8888: \inst \reg, [\addr];
add \addr, \addr, \post_inc;
- _asm_extable 8888b,\l;
+ _asm_extable_uaccess 8888b, \l;
.endm
#endif
b.lo .Ldcache_op\@
dsb \domain
- _cond_extable .Ldcache_op\@, \fixup
+ _cond_uaccess_extable .Ldcache_op\@, \fixup
.endm
/*
dsb ish
isb
- _cond_extable .Licache_op\@, \fixup
+ _cond_uaccess_extable .Licache_op\@, \fixup
.endm
/*
#define pmr_sync() do {} while (0)
#endif
-#define mb() dsb(sy)
-#define rmb() dsb(ld)
-#define wmb() dsb(st)
+#define __mb() dsb(sy)
+#define __rmb() dsb(ld)
+#define __wmb() dsb(st)
-#define dma_mb() dmb(osh)
-#define dma_rmb() dmb(oshld)
-#define dma_wmb() dmb(oshst)
+#define __dma_mb() dmb(osh)
+#define __dma_rmb() dmb(oshld)
+#define __dma_wmb() dmb(oshst)
#define io_stop_wc() dgh()
#define flush_icache_range flush_icache_range
/*
- * Cache maintenance functions used by the DMA API. No to be used directly.
- */
-extern void __dma_map_area(const void *, size_t, int);
-extern void __dma_unmap_area(const void *, size_t, int);
-extern void __dma_flush_area(const void *, size_t);
-
-/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
* @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
* cpu being killed.
* @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
- * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
- * a proposed logical id.
- * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
- * to wrong parameters or error conditions. Called from the
- * CPU being suspended. Must be called with IRQs disabled.
*/
struct cpu_operations {
const char *name;
void (*cpu_die)(unsigned int cpu);
int (*cpu_kill)(unsigned int cpu);
#endif
-#ifdef CONFIG_CPU_IDLE
- int (*cpu_init_idle)(unsigned int);
- int (*cpu_suspend)(unsigned long);
-#endif
};
int __init init_cpu_ops(int cpu);
#include <asm/proc-fns.h>
-#ifdef CONFIG_CPU_IDLE
-extern int arm_cpuidle_init(unsigned int cpu);
-extern int arm_cpuidle_suspend(int index);
-#else
-static inline int arm_cpuidle_init(unsigned int cpu)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int arm_cpuidle_suspend(int index)
-{
- return -EOPNOTSUPP;
-}
-#endif
-
#ifdef CONFIG_ARM64_PSEUDO_NMI
#include <asm/arch_gicv3.h>
#endif /* CONFIG_ACPI_APEI_GHES */
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_RELOCATABLE
+ FIX_ENTRY_TRAMP_TEXT4, /* one extra slot for the data page */
+#endif
FIX_ENTRY_TRAMP_TEXT3,
FIX_ENTRY_TRAMP_TEXT2,
FIX_ENTRY_TRAMP_TEXT1,
- FIX_ENTRY_TRAMP_DATA,
#define TRAMP_VALIAS (__fix_to_virt(FIX_ENTRY_TRAMP_TEXT1))
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
__end_of_permanent_fixed_addresses,
/*
* I/O memory mapping functions.
*/
-extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
-extern void iounmap(volatile void __iomem *addr);
-extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
-#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
-#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot);
+#define ioremap_allowed ioremap_allowed
+
+#define _PAGE_IOREMAP PROT_DEVICE_nGnRE
+
+#define ioremap_wc(addr, size) \
+ ioremap_prot((addr), (size), PROT_NORMAL_NC)
+#define ioremap_np(addr, size) \
+ ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)
/*
* io{read,write}{16,32,64}be() macros
#include <asm-generic/io.h>
+#define ioremap_cache ioremap_cache
+static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
+{
+ if (pfn_is_map_memory(__phys_to_pfn(addr)))
+ return (void __iomem *)__phys_to_virt(addr);
+
+ return ioremap_prot(addr, size, PROT_NORMAL);
+}
+
/*
* More restrictive address range checking than the default implementation
* (PHYS_OFFSET and PHYS_MASK taken into account).
})
void dump_mem_limit(void);
+
+static inline bool defer_reserve_crashkernel(void)
+{
+ return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
+}
#endif /* !ASSEMBLY */
/*
*/
#ifdef CONFIG_ARM64_PA_BITS_52
/*
- * This should be GENMASK_ULL(47, 2).
* TTBR_ELx[1] is RES0 in this configuration.
*/
-#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
+#define TTBR_BADDR_MASK_52 GENMASK_ULL(47, 2)
#endif
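Both forms denote the same mask (bits [47:2] set): ((1UL << 46) - 1) << 2
== GENMASK_ULL(47, 2) == 0x0000fffffffffffc, so switching to GENMASK_ULL()
is purely cosmetic.
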
#ifdef CONFIG_ARM64_VA_BITS_52
__flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static inline bool arch_thp_swp_supported(void)
+{
+ return !system_supports_mte();
+}
+#define arch_thp_swp_supported arch_thp_swp_supported
+
/*
* Outside of a few very special situations (e.g. hibernation), we always
* use broadcast TLB invalidation instructions, therefore a spurious page
return clear_pte_bit(pte, __pgprot(PTE_SWP_EXCLUSIVE));
}
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pfn = pte_pfn(pte);
+
+ return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
+}
+
#ifdef CONFIG_NUMA_BALANCING
/*
* See the comment in include/linux/pgtable.h
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
+ s32 previous_syscall = regs->syscallno;
memset(regs, 0, sizeof(*regs));
- forget_syscall(regs);
+ regs->syscallno = previous_syscall;
regs->pc = pc;
if (system_uses_irq_prio_masking())
* The "__xxx_error" versions set the third argument to -EFAULT if an error
* occurs, and leave it unchanged on success.
*/
-#define __get_mem_asm(load, reg, x, addr, err) \
+#define __get_mem_asm(load, reg, x, addr, err, type) \
asm volatile( \
"1: " load " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
+ _ASM_EXTABLE_##type##ACCESS_ERR_ZERO(1b, 2b, %w0, %w1) \
: "+r" (err), "=&r" (x) \
: "r" (addr))
-#define __raw_get_mem(ldr, x, ptr, err) \
-do { \
- unsigned long __gu_val; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \
- break; \
- case 2: \
- __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \
- break; \
- case 4: \
- __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \
- break; \
- case 8: \
- __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+#define __raw_get_mem(ldr, x, ptr, err, type) \
+do { \
+ unsigned long __gu_val; \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 2: \
+ __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 4: \
+ __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err), type); \
+ break; \
+ case 8: \
+ __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err), type); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
/*
__chk_user_ptr(ptr); \
\
uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err, U); \
uaccess_ttbr0_disable(); \
\
(x) = __rgu_val; \
\
__uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
- (__force type *)(__gkn_src), __gkn_err); \
+ (__force type *)(__gkn_src), __gkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
-#define __put_mem_asm(store, reg, x, addr, err) \
+#define __put_mem_asm(store, reg, x, addr, err, type) \
asm volatile( \
"1: " store " " reg "1, [%2]\n" \
"2:\n" \
- _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %w0) \
+ _ASM_EXTABLE_##type##ACCESS_ERR(1b, 2b, %w0) \
: "+r" (err) \
: "r" (x), "r" (addr))
-#define __raw_put_mem(str, x, ptr, err) \
-do { \
- __typeof__(*(ptr)) __pu_val = (x); \
- switch (sizeof(*(ptr))) { \
- case 1: \
- __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \
- break; \
- case 2: \
- __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \
- break; \
- case 4: \
- __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \
- break; \
- case 8: \
- __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
+#define __raw_put_mem(str, x, ptr, err, type) \
+do { \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ switch (sizeof(*(ptr))) { \
+ case 1: \
+ __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 2: \
+ __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 4: \
+ __put_mem_asm(str, "%w", __pu_val, (ptr), (err), type); \
+ break; \
+ case 8: \
+ __put_mem_asm(str, "%x", __pu_val, (ptr), (err), type); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
} while (0)
/*
__chk_user_ptr(__rpu_ptr); \
\
uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err, U); \
uaccess_ttbr0_disable(); \
} while (0)
\
__uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(__pkn_src)), \
- (__force type *)(__pkn_dst), __pkn_err); \
+ (__force type *)(__pkn_dst), __pkn_err, K); \
__uaccess_disable_tco_async(); \
\
if (unlikely(__pkn_err)) \
prot = __acpi_get_writethrough_mem_attribute();
}
}
- return __ioremap(phys, size, prot);
+ return ioremap_prot(phys, size, pgprot_val(prot));
}
/*
pxm = pa->proximity_domain;
node = acpi_map_pxm_to_node(pxm);
- if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
+ if (node == NUMA_NO_NODE) {
pr_err("SRAT: Too many proximity domains %d\n", pxm);
bad_srat();
return;
static LIST_HEAD(insn_emulation);
static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
+static DEFINE_MUTEX(insn_emulation_mutex);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
{
loff_t *ppos)
{
int ret = 0;
- struct insn_emulation *insn = (struct insn_emulation *) table->data;
+ struct insn_emulation *insn = container_of(table->data, struct insn_emulation, current_mode);
enum insn_emulation_mode prev_mode = insn->current_mode;
- table->data = &insn->current_mode;
+ mutex_lock(&insn_emulation_mutex);
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write || prev_mode == insn->current_mode)
update_insn_emulation_mode(insn, INSN_UNDEF);
}
ret:
- table->data = insn;
+ mutex_unlock(&insn_emulation_mutex);
return ret;
}
sysctl->maxlen = sizeof(int);
sysctl->procname = insn->ops->name;
- sysctl->data = insn;
+ sysctl->data = &insn->current_mode;
sysctl->extra1 = &insn->min;
sysctl->extra2 = &insn->max;
sysctl->proc_handler = emulation_proc_handler;
ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2441009
+ {
+ /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
+ },
+#endif
{},
};
#endif
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+static struct midr_range broken_aarch32_aes[] = {
+ MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ {},
+};
+#endif /* CONFIG_ARM64_ERRATUM_1742098 */
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
{
- .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
+ .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
.capability = ARM64_WORKAROUND_REPEAT_TLBI,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = cpucap_multi_entry_cap_matches,
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ {
+ .desc = "ARM erratum 1742098",
+ .capability = ARM64_WORKAROUND_1742098,
+ CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ },
+#endif
{
}
};
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
+#include <asm/hwcap.h>
#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define KPTI_NG_TEMP_VA (-(1UL << PMD_SHIFT))
+
+extern
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int), int flags);
+
+static phys_addr_t kpti_ng_temp_alloc;
+
+static phys_addr_t kpti_ng_pgd_alloc(int shift)
+{
+ kpti_ng_temp_alloc -= PAGE_SIZE;
+ return kpti_ng_temp_alloc;
+}
+
static void __nocfi
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
- typedef void (kpti_remap_fn)(int, int, phys_addr_t);
+ typedef void (kpti_remap_fn)(int, int, phys_addr_t, unsigned long);
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
kpti_remap_fn *remap_fn;
int cpu = smp_processor_id();
+ int levels = CONFIG_PGTABLE_LEVELS;
+ int order = order_base_2(levels);
+ u64 kpti_ng_temp_pgd_pa = 0;
+ pgd_t *kpti_ng_temp_pgd;
+ u64 alloc = 0;
if (__this_cpu_read(this_cpu_vector) == vectors) {
const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
+ if (!cpu) {
+ alloc = __get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
+ kpti_ng_temp_pgd = (pgd_t *)(alloc + (levels - 1) * PAGE_SIZE);
+ kpti_ng_temp_alloc = kpti_ng_temp_pgd_pa = __pa(kpti_ng_temp_pgd);
+
+ //
+ // Create a minimal page table hierarchy that permits us to map
+ // the swapper page tables temporarily as we traverse them.
+ //
+ // The physical pages are laid out as follows:
+ //
+ // +--------+-/-------+-/------ +-\\--------+
+ // : PTE[] : | PMD[] : | PUD[] : || PGD[] :
+ // +--------+-\-------+-\------ +-//--------+
+ // ^
+ // The first page is mapped into this hierarchy at a PMD_SHIFT
+ // aligned virtual address, so that we can manipulate the PTE
+ // level entries while the mapping is active. The first entry
+ // covers the PTE[] page itself, the remaining entries are free
+ // to be used as an ad-hoc fixmap.
+ //
+ create_kpti_ng_temp_pgd(kpti_ng_temp_pgd, __pa(alloc),
+ KPTI_NG_TEMP_VA, PAGE_SIZE, PAGE_KERNEL,
+ kpti_ng_pgd_alloc, 0);
+ }
+
cpu_install_idmap();
- remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
+ remap_fn(cpu, num_online_cpus(), kpti_ng_temp_pgd_pa, KPTI_NG_TEMP_VA);
cpu_uninstall_idmap();
- if (!cpu)
+ if (!cpu) {
+ free_pages(alloc, order);
arm64_use_ng_mappings = true;
+ }
}
#else
static void
}
#endif /* CONFIG_ARM64_MTE */
+static void elf_hwcap_fixup(void)
+{
+#ifdef CONFIG_ARM64_ERRATUM_1742098
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
+ compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
+#endif /* ARM64_ERRATUM_1742098 */
+}
+
#ifdef CONFIG_KVM
static bool is_kvm_protected_mode(const struct arm64_cpu_capabilities *entry, int __unused)
{
setup_system_capabilities();
setup_elf_hwcaps(arm64_elf_hwcaps);
- if (system_supports_32bit_el0())
+ if (system_supports_32bit_el0()) {
setup_elf_hwcaps(compat_elf_hwcaps);
+ elf_hwcap_fixup();
+ }
if (system_uses_ttbr0_pan())
pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
cpu_active_mask);
get_cpu_device(lucky_winner)->offline_disabled = true;
setup_elf_hwcaps(compat_elf_hwcaps);
+ elf_hwcap_fixup();
pr_info("Asymmetric 32-bit EL0 support detected on CPU %u; CPU hot-unplug disabled on CPU %u\n",
cpu, lucky_winner);
return 0;
#include <linux/of_device.h>
#include <linux/psci.h>
-#include <asm/cpuidle.h>
-#include <asm/cpu_ops.h>
-
-int arm_cpuidle_init(unsigned int cpu)
-{
- const struct cpu_operations *ops = get_cpu_ops(cpu);
- int ret = -EOPNOTSUPP;
-
- if (ops && ops->cpu_suspend && ops->cpu_init_idle)
- ret = ops->cpu_init_idle(cpu);
-
- return ret;
-}
-
-/**
- * arm_cpuidle_suspend() - function to enter a low-power idle state
- * @index: argument to pass to CPU suspend operations
- *
- * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
- * operations back-end error code otherwise.
- */
-int arm_cpuidle_suspend(int index)
-{
- int cpu = smp_processor_id();
- const struct cpu_operations *ops = get_cpu_ops(cpu);
-
- return ops->cpu_suspend(index);
-}
-
#ifdef CONFIG_ACPI
#include <acpi/processor.h>
*/
.endm
- .macro tramp_data_page dst
- adr_l \dst, .entry.tramp.text
- sub \dst, \dst, PAGE_SIZE
- .endm
-
- .macro tramp_data_read_var dst, var
-#ifdef CONFIG_RANDOMIZE_BASE
- tramp_data_page \dst
- add \dst, \dst, #:lo12:__entry_tramp_data_\var
- ldr \dst, [\dst]
+ .macro tramp_data_read_var dst, var
+#ifdef CONFIG_RELOCATABLE
+ ldr \dst, .L__tramp_data_\var
+ .ifndef .L__tramp_data_\var
+ .pushsection ".entry.tramp.rodata", "a", %progbits
+ .align 3
+.L__tramp_data_\var:
+ .quad \var
+ .popsection
+ .endif
#else
- ldr \dst, =\var
+ /*
+ * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
+ * compile time constant (and hence not secret and not worth hiding).
+ *
+ * As statically allocated kernel code and data always live in the top
+ * 47 bits of the address space we can sign-extend bit 47 and avoid an
+ * instruction to load the upper 16 bits (which must be 0xFFFF).
+ */
+ movz \dst, :abs_g2_s:\var
+ movk \dst, :abs_g1_nc:\var
+ movk \dst, :abs_g0_nc:\var
#endif
.endm
msr vbar_el1, x30
isb
.else
- ldr x30, =vectors
+ adr_l x30, vectors
.endif // \kpti == 1
.if \bhb == BHB_MITIGATION_FW
SYM_CODE_START(tramp_exit_compat)
tramp_exit 32
SYM_CODE_END(tramp_exit_compat)
-
- .ltorg
.popsection // .entry.tramp.text
-#ifdef CONFIG_RANDOMIZE_BASE
- .pushsection ".rodata", "a"
- .align PAGE_SHIFT
-SYM_DATA_START(__entry_tramp_data_start)
-__entry_tramp_data_vectors:
- .quad vectors
-#ifdef CONFIG_ARM_SDE_INTERFACE
-__entry_tramp_data___sdei_asm_handler:
- .quad __sdei_asm_handler
-#endif /* CONFIG_ARM_SDE_INTERFACE */
-__entry_tramp_data_this_cpu_vector:
- .quad this_cpu_vector
-SYM_DATA_END(__entry_tramp_data_start)
- .popsection // .rodata
-#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
/*
* This clobbers x4, __sdei_handler() will restore this from firmware's
* copy.
*/
-.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
mrs x4, ttbr1_el1
1: sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
- .ltorg
.popsection // .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
unsigned long pfn = xa_state.xa_index;
struct page *page = pfn_to_online_page(pfn);
- /*
- * It is not required to invoke page_kasan_tag_reset(page)
- * at this point since the tags stored in page->flags are
- * already restored.
- */
mte_restore_page_tags(page_address(page), tags);
mte_free_tag_storage(tags);
if (!pte_is_tagged)
return;
- page_kasan_tag_reset(page);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
mte_clear_page_tags(page_address(page));
}
__entry_tramp_text_start = .; \
*(.entry.tramp.text) \
. = ALIGN(PAGE_SIZE); \
- __entry_tramp_text_end = .;
+ __entry_tramp_text_end = .; \
+ *(.entry.tramp.rodata)
#else
#define TRAMP_TEXT
#endif
ret
SYM_FUNC_END(__pi_dcache_clean_pop)
SYM_FUNC_ALIAS(dcache_clean_pop, __pi_dcache_clean_pop)
-
-/*
- * __dma_flush_area(start, size)
- *
- * clean & invalidate D / U line
- *
- * - start - virtual start address of region
- * - size - size in question
- */
-SYM_FUNC_START(__pi___dma_flush_area)
- add x1, x0, x1
- dcache_by_line_op civac, sy, x0, x1, x2, x3
- ret
-SYM_FUNC_END(__pi___dma_flush_area)
-SYM_FUNC_ALIAS(__dma_flush_area, __pi___dma_flush_area)
-
-/*
- * __dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_map_area)
- add x1, x0, x1
- b __pi_dcache_clean_poc
-SYM_FUNC_END(__pi___dma_map_area)
-SYM_FUNC_ALIAS(__dma_map_area, __pi___dma_map_area)
-
-/*
- * __dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-SYM_FUNC_START(__pi___dma_unmap_area)
- add x1, x0, x1
- cmp w2, #DMA_TO_DEVICE
- b.ne __pi_dcache_inval_poc
- ret
-SYM_FUNC_END(__pi___dma_unmap_area)
-SYM_FUNC_ALIAS(__dma_unmap_area, __pi___dma_unmap_area)
if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
set_bit(PG_mte_tagged, &to->flags);
- page_kasan_tag_reset(to);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
mte_copy_page_tags(kto, kfrom);
}
}
#include <asm/xen/xen-ops.h>
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_map_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ dcache_clean_poc(start, start + size);
}
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
- enum dma_data_direction dir)
+ enum dma_data_direction dir)
{
- __dma_unmap_area(phys_to_virt(paddr), size, dir);
+ unsigned long start = (unsigned long)phys_to_virt(paddr);
+
+ if (dir == DMA_TO_DEVICE)
+ return;
+
+ dcache_inval_poc(start, start + size);
}
void arch_dma_prep_coherent(struct page *page, size_t size)
{
- __dma_flush_area(page_address(page), size);
+ unsigned long start = (unsigned long)page_address(page);
+
+ dcache_clean_inval_poc(start, start + size);
}
#ifdef CONFIG_IOMMU_DMA
return ((unsigned long)&ex->fixup + ex->fixup);
}
-static bool ex_handler_fixup(const struct exception_table_entry *ex,
- struct pt_regs *regs)
-{
- regs->pc = get_ex_fixup(ex);
- return true;
-}
-
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
return false;
switch (ex->type) {
- case EX_TYPE_FIXUP:
- return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
+ case EX_TYPE_KACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
return ex_handler_load_unaligned_zeropad(ex, regs);
void tag_clear_highpage(struct page *page)
{
mte_zero_clear_page_tags(page_address(page));
- page_kasan_tag_reset(page);
set_bit(PG_mte_tagged, &page->flags);
}
#endif
}
-/*
- * Select all bits except the pfn
- */
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
- unsigned long pfn = pte_pfn(pte);
-
- return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte));
-}
-
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, size_t *pgsize)
{
early_init_fdt_scan_reserved_mem();
- if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (!defer_reserve_crashkernel())
reserve_crashkernel();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
* request_standard_resources() depends on crashkernel's memory being
* reserved, so do it here.
*/
- if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (defer_reserve_crashkernel())
reserve_crashkernel();
memblock_dump_all();
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on arch/arm/mm/ioremap.c
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- * Hacked for ARM by Phil Blundell <philb@gnu.org>
- * Hacked to allow all architectures to build, and various cleanups
- * by Russell King
- * Copyright (C) 2012 ARM Ltd.
- */
-#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/io.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-
-static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
- pgprot_t prot, void *caller)
+bool ioremap_allowed(phys_addr_t phys_addr, size_t size, unsigned long prot)
{
- unsigned long last_addr;
- unsigned long offset = phys_addr & ~PAGE_MASK;
- int err;
- unsigned long addr;
- struct vm_struct *area;
+ unsigned long last_addr = phys_addr + size - 1;
- /*
- * Page align the mapping address and size, taking account of any
- * offset.
- */
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(size + offset);
+ /* Don't allow outside PHYS_MASK */
+ if (last_addr & ~PHYS_MASK)
+ return false;
- /*
- * Don't allow wraparound, zero size or outside PHYS_MASK.
- */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
- return NULL;
-
- /*
- * Don't allow RAM to be mapped.
- */
+ /* Don't allow RAM to be mapped. */
if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
- return NULL;
-
- area = get_vm_area_caller(size, VM_IOREMAP, caller);
- if (!area)
- return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = phys_addr;
-
- err = ioremap_page_range(addr, addr + size, phys_addr, prot);
- if (err) {
- vunmap((void *)addr);
- return NULL;
- }
-
- return (void __iomem *)(offset + addr);
-}
-
-void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
-{
- return __ioremap_caller(phys_addr, size, prot,
- __builtin_return_address(0));
-}
-EXPORT_SYMBOL(__ioremap);
-
-void iounmap(volatile void __iomem *io_addr)
-{
- unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
-
- /*
- * We could get an address outside vmalloc range in case
- * of ioremap_cache() reusing a RAM mapping.
- */
- if (is_vmalloc_addr((void *)addr))
- vunmap((void *)addr);
-}
-EXPORT_SYMBOL(iounmap);
-
-void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
-{
- /* For normal memory we already have a cacheable mapping. */
- if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
- return (void __iomem *)__phys_to_virt(phys_addr);
+ return false;
- return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
- __builtin_return_address(0));
+ return true;
}
-EXPORT_SYMBOL(ioremap_cache);
/*
* Must be called after early_fixmap_init
} while (pgdp++, addr = next, addr != end);
}
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+extern __alias(__create_pgd_mapping)
+void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
+ phys_addr_t size, pgprot_t prot,
+ phys_addr_t (*pgtable_alloc)(int), int flags);
+#endif
+
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
#ifdef CONFIG_KEXEC_CORE
if (crash_mem_map) {
- if (IS_ENABLED(CONFIG_ZONE_DMA) ||
- IS_ENABLED(CONFIG_ZONE_DMA32))
+ if (defer_reserve_crashkernel())
flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
else if (crashk_res.end)
memblock_mark_nomap(crashk_res.start,
* through /sys/kernel/kexec_crash_size interface.
*/
#ifdef CONFIG_KEXEC_CORE
- if (crash_mem_map &&
- !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ if (crash_mem_map && !defer_reserve_crashkernel()) {
if (crashk_res.end) {
__map_memblock(pgdp, crashk_res.start,
crashk_res.end + 1,
__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
pa_start + i * PAGE_SIZE, prot);
- if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
- extern char __entry_tramp_data_start[];
-
- __set_fixmap(FIX_ENTRY_TRAMP_DATA,
- __pa_symbol(__entry_tramp_data_start),
- PAGE_KERNEL_RO);
- }
+ if (IS_ENABLED(CONFIG_RELOCATABLE))
+ __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
+ pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);
return 0;
}
if (!tags)
return false;
- page_kasan_tag_reset(page);
- /*
- * We need smp_wmb() in between setting the flags and clearing the
- * tags because if another thread reads page->flags and builds a
- * tagged address out of it, there is an actual dependency to the
- * memory access, but on the current thread we do not guarantee that
- * the new page->flags are visible before the tags were updated.
- */
- smp_wmb();
mte_restore_page_tags(page_address(page), tags);
return true;
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/hwcap.h>
+#include <asm/kernel-pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
.popsection
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+
+#define KPTI_NG_PTE_FLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
+
.pushsection ".idmap.text", "awx"
- .macro __idmap_kpti_get_pgtable_ent, type
- dc cvac, cur_\()\type\()p // Ensure any existing dirty
- dmb sy // lines are written back before
- ldr \type, [cur_\()\type\()p] // loading the entry
- tbz \type, #0, skip_\()\type // Skip invalid and
- tbnz \type, #11, skip_\()\type // non-global entries
+ .macro kpti_mk_tbl_ng, type, num_entries
+ add end_\type\()p, cur_\type\()p, #\num_entries * 8
+.Ldo_\type:
+ ldr \type, [cur_\type\()p] // Load the entry
+ tbz \type, #0, .Lnext_\type // Skip invalid and
+ tbnz \type, #11, .Lnext_\type // non-global entries
+ orr \type, \type, #PTE_NG // Same bit for blocks and pages
+ str \type, [cur_\type\()p] // Update the entry
+ .ifnc \type, pte
+ tbnz \type, #1, .Lderef_\type
+ .endif
+.Lnext_\type:
+ add cur_\type\()p, cur_\type\()p, #8
+ cmp cur_\type\()p, end_\type\()p
+ b.ne .Ldo_\type
.endm
- .macro __idmap_kpti_put_pgtable_ent_ng, type
- orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\()\type\()p] // Update the entry and ensure
- dmb sy // that it is visible to all
- dc civac, cur_\()\type\()p // CPUs.
+ /*
+ * Dereference the current table entry and map it into the temporary
+ * fixmap slot associated with the current level.
+ */
+ .macro kpti_map_pgtbl, type, level
+ str xzr, [temp_pte, #8 * (\level + 1)] // break before make
+ dsb nshst
+ add pte, temp_pte, #PAGE_SIZE * (\level + 1)
+ lsr pte, pte, #12
+ tlbi vaae1, pte
+ dsb nsh
+ isb
+
+ phys_to_pte pte, cur_\type\()p
+ add cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 1)
+ orr pte, pte, pte_flags
+ str pte, [temp_pte, #8 * (\level + 1)]
+ dsb nshst
.endm
/*
- * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper)
+ * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
+ * unsigned long temp_pte_va)
*
* Called exactly once from stop_machine context by each CPU found during boot.
*/
.long 1
SYM_FUNC_START(idmap_kpti_install_ng_mappings)
cpu .req w0
+ temp_pte .req x0
num_cpus .req w1
- swapper_pa .req x2
+ pte_flags .req x1
+ temp_pgd_phys .req x2
swapper_ttb .req x3
flag_ptr .req x4
cur_pgdp .req x5
pgd .req x7
cur_pudp .req x8
end_pudp .req x9
- pud .req x10
cur_pmdp .req x11
end_pmdp .req x12
- pmd .req x13
cur_ptep .req x14
end_ptep .req x15
pte .req x16
+ valid .req x17
+ mov x5, x3 // preserve temp_pte arg
mrs swapper_ttb, ttbr1_el1
- restore_ttbr1 swapper_ttb
adr flag_ptr, __idmap_kpti_flag
cbnz cpu, __idmap_kpti_secondary
eor w17, w17, num_cpus
cbnz w17, 1b
- /* We need to walk swapper, so turn off the MMU. */
- pre_disable_mmu_workaround
- mrs x17, sctlr_el1
- bic x17, x17, #SCTLR_ELx_M
- msr sctlr_el1, x17
+ /* Switch to the temporary page tables on this CPU only */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ offset_ttbr1 temp_pgd_phys, x8
+ msr ttbr1_el1, temp_pgd_phys
isb
+ mov temp_pte, x5
+ mov pte_flags, #KPTI_NG_PTE_FLAGS
+
/* Everybody is enjoying the idmap, so we can rewrite swapper. */
/* PGD */
- mov cur_pgdp, swapper_pa
- add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
-do_pgd: __idmap_kpti_get_pgtable_ent pgd
- tbnz pgd, #1, walk_puds
-next_pgd:
- __idmap_kpti_put_pgtable_ent_ng pgd
-skip_pgd:
- add cur_pgdp, cur_pgdp, #8
- cmp cur_pgdp, end_pgdp
- b.ne do_pgd
-
- /* Publish the updated tables and nuke all the TLBs */
- dsb sy
- tlbi vmalle1is
- dsb ish
- isb
+ adrp cur_pgdp, swapper_pg_dir
+ kpti_map_pgtbl pgd, 0
+ kpti_mk_tbl_ng pgd, PTRS_PER_PGD
- /* We're done: fire up the MMU again */
- mrs x17, sctlr_el1
- orr x17, x17, #SCTLR_ELx_M
- set_sctlr_el1 x17
+ /* Ensure all the updated entries are visible to secondary CPUs */
+ dsb ishst
+
+ /* We're done: fire up swapper_pg_dir again */
+ __idmap_cpu_set_reserved_ttbr1 x8, x9
+ msr ttbr1_el1, swapper_ttb
+ isb
/* Set the flag to zero to indicate that we're all done */
str wzr, [flag_ptr]
ret
+.Lderef_pgd:
/* PUD */
-walk_puds:
- .if CONFIG_PGTABLE_LEVELS > 3
+ .if CONFIG_PGTABLE_LEVELS > 3
+ pud .req x10
pte_to_phys cur_pudp, pgd
- add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
-do_pud: __idmap_kpti_get_pgtable_ent pud
- tbnz pud, #1, walk_pmds
-next_pud:
- __idmap_kpti_put_pgtable_ent_ng pud
-skip_pud:
- add cur_pudp, cur_pudp, 8
- cmp cur_pudp, end_pudp
- b.ne do_pud
- b next_pgd
- .else /* CONFIG_PGTABLE_LEVELS <= 3 */
- mov pud, pgd
- b walk_pmds
-next_pud:
- b next_pgd
+ kpti_map_pgtbl pud, 1
+ kpti_mk_tbl_ng pud, PTRS_PER_PUD
+ b .Lnext_pgd
+ .else /* CONFIG_PGTABLE_LEVELS <= 3 */
+ pud .req pgd
+ .set .Lnext_pud, .Lnext_pgd
.endif
+.Lderef_pud:
/* PMD */
-walk_pmds:
- .if CONFIG_PGTABLE_LEVELS > 2
+ .if CONFIG_PGTABLE_LEVELS > 2
+ pmd .req x13
pte_to_phys cur_pmdp, pud
- add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
-do_pmd: __idmap_kpti_get_pgtable_ent pmd
- tbnz pmd, #1, walk_ptes
-next_pmd:
- __idmap_kpti_put_pgtable_ent_ng pmd
-skip_pmd:
- add cur_pmdp, cur_pmdp, #8
- cmp cur_pmdp, end_pmdp
- b.ne do_pmd
- b next_pud
- .else /* CONFIG_PGTABLE_LEVELS <= 2 */
- mov pmd, pud
- b walk_ptes
-next_pmd:
- b next_pud
+ kpti_map_pgtbl pmd, 2
+ kpti_mk_tbl_ng pmd, PTRS_PER_PMD
+ b .Lnext_pud
+ .else /* CONFIG_PGTABLE_LEVELS <= 2 */
+ pmd .req pgd
+ .set .Lnext_pmd, .Lnext_pgd
.endif
+.Lderef_pmd:
/* PTE */
-walk_ptes:
pte_to_phys cur_ptep, pmd
- add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
-do_pte: __idmap_kpti_get_pgtable_ent pte
- __idmap_kpti_put_pgtable_ent_ng pte
-skip_pte:
- add cur_ptep, cur_ptep, #8
- cmp cur_ptep, end_ptep
- b.ne do_pte
- b next_pmd
+ kpti_map_pgtbl pte, 3
+ kpti_mk_tbl_ng pte, PTRS_PER_PTE
+ b .Lnext_pmd
.unreq cpu
+ .unreq temp_pte
.unreq num_cpus
- .unreq swapper_pa
+ .unreq pte_flags
+ .unreq temp_pgd_phys
.unreq cur_pgdp
.unreq end_pgdp
.unreq pgd
.unreq cur_ptep
.unreq end_ptep
.unreq pte
+ .unreq valid
/* Secondary CPUs end up here */
__idmap_kpti_secondary:
cbnz w16, 1b
/* All done, act like nothing happened */
- offset_ttbr1 swapper_ttb, x16
msr ttbr1_el1, swapper_ttb
isb
ret
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
+WORKAROUND_1742098
WORKAROUND_1902691
WORKAROUND_2038923
WORKAROUND_2064142
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select TRACE_IRQFLAGS_SUPPORT
+ select TRACE_IRQFLAGS_NMI_SUPPORT
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
select HAVE_ARCH_KCSAN if X86_64
# SPDX-License-Identifier: GPL-2.0
-config TRACE_IRQFLAGS_NMI_SUPPORT
- def_bool y
-
config EARLY_PRINTK_USB
bool
# ARM CPU Idle drivers
#
config ARM_CPUIDLE
- bool "Generic ARM/ARM64 CPU idle Driver"
+ bool "Generic ARM CPU idle Driver"
+ depends on ARM
select DT_IDLE_STATES
select CPU_IDLE_MULTIPLE_DRIVERS
help
#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
#endif
+#ifdef __dma_mb
+#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
+#endif
+
#ifdef __dma_rmb
#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif
#define wmb() mb()
#endif
+#ifndef dma_mb
+#define dma_mb() mb()
+#endif
+
#ifndef dma_rmb
#define dma_rmb() rmb()
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+/*
+ * Arch code can implement the following two hooks when using GENERIC_IOREMAP:
+ * ioremap_allowed() returns a bool,
+ * - true means continue to remap
+ * - false means skip remap and return directly
+ * iounmap_allowed() returns a bool,
+ * - true means continue to vunmap
+ * - false means skip vunmap and return directly
+ */
+#ifndef ioremap_allowed
+#define ioremap_allowed ioremap_allowed
+static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
+{
+ return true;
+}
+#endif
+
+#ifndef iounmap_allowed
+#define iounmap_allowed iounmap_allowed
+static inline bool iounmap_allowed(void *addr)
+{
+ return true;
+}
+#endif
+
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot);
void iounmap(volatile void __iomem *addr);
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \
- __GFP_SKIP_KASAN_POISON)
+ __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON)
#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM)
#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM)
return split_huge_page_to_list(&folio->page, list);
}
+/*
+ * Architectures that select ARCH_WANTS_THP_SWAP but cannot support swapping
+ * THPs as a whole due to implementation limitations (such as arm64 with MTE)
+ * can override this to false.
+ */
+#ifndef arch_thp_swp_supported
+static inline bool arch_thp_swp_supported(void)
+{
+ return true;
+}
+#endif
+
#endif /* _LINUX_HUGE_MM_H */
#include <linux/io.h>
#include <linux/export.h>
-void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
{
unsigned long offset, vaddr;
phys_addr_t last_addr;
struct vm_struct *area;
/* Disallow wrap-around or zero size */
- last_addr = addr + size - 1;
- if (!size || last_addr < addr)
+ last_addr = phys_addr + size - 1;
+ if (!size || last_addr < phys_addr)
return NULL;
/* Page-align mappings */
- offset = addr & (~PAGE_MASK);
- addr -= offset;
+ offset = phys_addr & (~PAGE_MASK);
+ phys_addr -= offset;
size = PAGE_ALIGN(size + offset);
+ if (!ioremap_allowed(phys_addr, size, prot))
+ return NULL;
+
area = get_vm_area_caller(size, VM_IOREMAP,
__builtin_return_address(0));
if (!area)
return NULL;
vaddr = (unsigned long)area->addr;
+ area->phys_addr = phys_addr;
- if (ioremap_page_range(vaddr, vaddr + size, addr, __pgprot(prot))) {
+ if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
+ __pgprot(prot))) {
free_vm_area(area);
return NULL;
}
void iounmap(volatile void __iomem *addr)
{
- vunmap((void *)((unsigned long)addr & PAGE_MASK));
+ void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);
+
+ if (!iounmap_allowed(vaddr))
+ return;
+
+ if (is_vmalloc_addr(vaddr))
+ vunmap(vaddr);
}
EXPORT_SYMBOL(iounmap);
return;
tag = kasan_random_tag();
+ kasan_unpoison(set_tag(page_address(page), tag),
+ PAGE_SIZE << order, init);
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
- kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
}
#endif /* CONFIG_DEBUG_VM */
-static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags)
+static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
/* Don't skip if a software KASAN mode is enabled. */
if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
return true;
/*
- * With hardware tag-based KASAN enabled, skip if either:
- *
- * 1. Memory tags have already been cleared via tag_clear_highpage().
- * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON.
+ * With hardware tag-based KASAN enabled, skip if this has been
+ * requested via __GFP_SKIP_KASAN_UNPOISON.
*/
- return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON);
+ return flags & __GFP_SKIP_KASAN_UNPOISON;
}
static inline bool should_skip_init(gfp_t flags)
bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
!should_skip_init(gfp_flags);
bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
+ int i;
set_page_private(page, 0);
set_page_refcounted(page);
* should be initialized as well).
*/
if (init_tags) {
- int i;
-
/* Initialize both memory and tags. */
for (i = 0; i != 1 << order; ++i)
tag_clear_highpage(page + i);
/* Note that memory is already initialized by the loop above. */
init = false;
}
- if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) {
+ if (!should_skip_kasan_unpoison(gfp_flags)) {
/* Unpoison shadow memory or set memory tags. */
kasan_unpoison_pages(page, order, init);
/* Note that memory is already initialized by KASAN. */
if (kasan_has_integrated_init())
init = false;
+ } else {
+ /* Ensure page_address() dereferencing does not fault. */
+ for (i = 0; i != 1 << order; ++i)
+ page_kasan_tag_reset(page + i);
}
/* If memory is still not initialized, do it now. */
if (init)
entry.val = 0;
if (folio_test_large(folio)) {
- if (IS_ENABLED(CONFIG_THP_SWAP))
+ if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported())
get_swap_pages(1, &entry, folio_nr_pages(folio));
goto out;
}