mm/kfence/core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KFENCE guarded object allocator and fault handling.
4  *
5  * Copyright (C) 2020, Google LLC.
6  */
7
8 #define pr_fmt(fmt) "kfence: " fmt
9
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kcsan-checks.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/log2.h>
22 #include <linux/memblock.h>
23 #include <linux/moduleparam.h>
24 #include <linux/notifier.h>
25 #include <linux/panic_notifier.h>
26 #include <linux/random.h>
27 #include <linux/rcupdate.h>
28 #include <linux/sched/clock.h>
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 #include <linux/spinlock.h>
32 #include <linux/string.h>
33
34 #include <asm/kfence.h>
35
36 #include "kfence.h"
37
38 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
39 #define KFENCE_WARN_ON(cond)                                                   \
40         ({                                                                     \
41                 const bool __cond = WARN_ON(cond);                             \
42                 if (unlikely(__cond)) {                                        \
43                         WRITE_ONCE(kfence_enabled, false);                     \
44                         disabled_by_warn = true;                               \
45                 }                                                              \
46                 __cond;                                                        \
47         })
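/*
 * Note: disabled_by_warn stays set once KFENCE_WARN_ON() has fired, so a later
 * attempt to re-enable KFENCE by writing the sample_interval parameter fails
 * with -EINVAL (see param_set_sample_interval() below).
 */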
48
49 /* === Data ================================================================= */
50
51 static bool kfence_enabled __read_mostly;
52 static bool disabled_by_warn __read_mostly;
53
54 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
55 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
56
57 #ifdef MODULE_PARAM_PREFIX
58 #undef MODULE_PARAM_PREFIX
59 #endif
60 #define MODULE_PARAM_PREFIX "kfence."
61
62 static int kfence_enable_late(void);
63 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
64 {
65         unsigned long num;
66         int ret = kstrtoul(val, 0, &num);
67
68         if (ret < 0)
69                 return ret;
70
71         /* Using 0 to indicate KFENCE is disabled. */
72         if (!num && READ_ONCE(kfence_enabled)) {
73                 pr_info("disabled\n");
74                 WRITE_ONCE(kfence_enabled, false);
75         }
76
77         *((unsigned long *)kp->arg) = num;
78
79         if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
80                 return disabled_by_warn ? -EINVAL : kfence_enable_late();
81         return 0;
82 }
83
84 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
85 {
86         if (!READ_ONCE(kfence_enabled))
87                 return sprintf(buffer, "0\n");
88
89         return param_get_ulong(buffer, kp);
90 }
91
92 static const struct kernel_param_ops sample_interval_param_ops = {
93         .set = param_set_sample_interval,
94         .get = param_get_sample_interval,
95 };
96 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
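/*
 * Example usage (sketch): the sample interval is given in milliseconds and can
 * be set on the kernel command line, e.g. "kfence.sample_interval=100", or at
 * runtime via /sys/module/kfence/parameters/sample_interval (mode 0600).
 * Writing 0 disables KFENCE; writing a non-zero value after boot attempts to
 * re-enable it via kfence_enable_late(), unless disabled_by_warn is set.
 */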
97
98 /* Pool usage threshold (percent) above which currently covered allocations are skipped. */
99 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
100 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
101
102 /* If true, use a deferrable timer. */
103 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
104 module_param_named(deferrable, kfence_deferrable, bool, 0444);
105
106 /* If true, check all canary bytes on panic. */
107 static bool kfence_check_on_panic __read_mostly;
108 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
109
110 /* The pool of pages used for guard pages and objects. */
111 char *__kfence_pool __read_mostly;
112 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
113
114 /*
115  * Per-object metadata, with one-to-one mapping of object metadata to
116  * backing pages (in __kfence_pool).
117  */
118 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
119 struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
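/*
 * Sizing example (assuming KFENCE_POOL_SIZE == (CONFIG_KFENCE_NUM_OBJECTS + 1)
 * * 2 * PAGE_SIZE, as defined in <linux/kfence.h>): every object needs one
 * data page plus one guard page, and the pool starts with two guard pages, so
 * e.g. 255 objects with 4 KiB pages consume 2 MiB of contiguous memory.
 */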
120
121 /* Freelist with available objects. */
122 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
123 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
124
125 /*
126  * The static key to set up a KFENCE allocation; or if static keys are not used
127  * to gate allocations, to avoid a load and compare if KFENCE is disabled.
128  */
129 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
130
131 /* Gates the allocation, ensuring only one succeeds in a given period. */
132 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
133
134 /*
135  * A Counting Bloom filter of allocation coverage: limits currently covered
136  * allocations of the same source filling up the pool.
137  *
138  * Assuming a range of 15%-85% unique allocations in the pool at any point in
139  * time, the below parameters provide a probability of 0.02-0.33 for false
140  * positive hits respectively:
141  *
142  *      P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
143  */
144 #define ALLOC_COVERED_HNUM      2
145 #define ALLOC_COVERED_ORDER     (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
146 #define ALLOC_COVERED_SIZE      (1 << ALLOC_COVERED_ORDER)
147 #define ALLOC_COVERED_HNEXT(h)  hash_32(h, ALLOC_COVERED_ORDER)
148 #define ALLOC_COVERED_MASK      (ALLOC_COVERED_SIZE - 1)
149 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
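/*
 * Worked example (illustrative): with CONFIG_KFENCE_NUM_OBJECTS == 255,
 * ALLOC_COVERED_ORDER is const_ilog2(255) + 2 == 9, i.e. 512 counters. Each
 * allocation's stack hash is folded ALLOC_COVERED_HNUM == 2 times through
 * hash_32(); both selected counters are incremented on allocation and
 * decremented on free, and a trace only counts as "covered" while both are
 * non-zero (see alloc_covered_add() and alloc_covered_contains() below).
 */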
150
151 /* Stack depth used to determine uniqueness of an allocation. */
152 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
153
154 /*
155  * Randomness for stack hashes, making the same collisions across reboots and
156  * different machines less likely.
157  */
158 static u32 stack_hash_seed __ro_after_init;
159
160 /* Statistics counters for debugfs. */
161 enum kfence_counter_id {
162         KFENCE_COUNTER_ALLOCATED,
163         KFENCE_COUNTER_ALLOCS,
164         KFENCE_COUNTER_FREES,
165         KFENCE_COUNTER_ZOMBIES,
166         KFENCE_COUNTER_BUGS,
167         KFENCE_COUNTER_SKIP_INCOMPAT,
168         KFENCE_COUNTER_SKIP_CAPACITY,
169         KFENCE_COUNTER_SKIP_COVERED,
170         KFENCE_COUNTER_COUNT,
171 };
172 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
173 static const char *const counter_names[] = {
174         [KFENCE_COUNTER_ALLOCATED]      = "currently allocated",
175         [KFENCE_COUNTER_ALLOCS]         = "total allocations",
176         [KFENCE_COUNTER_FREES]          = "total frees",
177         [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
178         [KFENCE_COUNTER_BUGS]           = "total bugs",
179         [KFENCE_COUNTER_SKIP_INCOMPAT]  = "skipped allocations (incompatible)",
180         [KFENCE_COUNTER_SKIP_CAPACITY]  = "skipped allocations (capacity)",
181         [KFENCE_COUNTER_SKIP_COVERED]   = "skipped allocations (covered)",
182 };
183 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
184
185 /* === Internals ============================================================ */
186
187 static inline bool should_skip_covered(void)
188 {
189         unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
190
191         return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
192 }
193
194 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
195 {
196         num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
197         num_entries = filter_irq_stacks(stack_entries, num_entries);
198         return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
199 }
200
201 /*
202  * Adds (or subtracts) count @val for allocation stack trace hash
203  * @alloc_stack_hash from Counting Bloom filter.
204  */
205 static void alloc_covered_add(u32 alloc_stack_hash, int val)
206 {
207         int i;
208
209         for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
210                 atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
211                 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
212         }
213 }
214
215 /*
216  * Returns true if the allocation stack trace hash @alloc_stack_hash is
217  * currently contained (non-zero count) in Counting Bloom filter.
218  */
219 static bool alloc_covered_contains(u32 alloc_stack_hash)
220 {
221         int i;
222
223         for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
224                 if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
225                         return false;
226                 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
227         }
228
229         return true;
230 }
231
232 static bool kfence_protect(unsigned long addr)
233 {
234         return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
235 }
236
237 static bool kfence_unprotect(unsigned long addr)
238 {
239         return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
240 }
241
242 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
243 {
244         unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
245         unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
246
247         /* The checks do not affect performance; only called from slow-paths. */
248
249         /* Only call with a pointer into kfence_metadata. */
250         if (KFENCE_WARN_ON(meta < kfence_metadata ||
251                            meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
252                 return 0;
253
254         /*
255          * This metadata object only ever maps to 1 page; verify that the stored
256          * address is in the expected range.
257          */
258         if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
259                 return 0;
260
261         return pageaddr;
262 }
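/*
 * Layout example (illustrative): metadata index i maps to the page at
 * __kfence_pool + (i + 1) * 2 * PAGE_SIZE, so index 0 uses pool page 2,
 * index 1 uses page 4, and so on; pages 0 and 1 and every odd-numbered page
 * are protected guard pages (see kfence_init_pool()).
 */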
263
264 /*
265  * Update the object's metadata state, including updating the alloc/free stacks
266  * depending on the state transition.
267  */
268 static noinline void
269 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
270                       unsigned long *stack_entries, size_t num_stack_entries)
271 {
272         struct kfence_track *track =
273                 next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
274
275         lockdep_assert_held(&meta->lock);
276
277         if (stack_entries) {
278                 memcpy(track->stack_entries, stack_entries,
279                        num_stack_entries * sizeof(stack_entries[0]));
280         } else {
281                 /*
282                  * Skip over 1 (this) function; noinline ensures we do not
283                  * accidentally skip over the caller by never inlining.
284                  */
285                 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
286         }
287         track->num_stack_entries = num_stack_entries;
288         track->pid = task_pid_nr(current);
289         track->cpu = raw_smp_processor_id();
290         track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
291
292         /*
293          * Pairs with READ_ONCE() in
294          *      kfence_shutdown_cache(),
295          *      kfence_handle_page_fault().
296          */
297         WRITE_ONCE(meta->state, next);
298 }
299
300 /* Write canary byte to @addr. */
301 static inline bool set_canary_byte(u8 *addr)
302 {
303         *addr = KFENCE_CANARY_PATTERN(addr);
304         return true;
305 }
306
307 /* Check canary byte at @addr. */
308 static inline bool check_canary_byte(u8 *addr)
309 {
310         struct kfence_metadata *meta;
311         unsigned long flags;
312
313         if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
314                 return true;
315
316         atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
317
318         meta = addr_to_metadata((unsigned long)addr);
319         raw_spin_lock_irqsave(&meta->lock, flags);
320         kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
321         raw_spin_unlock_irqrestore(&meta->lock, flags);
322
323         return false;
324 }
325
326 /* __always_inline this to ensure we won't do an indirect call to fn. */
327 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
328 {
329         const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
330         unsigned long addr;
331
332         /*
333          * We'll iterate over each canary byte per-side until fn() returns
334          * false. However, we'll still iterate over the canary bytes to the
335          * right of the object even if there was an error in the canary bytes to
336          * the left of the object. Specifically, if check_canary_byte()
337          * generates an error, showing both sides might give more clues as to
338          * what the error is about when displaying which bytes were corrupted.
339          */
340
341         /* Apply to left of object. */
342         for (addr = pageaddr; addr < meta->addr; addr++) {
343                 if (!fn((u8 *)addr))
344                         break;
345         }
346
347         /* Apply to right of object. */
348         for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
349                 if (!fn((u8 *)addr))
350                         break;
351         }
352 }
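/*
 * Note: every byte of the data page outside [meta->addr, meta->addr + size)
 * is filled with an address-dependent canary pattern on allocation and
 * re-checked on free (and, with check_on_panic, on panic), so small
 * out-of-bounds writes that stay within the page are detected when the
 * object is freed.
 */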
353
354 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
355                                   unsigned long *stack_entries, size_t num_stack_entries,
356                                   u32 alloc_stack_hash)
357 {
358         struct kfence_metadata *meta = NULL;
359         unsigned long flags;
360         struct slab *slab;
361         void *addr;
362         const bool random_right_allocate = get_random_u32_below(2);
363         const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
364                                   !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
365
366         /* Try to obtain a free object. */
367         raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
368         if (!list_empty(&kfence_freelist)) {
369                 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
370                 list_del_init(&meta->list);
371         }
372         raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
373         if (!meta) {
374                 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
375                 return NULL;
376         }
377
378         if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
379                 /*
380                  * This is extremely unlikely -- we are reporting on a
381                  * use-after-free, which locked meta->lock, and the reporting
382                  * code via printk calls kmalloc() which ends up in
383                  * kfence_alloc() and tries to grab the same object that we're
384                  * reporting on. While it has never been observed, lockdep does
385                  * report that there is a possibility of deadlock. Fix it by
386                  * using trylock and bailing out gracefully.
387                  */
388                 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
389                 /* Put the object back on the freelist. */
390                 list_add_tail(&meta->list, &kfence_freelist);
391                 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
392
393                 return NULL;
394         }
395
396         meta->addr = metadata_to_pageaddr(meta);
397         /* Unprotect if we're reusing this page. */
398         if (meta->state == KFENCE_OBJECT_FREED)
399                 kfence_unprotect(meta->addr);
400
401         /*
402          * Note: for allocations made before RNG initialization, get_random_u32_below()
403          * will always return zero. We still benefit from enabling KFENCE as early as
404          * possible, even when the RNG is not yet available, as this will allow
405          * KFENCE to detect bugs due to earlier allocations. The only downside
406          * is that the out-of-bounds accesses detected are deterministic for
407          * such allocations.
408          */
409         if (random_right_allocate) {
410                 /* Allocate on the "right" side, re-calculate address. */
411                 meta->addr += PAGE_SIZE - size;
412                 meta->addr = ALIGN_DOWN(meta->addr, cache->align);
413         }
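        /*
         * Illustration: a "left" allocation places the object at the start of
         * its page, so accesses below the object fault on the preceding guard
         * page while overflows land in canary bytes; a "right" allocation
         * (above) does the opposite, with overflows beyond any alignment
         * padding faulting on the following guard page and underflows caught
         * by the canaries.
         */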
414
415         addr = (void *)meta->addr;
416
417         /* Update remaining metadata. */
418         metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
419         /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
420         WRITE_ONCE(meta->cache, cache);
421         meta->size = size;
422         meta->alloc_stack_hash = alloc_stack_hash;
423         raw_spin_unlock_irqrestore(&meta->lock, flags);
424
425         alloc_covered_add(alloc_stack_hash, 1);
426
427         /* Set required slab fields. */
428         slab = virt_to_slab((void *)meta->addr);
429         slab->slab_cache = cache;
430 #if defined(CONFIG_SLUB)
431         slab->objects = 1;
432 #elif defined(CONFIG_SLAB)
433         slab->s_mem = addr;
434 #endif
435
436         /* Memory initialization. */
437         for_each_canary(meta, set_canary_byte);
438
439         /*
440          * We check slab_want_init_on_alloc() ourselves, rather than letting
441          * SL*B do the initialization, as otherwise we might overwrite KFENCE's
442          * redzone.
443          */
444         if (unlikely(slab_want_init_on_alloc(gfp, cache)))
445                 memzero_explicit(addr, size);
446         if (cache->ctor)
447                 cache->ctor(addr);
448
449         if (random_fault)
450                 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
451
452         atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
453         atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
454
455         return addr;
456 }
457
458 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
459 {
460         struct kcsan_scoped_access assert_page_exclusive;
461         unsigned long flags;
462         bool init;
463
464         raw_spin_lock_irqsave(&meta->lock, flags);
465
466         if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
467                 /* Invalid or double-free, bail out. */
468                 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
469                 kfence_report_error((unsigned long)addr, false, NULL, meta,
470                                     KFENCE_ERROR_INVALID_FREE);
471                 raw_spin_unlock_irqrestore(&meta->lock, flags);
472                 return;
473         }
474
475         /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
476         kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
477                                   KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
478                                   &assert_page_exclusive);
479
480         if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
481                 kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
482
483         /* Restore page protection if there was an OOB access. */
484         if (meta->unprotected_page) {
485                 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
486                 kfence_protect(meta->unprotected_page);
487                 meta->unprotected_page = 0;
488         }
489
490         /* Mark the object as freed. */
491         metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
492         init = slab_want_init_on_free(meta->cache);
493         raw_spin_unlock_irqrestore(&meta->lock, flags);
494
495         alloc_covered_add(meta->alloc_stack_hash, -1);
496
497         /* Check canary bytes for memory corruption. */
498         for_each_canary(meta, check_canary_byte);
499
500         /*
501          * Clear memory if init-on-free is set. While we protect the page, the
502          * data is still there, and after a use-after-free is detected, we
503          * unprotect the page, so the data is still accessible.
504          */
505         if (!zombie && unlikely(init))
506                 memzero_explicit(addr, meta->size);
507
508         /* Protect to detect use-after-frees. */
509         kfence_protect((unsigned long)addr);
510
511         kcsan_end_scoped_access(&assert_page_exclusive);
512         if (!zombie) {
513                 /* Add it to the tail of the freelist for reuse. */
514                 raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
515                 KFENCE_WARN_ON(!list_empty(&meta->list));
516                 list_add_tail(&meta->list, &kfence_freelist);
517                 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
518
519                 atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
520                 atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
521         } else {
522                 /* See kfence_shutdown_cache(). */
523                 atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
524         }
525 }
526
527 static void rcu_guarded_free(struct rcu_head *h)
528 {
529         struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
530
531         kfence_guarded_free((void *)meta->addr, meta, false);
532 }
533
534 /*
535  * Initialization of the KFENCE pool after its allocation.
536  * Returns 0 on success; otherwise returns the address up to
537  * which partial initialization succeeded.
538  */
539 static unsigned long kfence_init_pool(void)
540 {
541         unsigned long addr = (unsigned long)__kfence_pool;
542         struct page *pages;
543         int i;
544
545         if (!arch_kfence_init_pool())
546                 return addr;
547
548         pages = virt_to_page(__kfence_pool);
549
550         /*
551          * Set up object pages: they must have PG_slab set, to avoid freeing
552          * these as real pages.
553          *
554          * We also want to avoid inserting kfence_free() in the kfree()
555          * fast-path in SLUB, and therefore need to ensure kfree() correctly
556          * enters __slab_free() slow-path.
557          */
558         for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
559                 struct slab *slab = page_slab(&pages[i]);
560
561                 if (!i || (i % 2))
562                         continue;
563
564                 /* Verify we do not have a compound head page. */
565                 if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
566                         return addr;
567
568                 __folio_set_slab(slab_folio(slab));
569 #ifdef CONFIG_MEMCG
570                 slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
571                                    MEMCG_DATA_OBJCGS;
572 #endif
573         }
574
575         /*
576          * Protect the first 2 pages. The first page is mostly unnecessary, and
577          * merely serves as an extended guard page. However, adding one
578          * additional page in the beginning gives us an even number of pages,
579          * which simplifies the mapping of address to metadata index.
580          */
581         for (i = 0; i < 2; i++) {
582                 if (unlikely(!kfence_protect(addr)))
583                         return addr;
584
585                 addr += PAGE_SIZE;
586         }
587
588         for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
589                 struct kfence_metadata *meta = &kfence_metadata[i];
590
591                 /* Initialize metadata. */
592                 INIT_LIST_HEAD(&meta->list);
593                 raw_spin_lock_init(&meta->lock);
594                 meta->state = KFENCE_OBJECT_UNUSED;
595                 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
596                 list_add_tail(&meta->list, &kfence_freelist);
597
598                 /* Protect the right redzone. */
599                 if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
600                         return addr;
601
602                 addr += 2 * PAGE_SIZE;
603         }
604
605         return 0;
606 }
607
608 static bool __init kfence_init_pool_early(void)
609 {
610         unsigned long addr;
611
612         if (!__kfence_pool)
613                 return false;
614
615         addr = kfence_init_pool();
616
617         if (!addr) {
618                 /*
619                  * The pool is live and will never be deallocated from this point on.
620                  * Ignore the pool object from the kmemleak phys object tree, as it would
621                  * otherwise overlap with allocations returned by kfence_alloc(), which
622                  * are registered with kmemleak through the slab post-alloc hook.
623                  */
624                 kmemleak_ignore_phys(__pa(__kfence_pool));
625                 return true;
626         }
627
628         /*
629          * Only release unprotected pages, and do not try to go back and change
630          * page attributes due to risk of failing to do so as well. If changing
631          * page attributes for some pages fails, it is very likely that it also
632          * fails for the first page, and therefore expect addr==__kfence_pool in
633          * most failure cases.
634          */
635         for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
636                 struct slab *slab = virt_to_slab(p);
637
638                 if (!slab)
639                         continue;
640 #ifdef CONFIG_MEMCG
641                 slab->memcg_data = 0;
642 #endif
643                 __folio_clear_slab(slab_folio(slab));
644         }
645         memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
646         __kfence_pool = NULL;
647         return false;
648 }
649
650 static bool kfence_init_pool_late(void)
651 {
652         unsigned long addr, free_size;
653
654         addr = kfence_init_pool();
655
656         if (!addr)
657                 return true;
658
659         /* Same as above. */
660         free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
661 #ifdef CONFIG_CONTIG_ALLOC
662         free_contig_range(page_to_pfn(virt_to_page((void *)addr)), free_size / PAGE_SIZE);
663 #else
664         free_pages_exact((void *)addr, free_size);
665 #endif
666         __kfence_pool = NULL;
667         return false;
668 }
669
670 /* === DebugFS Interface ==================================================== */
671
672 static int stats_show(struct seq_file *seq, void *v)
673 {
674         int i;
675
676         seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
677         for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
678                 seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
679
680         return 0;
681 }
682 DEFINE_SHOW_ATTRIBUTE(stats);
683
684 /*
685  * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
686  * start_object() and next_object() return the object index + 1, because NULL is used
687  * to stop iteration.
688  */
689 static void *start_object(struct seq_file *seq, loff_t *pos)
690 {
691         if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
692                 return (void *)((long)*pos + 1);
693         return NULL;
694 }
695
696 static void stop_object(struct seq_file *seq, void *v)
697 {
698 }
699
700 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
701 {
702         ++*pos;
703         if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
704                 return (void *)((long)*pos + 1);
705         return NULL;
706 }
707
708 static int show_object(struct seq_file *seq, void *v)
709 {
710         struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
711         unsigned long flags;
712
713         raw_spin_lock_irqsave(&meta->lock, flags);
714         kfence_print_object(seq, meta);
715         raw_spin_unlock_irqrestore(&meta->lock, flags);
716         seq_puts(seq, "---------------------------------\n");
717
718         return 0;
719 }
720
721 static const struct seq_operations objects_sops = {
722         .start = start_object,
723         .next = next_object,
724         .stop = stop_object,
725         .show = show_object,
726 };
727 DEFINE_SEQ_ATTRIBUTE(objects);
728
729 static int kfence_debugfs_init(void)
730 {
731         struct dentry *kfence_dir;
732
733         if (!READ_ONCE(kfence_enabled))
734                 return 0;
735
736         kfence_dir = debugfs_create_dir("kfence", NULL);
737         debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
738         debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
739         return 0;
740 }
741
742 late_initcall(kfence_debugfs_init);
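/*
 * Example (sketch): when KFENCE is enabled, reading
 * /sys/kernel/debug/kfence/stats yields an "enabled: ..." line followed by one
 * "<counter name>: <value>" line per entry in counter_names[], e.g.
 * "total allocations: 42"; /sys/kernel/debug/kfence/objects (mode 0400) dumps
 * each object's metadata via kfence_print_object().
 */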
743
744 /* === Panic Notifier ====================================================== */
745
746 static void kfence_check_all_canary(void)
747 {
748         int i;
749
750         for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
751                 struct kfence_metadata *meta = &kfence_metadata[i];
752
753                 if (meta->state == KFENCE_OBJECT_ALLOCATED)
754                         for_each_canary(meta, check_canary_byte);
755         }
756 }
757
758 static int kfence_check_canary_callback(struct notifier_block *nb,
759                                         unsigned long reason, void *arg)
760 {
761         kfence_check_all_canary();
762         return NOTIFY_OK;
763 }
764
765 static struct notifier_block kfence_check_canary_notifier = {
766         .notifier_call = kfence_check_canary_callback,
767 };
768
769 /* === Allocation Gate Timer ================================================ */
770
771 static struct delayed_work kfence_timer;
772
773 #ifdef CONFIG_KFENCE_STATIC_KEYS
774 /* Wait queue to wake up allocation-gate timer task. */
775 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
776
777 static void wake_up_kfence_timer(struct irq_work *work)
778 {
779         wake_up(&allocation_wait);
780 }
781 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
782 #endif
783
784 /*
785  * Set up delayed work, which will enable and disable the static key. We need to
786  * use a work queue (rather than a simple timer), since enabling and disabling a
787  * static key cannot be done from an interrupt.
788  *
789  * Note: Toggling a static branch currently causes IPIs, and here we'll end up
790  * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
791  * more aggressive sampling intervals), we could get away with a variant that
792  * avoids IPIs, at the cost of not immediately capturing allocations if the
793  * instructions remain cached.
794  */
795 static void toggle_allocation_gate(struct work_struct *work)
796 {
797         if (!READ_ONCE(kfence_enabled))
798                 return;
799
800         atomic_set(&kfence_allocation_gate, 0);
801 #ifdef CONFIG_KFENCE_STATIC_KEYS
802         /* Enable static key, and await allocation to happen. */
803         static_branch_enable(&kfence_allocation_key);
804
805         wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
806
807         /* Disable static key and reset timer. */
808         static_branch_disable(&kfence_allocation_key);
809 #endif
810         queue_delayed_work(system_unbound_wq, &kfence_timer,
811                            msecs_to_jiffies(kfence_sample_interval));
812 }
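/*
 * Illustrative timeline for one sample period: with CONFIG_KFENCE_STATIC_KEYS,
 * the work item clears kfence_allocation_gate, enables the static key and
 * sleeps until an allocation claims the gate in __kfence_alloc(); it then
 * disables the key and re-queues itself kfence_sample_interval milliseconds
 * later. Without static keys it simply resets the gate and re-queues. Either
 * way, at most one allocation passes the gate per interval.
 */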
813
814 /* === Public interface ===================================================== */
815
816 void __init kfence_alloc_pool(void)
817 {
818         if (!kfence_sample_interval)
819                 return;
820
821         __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
822
823         if (!__kfence_pool)
824                 pr_err("failed to allocate pool\n");
825 }
826
827 static void kfence_init_enable(void)
828 {
829         if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
830                 static_branch_enable(&kfence_allocation_key);
831
832         if (kfence_deferrable)
833                 INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
834         else
835                 INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
836
837         if (kfence_check_on_panic)
838                 atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
839
840         WRITE_ONCE(kfence_enabled, true);
841         queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
842
843         pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
844                 CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
845                 (void *)(__kfence_pool + KFENCE_POOL_SIZE));
846 }
847
848 void __init kfence_init(void)
849 {
850         stack_hash_seed = get_random_u32();
851
852         /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
853         if (!kfence_sample_interval)
854                 return;
855
856         if (!kfence_init_pool_early()) {
857                 pr_err("%s failed\n", __func__);
858                 return;
859         }
860
861         kfence_init_enable();
862 }
863
864 static int kfence_init_late(void)
865 {
866         const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
867 #ifdef CONFIG_CONTIG_ALLOC
868         struct page *pages;
869
870         pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
871         if (!pages)
872                 return -ENOMEM;
873         __kfence_pool = page_to_virt(pages);
874 #else
875         if (nr_pages > MAX_ORDER_NR_PAGES) {
876                 pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
877                 return -EINVAL;
878         }
879         __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
880         if (!__kfence_pool)
881                 return -ENOMEM;
882 #endif
883
884         if (!kfence_init_pool_late()) {
885                 pr_err("%s failed\n", __func__);
886                 return -EBUSY;
887         }
888
889         kfence_init_enable();
890         kfence_debugfs_init();
891
892         return 0;
893 }
894
895 static int kfence_enable_late(void)
896 {
897         if (!__kfence_pool)
898                 return kfence_init_late();
899
900         WRITE_ONCE(kfence_enabled, true);
901         queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
902         pr_info("re-enabled\n");
903         return 0;
904 }
905
906 void kfence_shutdown_cache(struct kmem_cache *s)
907 {
908         unsigned long flags;
909         struct kfence_metadata *meta;
910         int i;
911
912         for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
913                 bool in_use;
914
915                 meta = &kfence_metadata[i];
916
917                 /*
918                  * If we observe some inconsistent cache and state pair where we
919                  * should have returned false here, cache destruction is racing
920                  * with either kmem_cache_alloc() or kmem_cache_free(). Taking
921                  * the lock will not help, as different critical section
922                  * serialization will have the same outcome.
923                  */
924                 if (READ_ONCE(meta->cache) != s ||
925                     READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
926                         continue;
927
928                 raw_spin_lock_irqsave(&meta->lock, flags);
929                 in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
930                 raw_spin_unlock_irqrestore(&meta->lock, flags);
931
932                 if (in_use) {
933                         /*
934                          * This cache still has allocations, and we should not
935                          * release them back into the freelist so they can still
936                          * safely be used and retain the kernel's default
937                          * behaviour of keeping the allocations alive (leak the
938                          * cache); however, they effectively become "zombie
939                          * allocations" as the KFENCE objects are the only ones
940                          * still in use and the owning cache is being destroyed.
941                          *
942                          * We mark them freed, so that any subsequent use shows
943                          * more useful error messages that will include stack
944                          * traces of the user of the object, the original
945                          * allocation, and caller to shutdown_cache().
946                          */
947                         kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
948                 }
949         }
950
951         for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
952                 meta = &kfence_metadata[i];
953
954                 /* See above. */
955                 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
956                         continue;
957
958                 raw_spin_lock_irqsave(&meta->lock, flags);
959                 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
960                         meta->cache = NULL;
961                 raw_spin_unlock_irqrestore(&meta->lock, flags);
962         }
963 }
964
965 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
966 {
967         unsigned long stack_entries[KFENCE_STACK_DEPTH];
968         size_t num_stack_entries;
969         u32 alloc_stack_hash;
970
971         /*
972          * Perform size check before switching kfence_allocation_gate, so that
973          * we don't disable KFENCE without making an allocation.
974          */
975         if (size > PAGE_SIZE) {
976                 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
977                 return NULL;
978         }
979
980         /*
981          * Skip allocations from non-default zones, including DMA. We cannot
982          * guarantee that pages in the KFENCE pool will have the requested
983          * properties (e.g. reside in DMAable memory).
984          */
985         if ((flags & GFP_ZONEMASK) ||
986             (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
987                 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
988                 return NULL;
989         }
990
991         /*
992          * Skip allocations for this slab, if KFENCE has been disabled for
993          * this slab.
994          */
995         if (s->flags & SLAB_SKIP_KFENCE)
996                 return NULL;
997
998         if (atomic_inc_return(&kfence_allocation_gate) > 1)
999                 return NULL;
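        /*
         * Note: a return value of 1 means this caller is the first since
         * toggle_allocation_gate() last reset the gate and thus owns the
         * current sample slot; all later callers observe a value > 1 and bail
         * out above.
         */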
1000 #ifdef CONFIG_KFENCE_STATIC_KEYS
1001         /*
1002          * waitqueue_active() is fully ordered after the update of
1003          * kfence_allocation_gate per atomic_inc_return().
1004          */
1005         if (waitqueue_active(&allocation_wait)) {
1006                 /*
1007                  * Calling wake_up() here may deadlock when allocations happen
1008                  * from within timer code. Use an irq_work to defer it.
1009                  */
1010                 irq_work_queue(&wake_up_kfence_timer_work);
1011         }
1012 #endif
1013
1014         if (!READ_ONCE(kfence_enabled))
1015                 return NULL;
1016
1017         num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1018
1019         /*
1020          * Do expensive check for coverage of allocation in slow-path after
1021          * allocation_gate has already become non-zero, even though it might
1022          * mean not making any allocation within a given sample interval.
1023          *
1024          * This ensures reasonable allocation coverage when the pool is almost
1025          * full, including avoiding long-lived allocations of the same source
1026          * filling up the pool (e.g. pagecache allocations).
1027          */
1028         alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1029         if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1030                 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1031                 return NULL;
1032         }
1033
1034         return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1035                                     alloc_stack_hash);
1036 }
1037
1038 size_t kfence_ksize(const void *addr)
1039 {
1040         const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1041
1042         /*
1043          * Read locklessly -- if there is a race with __kfence_alloc(), this is
1044          * either a use-after-free or invalid access.
1045          */
1046         return meta ? meta->size : 0;
1047 }
1048
1049 void *kfence_object_start(const void *addr)
1050 {
1051         const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1052
1053         /*
1054          * Read locklessly -- if there is a race with __kfence_alloc(), this is
1055          * either a use-after-free or invalid access.
1056          */
1057         return meta ? (void *)meta->addr : NULL;
1058 }
1059
1060 void __kfence_free(void *addr)
1061 {
1062         struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1063
1064 #ifdef CONFIG_MEMCG
1065         KFENCE_WARN_ON(meta->objcg);
1066 #endif
1067         /*
1068          * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1069          * the object, as the object page may be recycled for other-typed
1070          * objects once it has been freed. meta->cache may be NULL if the cache
1071          * was destroyed.
1072          */
1073         if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1074                 call_rcu(&meta->rcu_head, rcu_guarded_free);
1075         else
1076                 kfence_guarded_free(addr, meta, false);
1077 }
1078
1079 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1080 {
1081         const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1082         struct kfence_metadata *to_report = NULL;
1083         enum kfence_error_type error_type;
1084         unsigned long flags;
1085
1086         if (!is_kfence_address((void *)addr))
1087                 return false;
1088
1089         if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1090                 return kfence_unprotect(addr); /* ... unprotect and proceed. */
1091
1092         atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1093
1094         if (page_index % 2) {
1095                 /* This is a redzone, report a buffer overflow. */
1096                 struct kfence_metadata *meta;
1097                 int distance = 0;
1098
1099                 meta = addr_to_metadata(addr - PAGE_SIZE);
1100                 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1101                         to_report = meta;
1102                         /* Data race ok; distance calculation approximate. */
1103                         distance = addr - data_race(meta->addr + meta->size);
1104                 }
1105
1106                 meta = addr_to_metadata(addr + PAGE_SIZE);
1107                 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1108                         /* Data race ok; distance calculation approximate. */
1109                         if (!to_report || distance > data_race(meta->addr) - addr)
1110                                 to_report = meta;
1111                 }
1112
1113                 if (!to_report)
1114                         goto out;
1115
1116                 raw_spin_lock_irqsave(&to_report->lock, flags);
1117                 to_report->unprotected_page = addr;
1118                 error_type = KFENCE_ERROR_OOB;
1119
1120                 /*
1121                  * If the object was freed before we took the lock, we can still
1122                  * report this as an OOB -- the report will simply show the
1123                  * stacktrace of the free as well.
1124                  */
1125         } else {
1126                 to_report = addr_to_metadata(addr);
1127                 if (!to_report)
1128                         goto out;
1129
1130                 raw_spin_lock_irqsave(&to_report->lock, flags);
1131                 error_type = KFENCE_ERROR_UAF;
1132                 /*
1133                  * We may race with __kfence_alloc(), and it is possible that a
1134                  * freed object may be reallocated. We simply report this as a
1135                  * use-after-free, with the stack trace showing the place where
1136                  * the object was re-allocated.
1137                  */
1138         }
1139
1140 out:
1141         if (to_report) {
1142                 kfence_report_error(addr, is_write, regs, to_report, error_type);
1143                 raw_spin_unlock_irqrestore(&to_report->lock, flags);
1144         } else {
1145                 /* This may be a UAF or OOB access, but we can't be sure. */
1146                 kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1147         }
1148
1149         return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1150 }