mm/kasan/common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
                                             unsigned int nr_entries)
{
        unsigned int i;

        for (i = 0; i < nr_entries; i++) {
                if (in_irqentry_text(entries[i])) {
                        /* Include the irqentry function into the stack. */
                        return i + 1;
                }
        }
        return nr_entries;
}

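/*
 * Capture the current call stack, trim everything past the IRQ entry
 * frame, and deduplicate it in the stack depot.
 */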
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        unsigned int nr_entries;

        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
        nr_entries = filter_irq_stacks(entries, nr_entries);
        return stack_depot_save(entries, nr_entries, flags);
}

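/* Record the current pid and stack trace in an alloc/free track slot. */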
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

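/*
 * Outline checks for an explicit read or write of 'size' bytes at 'p':
 * the whole access range is validated against the shadow.
 */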
bool __kasan_check_read(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
        return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

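/*
 * Interceptors for the string functions: check the full source/destination
 * ranges before calling the uninstrumented __mem*() implementations.
 */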
#undef memset
void *memset(void *addr, int c, size_t len)
{
        if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
                return NULL;

        return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
            !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
            !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
                return NULL;

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address.  Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

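/* Tag each page of a new high-order allocation and unpoison its shadow. */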
void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

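/*
 * Lay out KASAN metadata for a cache: alloc metadata, optional free
 * metadata and an adaptive redzone are appended after each object.
 * If the metadata does not fit within KMALLOC_MAX_SIZE, the cache is
 * left without KASAN metadata (SLAB_KASAN is not set).
 */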
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

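/* Size of the KASAN metadata appended to each object of the cache. */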
size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

static void kasan_set_free_info(struct kmem_cache *cache,
                void *object, u8 tag)
{
        struct kasan_alloc_meta *alloc_meta;
        u8 idx = 0;

        alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
        idx = alloc_meta->free_track_idx;
        alloc_meta->free_pointer_tag[idx] = tag;
        alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

        set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

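/*
 * Reset the KASAN tags of all pages of a new slab and poison it as
 * redzone until the individual objects are allocated.
 */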
void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < compound_nr(page); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page), page_size(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next to
 *    each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

        /* else CONFIG_KASAN_SW_TAGS: */
        if ((u8)shadow_byte == KASAN_TAG_INVALID)
                return true;
        if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
                return true;

        return false;
}

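/*
 * Check that the pointer being freed points to the start of a live object
 * and poison it. Returns true if the slab allocator must not free the
 * object itself: either an invalid free was reported, or the object was
 * put into the quarantine (generic KASAN only).
 */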
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        kasan_set_free_info(cache, object, tag);

        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

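/*
 * Common allocation hook: unpoison the requested size, poison the rest of
 * the object as redzone, assign (or keep) a tag and record the allocation
 * stack trace.
 */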
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

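/*
 * Hook for allocations that are backed directly by the page allocator:
 * unpoison 'size' bytes and poison the remainder of the page(s) as redzone.
 */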
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + page_size(page);

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

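/*
 * Poison the memory behind 'ptr' on free. Page-allocator backed objects are
 * poisoned whole; slab objects go through __kasan_slab_free() without being
 * quarantined. An invalid free is reported if 'ptr' is not the start of the
 * allocation.
 */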
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
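/*
 * Allocate and initialize shadow memory covering the [addr, addr + size)
 * module region and mark the vm area so kasan_free_shadow() can free the
 * shadow later.
 */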
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
extern bool report_enabled(void);

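/*
 * Entry point for reporting an invalid access. User access state (e.g. SMAP)
 * is saved and restored around the report. Returns true if a report was
 * produced.
 */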
bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
        unsigned long flags = user_access_save();
        bool ret = false;

        if (likely(report_enabled())) {
                __kasan_report(addr, size, is_write, ip);
                ret = true;
        }

        user_access_restore(flags);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG
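/* Walk the kernel page tables to check whether shadow is mapped at 'addr'. */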
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the former is
         * arch-specific, the latter depends on HUGETLB_PAGE.  So let's abuse
         * pud_bad(): if the pud is bad, it's bad because it's huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

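/*
 * Memory hotplug notifier: allocate shadow for memory going online and free
 * shadow that was allocated here when that memory goes offline again.
 */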
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, it must have been mapped
                 * during boot. This can happen when we are onlining
                 * previously offlined memory.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow.
                 * A non-NULL result from find_vm_area() tells us that we are
                 * dealing with the latter case.
                 *
                 * Currently it's not possible to free shadow mapped
                 * during boot by kasan_init(). It's because the code
                 * to do that hasn't been written yet. So we'll just
                 * leak the memory.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
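/*
 * apply_to_page_range() callback: back one shadow page with a freshly
 * allocated, poisoned page if it is not already mapped.
 */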
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                      void *unused)
{
        unsigned long page;
        pte_t pte;

        if (likely(!pte_none(*ptep)))
                return 0;

        page = __get_free_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
        pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

        spin_lock(&init_mm.page_table_lock);
        if (likely(pte_none(*ptep))) {
                set_pte_at(&init_mm, addr, ptep, pte);
                page = 0;
        }
        spin_unlock(&init_mm.page_table_lock);
        if (page)
                free_page(page);
        return 0;
}

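/*
 * Make sure the shadow covering a vmalloc'ed region [addr, addr + size) is
 * backed by real pages.
 */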
int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
        unsigned long shadow_start, shadow_end;
        int ret;

        if (!is_vmalloc_or_module_addr((void *)addr))
                return 0;

        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
        shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
        shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
        shadow_end = ALIGN(shadow_end, PAGE_SIZE);

        ret = apply_to_page_range(&init_mm, shadow_start,
                                  shadow_end - shadow_start,
                                  kasan_populate_vmalloc_pte, NULL);
        if (ret)
                return ret;

        flush_cache_vmap(shadow_start, shadow_end);

        /*
         * We need to be careful about inter-cpu effects here. Consider:
         *
         *   CPU#0                                CPU#1
         * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
         *                                      p[99] = 1;
         *
         * With compiler instrumentation, that ends up looking like this:
         *
         *   CPU#0                                CPU#1
         * // vmalloc() allocates memory
         * // let a = area->addr
         * // we reach kasan_populate_vmalloc
         * // and call kasan_unpoison_shadow:
         * STORE shadow(a), unpoison_val
         * ...
         * STORE shadow(a+99), unpoison_val     x = LOAD p
         * // rest of vmalloc process           <data dependency>
         * STORE p, a                           LOAD shadow(x+99)
         *
         * If there is no barrier between the end of unpoisoning the shadow
         * and the store of the result to p, the stores could be committed
         * in a different order by CPU#0, and CPU#1 could erroneously observe
         * poison in the shadow.
         *
         * We need some sort of barrier between the stores.
         *
         * In the vmalloc() case, this is provided by a smp_wmb() in
         * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
         * get_vm_area() and friends, the caller gets shadow allocated but
         * doesn't have any pages mapped into the virtual address space that
         * has been reserved. Mapping those pages in will involve taking and
         * releasing a page-table lock, which will provide the barrier.
         */

        return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
        if (!is_vmalloc_or_module_addr(start))
                return;

        kasan_unpoison_shadow(start, size);
}

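/*
 * apply_to_existing_page_range() callback: clear one shadow PTE and free the
 * page that was backing it.
 */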
static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
                                        void *unused)
{
        unsigned long page;

        page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

        spin_lock(&init_mm.page_table_lock);

        if (likely(!pte_none(*ptep))) {
                pte_clear(&init_mm, addr, ptep);
                free_page(page);
        }
        spin_unlock(&init_mm.page_table_lock);

        return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
                           unsigned long free_region_start,
                           unsigned long free_region_end)
{
        void *shadow_start, *shadow_end;
        unsigned long region_start, region_end;
        unsigned long size;

        region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
        region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        free_region_start = ALIGN(free_region_start,
                                  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (start != region_start &&
            free_region_start < region_start)
                region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        free_region_end = ALIGN_DOWN(free_region_end,
                                     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

        if (end != region_end &&
            free_region_end > region_end)
                region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

        shadow_start = kasan_mem_to_shadow((void *)region_start);
        shadow_end = kasan_mem_to_shadow((void *)region_end);

        if (shadow_end > shadow_start) {
                size = shadow_end - shadow_start;
                apply_to_existing_page_range(&init_mm,
                                             (unsigned long)shadow_start,
                                             size, kasan_depopulate_vmalloc_pte,
                                             NULL);
                flush_tlb_kernel_range((unsigned long)shadow_start,
                                       (unsigned long)shadow_end);
        }
}
#endif