// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}
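
/*
 * Worked example, as a sketch (assuming generic KASAN with an 8-byte shadow
 * granule, i.e. KASAN_SHADOW_SCALE_SIZE == 8 and KASAN_SHADOW_MASK == 7):
 * kasan_unpoison_shadow(p, 13) marks the shadow byte covering p[0..7] as
 * fully accessible via kasan_poison_shadow() above, then stores 13 & 7 == 5
 * into the shadow byte covering p[8..15], meaning only the first 5 bytes of
 * that last granule may be accessed. With CONFIG_KASAN_SW_TAGS the last
 * shadow byte instead receives the pointer tag, since tag-based KASAN only
 * tracks validity at whole-granule granularity.
 */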

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address.  Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
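
/*
 * Illustration of the table above (generic KASAN only, since the tag-based
 * mode returns 0): optimal_redzone(32) == 16, optimal_redzone(100) == 64
 * (100 does not fit in 128 - 32 but fits in 512 - 64), and
 * optimal_redzone(8192) == 256. The redzone grows with the object size so
 * that larger out-of-bounds offsets still land in poisoned memory.
 */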

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}
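
/*
 * Layout sketch for the function above, with illustrative (assumed, not
 * taken from this file) metadata sizes: a cache with object_size == 64, no
 * constructor and no SLAB_TYPESAFE_BY_RCU under generic KASAN gets
 * alloc_meta_offset == 64; assuming sizeof(struct kasan_alloc_meta) == 16,
 * *size grows to 80. No free meta is appended, because the object itself is
 * large enough to hold struct kasan_free_meta once it is freed.
 * optimal_redzone(64) == 32, so redzone_adjust == 32 - (80 - 64) == 16 and
 * the final *size is 96: object, then alloc meta, then 16 bytes of trailing
 * redzone, with SLAB_KASAN set on the cache.
 */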

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}


static void kasan_set_free_info(struct kmem_cache *cache,
		void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on object indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
						const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}
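
/*
 * Example of what shadow_invalid() catches, as a sketch: under generic KASAN
 * an already freed object has its shadow set to KASAN_KMALLOC_FREE, which is
 * negative when read as s8, so a second kfree() of the same pointer makes
 * shadow_invalid() return true in __kasan_slab_free() below and an
 * invalid-free report is produced. Under CONFIG_KASAN_SW_TAGS a stale or
 * corrupted pointer is caught because its tag no longer matches the shadow
 * byte of the object.
 */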

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
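
/*
 * Worked example, as a sketch: kmalloc(100, GFP_KERNEL) served from a
 * kmalloc-128 cache (object_size == 128) under generic KASAN with an 8-byte
 * granule gives redzone_start == object + 104 (100 rounded up to the
 * granule) and redzone_end == object + 128. Bytes [0, 100) are unpoisoned,
 * the shadow byte covering bytes [96, 104) encodes the partial value
 * 100 & 7 == 4, and the shadow for [104, 128) is set to
 * KASAN_KMALLOC_REDZONE, so an access to object[100] is reported.
 */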

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}
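
/*
 * Sizing sketch (assuming 4 KiB pages and KASAN_SHADOW_SCALE_SHIFT == 3):
 * for a 1 MiB module mapping, scaled_size is 128 KiB, which is already page
 * aligned, so 128 KiB of shadow is vmalloc'ed at kasan_mem_to_shadow(addr)
 * and filled with KASAN_SHADOW_INIT. The VM_KASAN flag set on the module's
 * vm_struct lets kasan_free_shadow() below find and vfree() this shadow when
 * the module area is released.
 */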

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE.  So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, then it must have been
		 * mapped during boot. This can happen when onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * A non-NULL result from find_vm_area() will tell us
		 * whether that was the case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0                                CPU#1
	 * WRITE_ONCE(p, vmalloc(100));         while (x = READ_ONCE(p)) ;
	 *                                      p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0                                CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison_shadow:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val     x = LOAD p
	 * // rest of vmalloc process           <data dependency>
	 * STORE p, a                           LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison_shadow(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	free_region_start = ALIGN(free_region_start,
				  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	free_region_end = ALIGN_DOWN(free_region_end,
				     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}
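
/*
 * Worked example, as a sketch (assuming 4 KiB pages and
 * KASAN_SHADOW_SCALE_SIZE == 8, so one shadow page covers 32 KiB of vmalloc
 * space): take a 32 KiB aligned base, start == base + 8K, end == base + 104K,
 * inside the free region [base, base + 128K). region_start aligns up to
 * base + 32K and region_end aligns down to base + 96K. Because the free
 * region reaches base on the left and base + 128K on the right, both
 * partially covered shadow pages may be dropped: region_start is pulled back
 * to base and region_end pushed out to base + 128K, so the shadow for the
 * whole [base, base + 128K) range (four shadow pages) is depopulated and its
 * TLB entries are flushed.
 */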
#endif