
mm, kasan: SLAB support
[uclinux-h8/linux.git] mm/kasan/kasan.c
/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/kasan.h>

#include "kasan.h"
#include "../slab.h"

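/*
 * Overview: each KASAN_SHADOW_SCALE_SIZE bytes of kernel memory map to one
 * shadow byte, located via kasan_mem_to_shadow().  A shadow value of 0 means
 * the whole granule is accessible, a value of 1..KASAN_SHADOW_SCALE_SIZE-1
 * means only the first N bytes are accessible, and negative values mark the
 * granule as poisoned (redzone, freed object, freed page, etc.).
 */
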
/*
 * Poisons the shadow memory for 'size' bytes starting from 'address'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

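/*
 * Mark 'size' bytes starting at 'address' as accessible.  If the region ends
 * in the middle of a shadow granule, the number of accessible bytes
 * (size & KASAN_SHADOW_MASK) is recorded in that granule's shadow byte.
 */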
void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
        __kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

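/*
 * A shadow byte of 0 means the whole granule is accessible.  A positive
 * value k means only the first k bytes are accessible, so an access at
 * offset (addr & KASAN_SHADOW_MASK) >= k is a bug; negative shadow values
 * always indicate poisoned memory.
 */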
static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we
                 * don't need to do anything more. Otherwise, test the
                 * first shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we
                 * don't need to do anything more. Otherwise, test the
                 * first shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we
                 * don't need to do anything more. Otherwise, test the
                 * first shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

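/*
 * For accesses larger than 16 bytes the shadow region is scanned directly:
 * an unaligned prefix and the tail are checked byte by byte, while the
 * aligned middle is checked in 8-byte words (see memory_is_zero() below).
 */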
static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}


static __always_inline void check_memory_region(unsigned long addr,
                                                size_t size, bool write)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, _RET_IP_);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, _RET_IP_);
}

void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

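/*
 * Instrumented replacements for the string routines: the affected ranges are
 * checked via __asan_loadN()/__asan_storeN() before the real, uninstrumented
 * __memset()/__memmove()/__memcpy() implementations run.
 */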
#undef memset
void *memset(void *addr, int c, size_t len)
{
        __asan_storeN((unsigned long)addr, len);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        __asan_loadN((unsigned long)src, len);
        __asan_storeN((unsigned long)dest, len);

        return __memcpy(dest, src, len);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}

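/*
 * kasan_cache_create() grows the object size to make room for allocation
 * metadata, free metadata (when it cannot live inside the freed object), and
 * enough padding to reach the adaptive redzone above; e.g. a 200-byte object
 * ends up with at least 64 bytes of extra space past the object.
 */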
void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;
        /* Make sure the adjusted size is still less than
         * KMALLOC_MAX_CACHE_SIZE.
         * TODO: this check is only useful for SLAB, but not SLUB. We'll need
         * to skip it for SLUB when it starts using kasan_cache_create().
         */
        if (*size > KMALLOC_MAX_CACHE_SIZE -
            sizeof(struct kasan_alloc_meta) -
            sizeof(struct kasan_free_meta))
                return;
        *flags |= SLAB_KASAN;
        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;
        *size = min(KMALLOC_MAX_CACHE_SIZE,
                    max(*size,
                        cache->object_size +
                        optimal_redzone(cache->object_size)));
}
#endif

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_INIT;
        }
#endif
}

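/* Record the CPU, PID and timestamp of an allocation or free for reports. */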
static inline void set_track(struct kasan_track *track)
{
        track->cpu = raw_smp_processor_id();
        track->pid = current->pid;
        track->when = jiffies;
}

#ifdef CONFIG_SLAB
struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object)
{
        kasan_kmalloc(cache, object, cache->object_size);
}

void kasan_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_free_meta *free_info =
                        get_free_info(cache, object);
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                alloc_info->state = KASAN_STATE_FREE;
                set_track(&free_info->track);
        }
#endif

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

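/*
 * Unpoison the first 'size' bytes of the object and poison the remainder, up
 * to the cache's object_size, as a right redzone; under CONFIG_SLAB the
 * allocation state, size and track info are recorded as well.
 */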
void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);

                alloc_info->state = KASAN_STATE_ALLOC;
                alloc_info->alloc_size = size;
                set_track(&alloc_info->track);
        }
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);

void kasan_kmalloc_large(const void *ptr, size_t size)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size);
        else
                kasan_kmalloc(page->slab_cache, object, size);
}

void kasan_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

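/*
 * Allocate shadow memory backing a module mapping: a page-aligned shadow
 * region (size >> KASAN_SHADOW_SCALE_SHIFT, rounded up) is vmalloc'ed at the
 * shadow address corresponding to 'addr'.
 */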
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

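/*
 * Entry points emitted by the compiler for every instrumented load/store.
 * The *_noabort variants exist for instrumentation that is not allowed to
 * abort; in the kernel they are simply aliases of the regular checks.
 */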
#define DEFINE_ASAN_LOAD_STORE(size)                            \
        void __asan_load##size(unsigned long addr)              \
        {                                                       \
                check_memory_region(addr, size, false);         \
        }                                                       \
        EXPORT_SYMBOL(__asan_load##size);                       \
        __alias(__asan_load##size)                              \
        void __asan_load##size##_noabort(unsigned long);        \
        EXPORT_SYMBOL(__asan_load##size##_noabort);             \
        void __asan_store##size(unsigned long addr)             \
        {                                                       \
                check_memory_region(addr, size, true);          \
        }                                                       \
        EXPORT_SYMBOL(__asan_store##size);                      \
        __alias(__asan_store##size)                             \
        void __asan_store##size##_noabort(unsigned long);       \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_err("WARNING: KASAN doesn't support memory hot-add\n");
        pr_err("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif