#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
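
/*
 * Sketch of the resulting KCOV_TRACE_CMP record layout (as written by
 * write_comp_data() below): word 0 of the shared area is the record
 * count, and each record is KCOV_WORDS_PER_CMP consecutive 64-bit words:
 *
 *	{ type, arg1, arg2, caller PC }
 */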

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
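
/*
 * A minimal sketch of the userspace sequence described above, for
 * KCOV_TRACE_PC. It assumes the KCOV_* definitions from the uapi header
 * <linux/kcov.h> and debugfs mounted at /sys/kernel/debug; COVER_SIZE is
 * a caller-chosen buffer size in unsigned longs:
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	... issue the syscall under test ...
 *	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	... cover[1..n] now hold the PCs that were executed ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */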

struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};

static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	enum kcov_mode mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}
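
/*
 * The pairing mentioned above, schematically:
 *
 *	kcov_ioctl_locked()                 check_kcov_mode()
 *	t->kcov_size = kcov->size;          mode = READ_ONCE(t->kcov_mode);
 *	t->kcov_area = kcov->area;          barrier();
 *	barrier();                          ... use kcov_size/kcov_area ...
 *	WRITE_ONCE(t->kcov_mode, mode);
 *
 * A task that observes an enabled kcov_mode is thus guaranteed to also
 * observe the matching kcov_size/kcov_area values.
 */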

static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
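
/*
 * Illustrative example of the table the instrumentation is expected to
 * pass in: for "switch (x) { case 1: ...; case 7: ...; }" with a 32-bit
 * x, cases[] would be { 2, 32, 1, 7 } - the number of case values, the
 * bit width of the compared value, then the case constants that the
 * loop above feeds to write_comp_data().
 */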
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);