tools/testing/selftests/kvm/aarch64/page_fault_test.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * page_fault_test.c - Test stage 2 faults.
4  *
5  * This test tries different combinations of guest accesses (e.g., write,
6  * S1PTW), backing source type (e.g., anon) and types of faults (e.g., read on
7  * hugetlbfs with a hole). It checks that the expected handling method is
8  * called (e.g., uffd faults with the right address and write/read flag).
9  */
10
11 #define _GNU_SOURCE
12 #include <linux/bitmap.h>
13 #include <fcntl.h>
14 #include <test_util.h>
15 #include <kvm_util.h>
16 #include <processor.h>
17 #include <asm/sysreg.h>
18 #include <linux/bitfield.h>
19 #include "guest_modes.h"
20 #include "userfaultfd_util.h"
21
22 /* Guest virtual addresses that point to the test page and its PTE. */
23 #define TEST_GVA                                0xc0000000
24 #define TEST_EXEC_GVA                           (TEST_GVA + 0x8)
25 #define TEST_PTE_GVA                            0xb0000000
26 #define TEST_DATA                               0x0123456789ABCDEF
27
28 static uint64_t *guest_test_memory = (uint64_t *)TEST_GVA;
29
30 #define CMD_NONE                                (0)
31 #define CMD_SKIP_TEST                           (1ULL << 1)
32 #define CMD_HOLE_PT                             (1ULL << 2)
33 #define CMD_HOLE_DATA                           (1ULL << 3)
34 #define CMD_CHECK_WRITE_IN_DIRTY_LOG            (1ULL << 4)
35 #define CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG         (1ULL << 5)
36 #define CMD_CHECK_NO_WRITE_IN_DIRTY_LOG         (1ULL << 6)
37 #define CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG      (1ULL << 7)
38 #define CMD_SET_PTE_AF                          (1ULL << 8)
39
40 #define PREPARE_FN_NR                           10
41 #define CHECK_FN_NR                             10
42
43 static struct event_cnt {
44         int mmio_exits;
45         int fail_vcpu_runs;
46         int uffd_faults;
47         /* uffd_faults is incremented from multiple threads. */
48         pthread_mutex_t uffd_faults_mutex;
49 } events;
50
51 struct test_desc {
52         const char *name;
53         uint64_t mem_mark_cmd;
54         /* Skip the test if any prepare function returns false */
55         bool (*guest_prepare[PREPARE_FN_NR])(void);
56         void (*guest_test)(void);
57         void (*guest_test_check[CHECK_FN_NR])(void);
58         uffd_handler_t uffd_pt_handler;
59         uffd_handler_t uffd_data_handler;
60         void (*dabt_handler)(struct ex_regs *regs);
61         void (*iabt_handler)(struct ex_regs *regs);
62         void (*mmio_handler)(struct kvm_vm *vm, struct kvm_run *run);
63         void (*fail_vcpu_run_handler)(int ret);
64         uint32_t pt_memslot_flags;
65         uint32_t data_memslot_flags;
66         bool skip;
67         struct event_cnt expected_events;
68 };
69
70 struct test_params {
71         enum vm_mem_backing_src_type src_type;
72         struct test_desc *test_desc;
73 };
74
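/*
 * Invalidate any stage 1 TLB entries for @vaddr, for all ASIDs, broadcast to
 * the Inner Shareable domain (tlbi vaae1is).
 */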
75 static inline void flush_tlb_page(uint64_t vaddr)
76 {
77         uint64_t page = vaddr >> 12;
78
79         dsb(ishst);
80         asm volatile("tlbi vaae1is, %0" :: "r" (page));
81         dsb(ish);
82         isb();
83 }
84
85 static void guest_write64(void)
86 {
87         uint64_t val;
88
89         WRITE_ONCE(*guest_test_memory, TEST_DATA);
90         val = READ_ONCE(*guest_test_memory);
91         GUEST_ASSERT_EQ(val, TEST_DATA);
92 }
93
94 /* Check that the system supports LSE atomic instructions. */
95 static bool guest_check_lse(void)
96 {
97         uint64_t isar0 = read_sysreg(id_aa64isar0_el1);
98         uint64_t atomic;
99
100         atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0);
101         return atomic >= 2;
102 }
103
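/* Check that "dc zva" is allowed: DCZID_EL0.DZP == 0 means zeroing is not prohibited. */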
104 static bool guest_check_dc_zva(void)
105 {
106         uint64_t dczid = read_sysreg(dczid_el0);
107         uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid);
108
109         return dzp == 0;
110 }
111
112 /* Compare and swap instruction. */
113 static void guest_cas(void)
114 {
115         uint64_t val;
116
117         GUEST_ASSERT(guest_check_lse());
118         asm volatile(".arch_extension lse\n"
119                      "casal %0, %1, [%2]\n"
120                      :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
121         val = READ_ONCE(*guest_test_memory);
122         GUEST_ASSERT_EQ(val, TEST_DATA);
123 }
124
125 static void guest_read64(void)
126 {
127         uint64_t val;
128
129         val = READ_ONCE(*guest_test_memory);
130         GUEST_ASSERT_EQ(val, 0);
131 }
132
133 /* Address translation instruction */
134 static void guest_at(void)
135 {
136         uint64_t par;
137
138         asm volatile("at s1e1r, %0" :: "r" (guest_test_memory));
139         isb();
140         par = read_sysreg(par_el1);
141
142         /* PAR_EL1.F (bit 0) is clear when the translation succeeded. */
143         GUEST_ASSERT_EQ(par & 1, 0);
144 }
145
146 /*
147  * The size of the block written by "dc zva" is guaranteed to be between (2 <<
148  * 0) and (2 << 9), which is safe in our case as we need the write to happen
149  * for at least a word, and not more than a page.
150  */
151 static void guest_dc_zva(void)
152 {
153         uint16_t val;
154
155         asm volatile("dc zva, %0" :: "r" (guest_test_memory));
156         dsb(ish);
157         val = READ_ONCE(*guest_test_memory);
158         GUEST_ASSERT_EQ(val, 0);
159 }
160
161 /*
162  * Pre-indexing loads and stores don't have a valid syndrome (ESR_EL2.ISV==0).
163  * These are special because KVM must take extra care with them: they
164  * should still count as accesses for dirty logging or user-faulting, but
165  * should be handled differently on mmio.
166  */
167 static void guest_ld_preidx(void)
168 {
169         uint64_t val;
170         uint64_t addr = TEST_GVA - 8;
171
172         /*
173          * This ends up accessing "(TEST_GVA - 8) + 8", where "TEST_GVA - 8" is
174          * in a gap between memslots not backed by anything.
175          */
176         asm volatile("ldr %0, [%1, #8]!"
177                      : "=r" (val), "+r" (addr));
178         GUEST_ASSERT_EQ(val, 0);
179         GUEST_ASSERT_EQ(addr, TEST_GVA);
180 }
181
182 static void guest_st_preidx(void)
183 {
184         uint64_t val = TEST_DATA;
185         uint64_t addr = TEST_GVA - 8;
186
187         asm volatile("str %0, [%1, #8]!"
188                      : "+r" (val), "+r" (addr));
189
190         GUEST_ASSERT_EQ(addr, TEST_GVA);
191         val = READ_ONCE(*guest_test_memory);
192 }
193
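/*
 * Enable hardware update of the Access Flag (TCR_EL1.HA). Returns false (skip
 * the test) if hardware AF management is not supported.
 */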
194 static bool guest_set_ha(void)
195 {
196         uint64_t mmfr1 = read_sysreg(id_aa64mmfr1_el1);
197         uint64_t hadbs, tcr;
198
199         /* Skip if HA is not supported. */
200         hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1);
201         if (hadbs == 0)
202                 return false;
203
204         tcr = read_sysreg(tcr_el1) | TCR_EL1_HA;
205         write_sysreg(tcr, tcr_el1);
206         isb();
207
208         return true;
209 }
210
211 static bool guest_clear_pte_af(void)
212 {
213         *((uint64_t *)TEST_PTE_GVA) &= ~PTE_AF;
214         flush_tlb_page(TEST_GVA);
215
216         return true;
217 }
218
219 static void guest_check_pte_af(void)
220 {
221         dsb(ish);
222         GUEST_ASSERT_EQ(*((uint64_t *)TEST_PTE_GVA) & PTE_AF, PTE_AF);
223 }
224
225 static void guest_check_write_in_dirty_log(void)
226 {
227         GUEST_SYNC(CMD_CHECK_WRITE_IN_DIRTY_LOG);
228 }
229
230 static void guest_check_no_write_in_dirty_log(void)
231 {
232         GUEST_SYNC(CMD_CHECK_NO_WRITE_IN_DIRTY_LOG);
233 }
234
235 static void guest_check_s1ptw_wr_in_dirty_log(void)
236 {
237         GUEST_SYNC(CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG);
238 }
239
240 static void guest_check_no_s1ptw_wr_in_dirty_log(void)
241 {
242         GUEST_SYNC(CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG);
243 }
244
245 static void guest_exec(void)
246 {
247         int (*code)(void) = (int (*)(void))TEST_EXEC_GVA;
248         int ret;
249
250         ret = code();
251         GUEST_ASSERT_EQ(ret, 0x77);
252 }
253
254 static bool guest_prepare(struct test_desc *test)
255 {
256         bool (*prepare_fn)(void);
257         int i;
258
259         for (i = 0; i < PREPARE_FN_NR; i++) {
260                 prepare_fn = test->guest_prepare[i];
261                 if (prepare_fn && !prepare_fn())
262                         return false;
263         }
264
265         return true;
266 }
267
268 static void guest_test_check(struct test_desc *test)
269 {
270         void (*check_fn)(void);
271         int i;
272
273         for (i = 0; i < CHECK_FN_NR; i++) {
274                 check_fn = test->guest_test_check[i];
275                 if (check_fn)
276                         check_fn();
277         }
278 }
279
280 static void guest_code(struct test_desc *test)
281 {
282         if (!guest_prepare(test))
283                 GUEST_SYNC(CMD_SKIP_TEST);
284
285         GUEST_SYNC(test->mem_mark_cmd);
286
287         if (test->guest_test)
288                 test->guest_test();
289
290         guest_test_check(test);
291         GUEST_DONE();
292 }
293
294 static void no_dabt_handler(struct ex_regs *regs)
295 {
296         GUEST_ASSERT_1(false, read_sysreg(far_el1));
297 }
298
299 static void no_iabt_handler(struct ex_regs *regs)
300 {
301         GUEST_ASSERT_1(false, regs->pc);
302 }
303
304 static struct uffd_args {
305         char *copy;
306         void *hva;
307         uint64_t paging_size;
308 } pt_args, data_args;
309
310 /* Returns 0 on success, and -1 if the UFFDIO_COPY ioctl failed. */
311 static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
312                                 struct uffd_args *args)
313 {
314         uint64_t addr = msg->arg.pagefault.address;
315         uint64_t flags = msg->arg.pagefault.flags;
316         struct uffdio_copy copy;
317         int ret;
318
319         TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
320                     "The only expected UFFD mode is MISSING");
321         ASSERT_EQ(addr, (uint64_t)args->hva);
322
323         pr_debug("uffd fault: addr=%p write=%d\n",
324                  (void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
325
326         copy.src = (uint64_t)args->copy;
327         copy.dst = addr;
328         copy.len = args->paging_size;
329         copy.mode = 0;
330
331         ret = ioctl(uffd, UFFDIO_COPY, &copy);
332         if (ret == -1) {
333                 pr_info("Failed UFFDIO_COPY at 0x%lx with errno: %d\n",
334                         addr, errno);
335                 return ret;
336         }
337
338         pthread_mutex_lock(&events.uffd_faults_mutex);
339         events.uffd_faults += 1;
340         pthread_mutex_unlock(&events.uffd_faults_mutex);
341         return 0;
342 }
343
344 static int uffd_pt_handler(int mode, int uffd, struct uffd_msg *msg)
345 {
346         return uffd_generic_handler(mode, uffd, msg, &pt_args);
347 }
348
349 static int uffd_data_handler(int mode, int uffd, struct uffd_msg *msg)
350 {
351         return uffd_generic_handler(mode, uffd, msg, &data_args);
352 }
353
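/*
 * Save a copy of the region contents; the uffd handlers use it as the
 * UFFDIO_COPY source when resolving faults.
 */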
354 static void setup_uffd_args(struct userspace_mem_region *region,
355                             struct uffd_args *args)
356 {
357         args->hva = (void *)region->region.userspace_addr;
358         args->paging_size = region->region.memory_size;
359
360         args->copy = malloc(args->paging_size);
361         TEST_ASSERT(args->copy, "Failed to allocate data copy.");
362         memcpy(args->copy, args->hva, args->paging_size);
363 }
364
365 static void setup_uffd(struct kvm_vm *vm, struct test_params *p,
366                        struct uffd_desc **pt_uffd, struct uffd_desc **data_uffd)
367 {
368         struct test_desc *test = p->test_desc;
369         int uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
370
371         setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_PT), &pt_args);
372         setup_uffd_args(vm_get_mem_region(vm, MEM_REGION_TEST_DATA), &data_args);
373
374         *pt_uffd = NULL;
375         if (test->uffd_pt_handler)
376                 *pt_uffd = uffd_setup_demand_paging(uffd_mode, 0,
377                                                     pt_args.hva,
378                                                     pt_args.paging_size,
379                                                     test->uffd_pt_handler);
380
381         *data_uffd = NULL;
382         if (test->uffd_data_handler)
383                 *data_uffd = uffd_setup_demand_paging(uffd_mode, 0,
384                                                       data_args.hva,
385                                                       data_args.paging_size,
386                                                       test->uffd_data_handler);
387 }
388
389 static void free_uffd(struct test_desc *test, struct uffd_desc *pt_uffd,
390                       struct uffd_desc *data_uffd)
391 {
392         if (test->uffd_pt_handler)
393                 uffd_stop_demand_paging(pt_uffd);
394         if (test->uffd_data_handler)
395                 uffd_stop_demand_paging(data_uffd);
396
397         free(pt_args.copy);
398         free(data_args.copy);
399 }
400
401 static int uffd_no_handler(int mode, int uffd, struct uffd_msg *msg)
402 {
403         TEST_FAIL("No UFFD fault was expected.");
404         return -1;
405 }
406
407 /* Always returns true; a failure to punch the hole aborts the test via TEST_ASSERT(). */
408 static bool punch_hole_in_backing_store(struct kvm_vm *vm,
409                                         struct userspace_mem_region *region)
410 {
411         void *hva = (void *)region->region.userspace_addr;
412         uint64_t paging_size = region->region.memory_size;
413         int ret, fd = region->fd;
414
415         if (fd != -1) {
416                 ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
417                                 0, paging_size);
418                 TEST_ASSERT(ret == 0, "fallocate failed\n");
419         } else {
420                 ret = madvise(hva, paging_size, MADV_DONTNEED);
421                 TEST_ASSERT(ret == 0, "madvise failed\n");
422         }
423
424         return true;
425 }
426
427 static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
428 {
429         struct userspace_mem_region *region;
430         void *hva;
431
432         region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
433         hva = (void *)region->region.userspace_addr;
434
435         ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
436
437         memcpy(hva, run->mmio.data, run->mmio.len);
438         events.mmio_exits += 1;
439 }
440
441 static void mmio_no_handler(struct kvm_vm *vm, struct kvm_run *run)
442 {
443         uint64_t data;
444
445         memcpy(&data, run->mmio.data, sizeof(data));
446         pr_debug("addr=%lld len=%d w=%d data=%lx\n",
447                  run->mmio.phys_addr, run->mmio.len,
448                  run->mmio.is_write, data);
449         TEST_FAIL("No MMIO exit was expected.");
450 }
451
452 static bool check_write_in_dirty_log(struct kvm_vm *vm,
453                                      struct userspace_mem_region *region,
454                                      uint64_t host_pg_nr)
455 {
456         unsigned long *bmap;
457         bool first_page_dirty;
458         uint64_t size = region->region.memory_size;
459
460         /* getpagesize() is not always equal to vm->page_size */
461         bmap = bitmap_zalloc(size / getpagesize());
462         kvm_vm_get_dirty_log(vm, region->region.slot, bmap);
463         first_page_dirty = test_bit(host_pg_nr, bmap);
464         free(bmap);
465         return first_page_dirty;
466 }
467
468 /* Returns true to continue the test, and false if it should be skipped. */
469 static bool handle_cmd(struct kvm_vm *vm, int cmd)
470 {
471         struct userspace_mem_region *data_region, *pt_region;
472         bool continue_test = true;
473         uint64_t pte_gpa, pte_pg;
474
475         data_region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
476         pt_region = vm_get_mem_region(vm, MEM_REGION_PT);
477         pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
478         pte_pg = (pte_gpa - pt_region->region.guest_phys_addr) / getpagesize();
479
480         if (cmd == CMD_SKIP_TEST)
481                 continue_test = false;
482
483         if (cmd & CMD_HOLE_PT)
484                 continue_test = punch_hole_in_backing_store(vm, pt_region);
485         if (cmd & CMD_HOLE_DATA)
486                 continue_test = punch_hole_in_backing_store(vm, data_region);
487         if (cmd & CMD_CHECK_WRITE_IN_DIRTY_LOG)
488                 TEST_ASSERT(check_write_in_dirty_log(vm, data_region, 0),
489                             "Missing write in dirty log");
490         if (cmd & CMD_CHECK_S1PTW_WR_IN_DIRTY_LOG)
491                 TEST_ASSERT(check_write_in_dirty_log(vm, pt_region, pte_pg),
492                             "Missing s1ptw write in dirty log");
493         if (cmd & CMD_CHECK_NO_WRITE_IN_DIRTY_LOG)
494                 TEST_ASSERT(!check_write_in_dirty_log(vm, data_region, 0),
495                             "Unexpected write in dirty log");
496         if (cmd & CMD_CHECK_NO_S1PTW_WR_IN_DIRTY_LOG)
497                 TEST_ASSERT(!check_write_in_dirty_log(vm, pt_region, pte_pg),
498                             "Unexpected s1ptw write in dirty log");
499
500         return continue_test;
501 }
502
503 void fail_vcpu_run_no_handler(int ret)
504 {
505         TEST_FAIL("Unexpected vcpu run failure\n");
506 }
507
508 void fail_vcpu_run_mmio_no_syndrome_handler(int ret)
509 {
510         TEST_ASSERT(errno == ENOSYS,
511                     "The MMIO handler should have returned ENOSYS (not implemented).");
512         events.fail_vcpu_runs += 1;
513 }
514
515 typedef uint32_t aarch64_insn_t;
516 extern aarch64_insn_t __exec_test[2];
517
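/*
 * Host-compiled stub whose body is copied into guest memory: __exec_test
 * labels the two instructions "mov x0, #0x77; ret" that guest_exec() jumps to
 * via TEST_EXEC_GVA and expects to return 0x77.
 */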
518 noinline void __return_0x77(void)
519 {
520         asm volatile("__exec_test: mov x0, #0x77\n"
521                      "ret\n");
522 }
523
524 /*
525  * Note that this function runs on the host before the test VM starts: there's
526  * no need to sync the D$ and I$ caches.
527  */
528 static void load_exec_code_for_test(struct kvm_vm *vm)
529 {
530         uint64_t *code;
531         struct userspace_mem_region *region;
532         void *hva;
533
534         region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
535         hva = (void *)region->region.userspace_addr;
536
537         assert(TEST_EXEC_GVA > TEST_GVA);
538         code = hva + TEST_EXEC_GVA - TEST_GVA;
539         memcpy(code, __exec_test, sizeof(__exec_test));
540 }
541
542 static void setup_abort_handlers(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
543                                  struct test_desc *test)
544 {
545         vm_init_descriptor_tables(vm);
546         vcpu_init_descriptor_tables(vcpu);
547
548         vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
549                                 ESR_EC_DABT, no_dabt_handler);
550         vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT,
551                                 ESR_EC_IABT, no_iabt_handler);
552 }
553
554 static void setup_gva_maps(struct kvm_vm *vm)
555 {
556         struct userspace_mem_region *region;
557         uint64_t pte_gpa;
558
559         region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
560         /* Map TEST_GVA first. This will install a new PTE. */
561         virt_pg_map(vm, TEST_GVA, region->region.guest_phys_addr);
562         /* Then map TEST_PTE_GVA to the above PTE. */
563         pte_gpa = addr_hva2gpa(vm, virt_get_pte_hva(vm, TEST_GVA));
564         virt_pg_map(vm, TEST_PTE_GVA, pte_gpa);
565 }
566
567 enum pf_test_memslots {
568         CODE_AND_DATA_MEMSLOT,
569         PAGE_TABLE_MEMSLOT,
570         TEST_DATA_MEMSLOT,
571 };
572
573 /*
574  * Create a memslot for code and data at gfn=0, and test-data and PT ones
575  * at max_gfn.
576  */
577 static void setup_memslots(struct kvm_vm *vm, struct test_params *p)
578 {
579         uint64_t backing_src_pagesz = get_backing_src_pagesz(p->src_type);
580         uint64_t guest_page_size = vm->page_size;
581         uint64_t max_gfn = vm_compute_max_gfn(vm);
582         /* Enough for 2M of code when using 4K guest pages. */
583         uint64_t code_npages = 512;
584         uint64_t pt_size, data_size, data_gpa;
585
586         /*
587          * This test requires 1 pgd, 2 pud, 4 pmd, and 6 pte pages when using
588          * VM_MODE_P48V48_4K. Note that the .text takes ~1.6MBs.  That's 13
589          * pages. VM_MODE_P48V48_4K is the mode with most PT pages; let's use
590          * twice that just in case.
591          */
592         pt_size = 26 * guest_page_size;
593
594         /* memslot sizes and gpa's must be aligned to the backing page size */
595         pt_size = align_up(pt_size, backing_src_pagesz);
596         data_size = align_up(guest_page_size, backing_src_pagesz);
597         data_gpa = (max_gfn * guest_page_size) - data_size;
598         data_gpa = align_down(data_gpa, backing_src_pagesz);
599
600         vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0,
601                                     CODE_AND_DATA_MEMSLOT, code_npages, 0);
602         vm->memslots[MEM_REGION_CODE] = CODE_AND_DATA_MEMSLOT;
603         vm->memslots[MEM_REGION_DATA] = CODE_AND_DATA_MEMSLOT;
604
605         vm_userspace_mem_region_add(vm, p->src_type, data_gpa - pt_size,
606                                     PAGE_TABLE_MEMSLOT, pt_size / guest_page_size,
607                                     p->test_desc->pt_memslot_flags);
608         vm->memslots[MEM_REGION_PT] = PAGE_TABLE_MEMSLOT;
609
610         vm_userspace_mem_region_add(vm, p->src_type, data_gpa, TEST_DATA_MEMSLOT,
611                                     data_size / guest_page_size,
612                                     p->test_desc->data_memslot_flags);
613         vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
614 }
615
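/*
 * Place the ucall MMIO page right after the test-data memslot, in a GPA range
 * not covered by any memslot.
 */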
616 static void setup_ucall(struct kvm_vm *vm)
617 {
618         struct userspace_mem_region *region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
619
620         ucall_init(vm, region->region.guest_phys_addr + region->region.memory_size);
621 }
622
623 static void setup_default_handlers(struct test_desc *test)
624 {
625         if (!test->mmio_handler)
626                 test->mmio_handler = mmio_no_handler;
627
628         if (!test->fail_vcpu_run_handler)
629                 test->fail_vcpu_run_handler = fail_vcpu_run_no_handler;
630 }
631
632 static void check_event_counts(struct test_desc *test)
633 {
634         ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
635         ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
636         ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
637 }
638
639 static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
640 {
641         struct test_desc *test = p->test_desc;
642
643         pr_debug("Test: %s\n", test->name);
644         pr_debug("Testing guest mode: %s\n", vm_guest_mode_string(mode));
645         pr_debug("Testing memory backing src type: %s\n",
646                  vm_mem_backing_src_alias(p->src_type)->name);
647 }
648
649 static void reset_event_counts(void)
650 {
651         memset(&events, 0, sizeof(events));
652 }
653
654 /*
655  * This function either succeeds, skips the test (after setting test->skip), or
656  * fails with a TEST_FAIL that aborts all tests.
657  */
658 static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
659                           struct test_desc *test)
660 {
661         struct kvm_run *run;
662         struct ucall uc;
663         int ret;
664
665         run = vcpu->run;
666
667         for (;;) {
668                 ret = _vcpu_run(vcpu);
669                 if (ret) {
670                         test->fail_vcpu_run_handler(ret);
671                         goto done;
672                 }
673
674                 switch (get_ucall(vcpu, &uc)) {
675                 case UCALL_SYNC:
676                         if (!handle_cmd(vm, uc.args[1])) {
677                                 test->skip = true;
678                                 goto done;
679                         }
680                         break;
681                 case UCALL_ABORT:
682                         REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
683                         break;
684                 case UCALL_DONE:
685                         goto done;
686                 case UCALL_NONE:
687                         if (run->exit_reason == KVM_EXIT_MMIO)
688                                 test->mmio_handler(vm, run);
689                         break;
690                 default:
691                         TEST_FAIL("Unknown ucall %lu", uc.cmd);
692                 }
693         }
694
695 done:
696         pr_debug(test->skip ? "Skipped.\n" : "Done.\n");
697 }
698
699 static void run_test(enum vm_guest_mode mode, void *arg)
700 {
701         struct test_params *p = (struct test_params *)arg;
702         struct test_desc *test = p->test_desc;
703         struct kvm_vm *vm;
704         struct kvm_vcpu *vcpu;
705         struct uffd_desc *pt_uffd, *data_uffd;
706
707         print_test_banner(mode, p);
708
709         vm = ____vm_create(mode);
710         setup_memslots(vm, p);
711         kvm_vm_elf_load(vm, program_invocation_name);
712         setup_ucall(vm);
713         vcpu = vm_vcpu_add(vm, 0, guest_code);
714
715         setup_gva_maps(vm);
716
717         reset_event_counts();
718
719         /*
720          * Set some code in the data memslot for the guest to execute (only
721          * applicable to the EXEC tests). This has to be done before
722          * setup_uffd() as that function copies the memslot data for the uffd
723          * handler.
724          */
725         load_exec_code_for_test(vm);
726         setup_uffd(vm, p, &pt_uffd, &data_uffd);
727         setup_abort_handlers(vm, vcpu, test);
728         setup_default_handlers(test);
729         vcpu_args_set(vcpu, 1, test);
730
731         vcpu_run_loop(vm, vcpu, test);
732
733         kvm_vm_free(vm);
734         free_uffd(test, pt_uffd, data_uffd);
735
736         /*
737          * Make sure we check the events after the uffd threads have exited,
738          * which means they updated their respective event counters.
739          */
740         if (!test->skip)
741                 check_event_counts(test);
742 }
743
744 static void help(char *name)
745 {
746         puts("");
747         printf("usage: %s [-h] [-s mem-type]\n", name);
748         puts("");
749         guest_modes_help();
750         backing_src_help("-s");
751         puts("");
752 }
753
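/* Helpers for composing the test name strings used in the test definitions below. */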
754 #define SNAME(s)                        #s
755 #define SCAT2(a, b)                     SNAME(a ## _ ## b)
756 #define SCAT3(a, b, c)                  SCAT2(a, SCAT2(b, c))
757 #define SCAT4(a, b, c, d)               SCAT2(a, SCAT3(b, c, d))
758
759 #define _CHECK(_test)                   _CHECK_##_test
760 #define _PREPARE(_test)                 _PREPARE_##_test
761 #define _PREPARE_guest_read64           NULL
762 #define _PREPARE_guest_ld_preidx        NULL
763 #define _PREPARE_guest_write64          NULL
764 #define _PREPARE_guest_st_preidx        NULL
765 #define _PREPARE_guest_exec             NULL
766 #define _PREPARE_guest_at               NULL
767 #define _PREPARE_guest_dc_zva           guest_check_dc_zva
768 #define _PREPARE_guest_cas              guest_check_lse
769
770 /* With or without access flag checks */
771 #define _PREPARE_with_af                guest_set_ha, guest_clear_pte_af
772 #define _PREPARE_no_af                  NULL
773 #define _CHECK_with_af                  guest_check_pte_af
774 #define _CHECK_no_af                    NULL
775
776 /* Performs an access and checks that no faults were triggered. */
777 #define TEST_ACCESS(_access, _with_af, _mark_cmd)                               \
778 {                                                                               \
779         .name                   = SCAT3(_access, _with_af, #_mark_cmd),         \
780         .guest_prepare          = { _PREPARE(_with_af),                         \
781                                     _PREPARE(_access) },                        \
782         .mem_mark_cmd           = _mark_cmd,                                    \
783         .guest_test             = _access,                                      \
784         .guest_test_check       = { _CHECK(_with_af) },                         \
785         .expected_events        = { 0 },                                        \
786 }
787
788 #define TEST_UFFD(_access, _with_af, _mark_cmd,                                 \
789                   _uffd_data_handler, _uffd_pt_handler, _uffd_faults)           \
790 {                                                                               \
791         .name                   = SCAT4(uffd, _access, _with_af, #_mark_cmd),   \
792         .guest_prepare          = { _PREPARE(_with_af),                         \
793                                     _PREPARE(_access) },                        \
794         .guest_test             = _access,                                      \
795         .mem_mark_cmd           = _mark_cmd,                                    \
796         .guest_test_check       = { _CHECK(_with_af) },                         \
797         .uffd_data_handler      = _uffd_data_handler,                           \
798         .uffd_pt_handler        = _uffd_pt_handler,                             \
799         .expected_events        = { .uffd_faults = _uffd_faults, },             \
800 }
801
802 #define TEST_DIRTY_LOG(_access, _with_af, _test_check, _pt_check)               \
803 {                                                                               \
804         .name                   = SCAT3(dirty_log, _access, _with_af),          \
805         .data_memslot_flags     = KVM_MEM_LOG_DIRTY_PAGES,                      \
806         .pt_memslot_flags       = KVM_MEM_LOG_DIRTY_PAGES,                      \
807         .guest_prepare          = { _PREPARE(_with_af),                         \
808                                     _PREPARE(_access) },                        \
809         .guest_test             = _access,                                      \
810         .guest_test_check       = { _CHECK(_with_af), _test_check, _pt_check }, \
811         .expected_events        = { 0 },                                        \
812 }
813
814 #define TEST_UFFD_AND_DIRTY_LOG(_access, _with_af, _uffd_data_handler,          \
815                                 _uffd_faults, _test_check, _pt_check)           \
816 {                                                                               \
817         .name                   = SCAT3(uffd_and_dirty_log, _access, _with_af), \
818         .data_memslot_flags     = KVM_MEM_LOG_DIRTY_PAGES,                      \
819         .pt_memslot_flags       = KVM_MEM_LOG_DIRTY_PAGES,                      \
820         .guest_prepare          = { _PREPARE(_with_af),                         \
821                                     _PREPARE(_access) },                        \
822         .guest_test             = _access,                                      \
823         .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
824         .guest_test_check       = { _CHECK(_with_af), _test_check, _pt_check }, \
825         .uffd_data_handler      = _uffd_data_handler,                           \
826         .uffd_pt_handler        = uffd_pt_handler,                              \
827         .expected_events        = { .uffd_faults = _uffd_faults, },             \
828 }
829
830 #define TEST_RO_MEMSLOT(_access, _mmio_handler, _mmio_exits)                    \
831 {                                                                               \
832         .name                   = SCAT2(ro_memslot, _access),                   \
833         .data_memslot_flags     = KVM_MEM_READONLY,                             \
834         .pt_memslot_flags       = KVM_MEM_READONLY,                             \
835         .guest_prepare          = { _PREPARE(_access) },                        \
836         .guest_test             = _access,                                      \
837         .mmio_handler           = _mmio_handler,                                \
838         .expected_events        = { .mmio_exits = _mmio_exits },                \
839 }
840
841 #define TEST_RO_MEMSLOT_NO_SYNDROME(_access)                                    \
842 {                                                                               \
843         .name                   = SCAT2(ro_memslot_no_syndrome, _access),       \
844         .data_memslot_flags     = KVM_MEM_READONLY,                             \
845         .pt_memslot_flags       = KVM_MEM_READONLY,                             \
846         .guest_test             = _access,                                      \
847         .fail_vcpu_run_handler  = fail_vcpu_run_mmio_no_syndrome_handler,       \
848         .expected_events        = { .fail_vcpu_runs = 1 },                      \
849 }
850
851 #define TEST_RO_MEMSLOT_AND_DIRTY_LOG(_access, _mmio_handler, _mmio_exits,      \
852                                       _test_check)                              \
853 {                                                                               \
854         .name                   = SCAT2(ro_memslot_and_dlog, _access),          \
855         .data_memslot_flags     = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
856         .pt_memslot_flags       = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
857         .guest_prepare          = { _PREPARE(_access) },                        \
858         .guest_test             = _access,                                      \
859         .guest_test_check       = { _test_check },                              \
860         .mmio_handler           = _mmio_handler,                                \
861         .expected_events        = { .mmio_exits = _mmio_exits},                 \
862 }
863
864 #define TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(_access, _test_check)         \
865 {                                                                               \
866         .name                   = SCAT2(ro_memslot_no_syn_and_dlog, _access),   \
867         .data_memslot_flags     = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
868         .pt_memslot_flags       = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES,   \
869         .guest_test             = _access,                                      \
870         .guest_test_check       = { _test_check },                              \
871         .fail_vcpu_run_handler  = fail_vcpu_run_mmio_no_syndrome_handler,       \
872         .expected_events        = { .fail_vcpu_runs = 1 },                      \
873 }
874
875 #define TEST_RO_MEMSLOT_AND_UFFD(_access, _mmio_handler, _mmio_exits,           \
876                                  _uffd_data_handler, _uffd_faults)              \
877 {                                                                               \
878         .name                   = SCAT2(ro_memslot_uffd, _access),              \
879         .data_memslot_flags     = KVM_MEM_READONLY,                             \
880         .pt_memslot_flags       = KVM_MEM_READONLY,                             \
881         .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
882         .guest_prepare          = { _PREPARE(_access) },                        \
883         .guest_test             = _access,                                      \
884         .uffd_data_handler      = _uffd_data_handler,                           \
885         .uffd_pt_handler        = uffd_pt_handler,                              \
886         .mmio_handler           = _mmio_handler,                                \
887         .expected_events        = { .mmio_exits = _mmio_exits,                  \
888                                     .uffd_faults = _uffd_faults },              \
889 }
890
891 #define TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(_access, _uffd_data_handler,       \
892                                              _uffd_faults)                      \
893 {                                                                               \
894         .name                   = SCAT2(ro_memslot_no_syn_and_uffd, _access),   \
895         .data_memslot_flags     = KVM_MEM_READONLY,                             \
896         .pt_memslot_flags       = KVM_MEM_READONLY,                             \
897         .mem_mark_cmd           = CMD_HOLE_DATA | CMD_HOLE_PT,                  \
898         .guest_test             = _access,                                      \
899         .uffd_data_handler      = _uffd_data_handler,                           \
900         .uffd_pt_handler        = uffd_pt_handler,                             \
901         .fail_vcpu_run_handler  = fail_vcpu_run_mmio_no_syndrome_handler,       \
902         .expected_events        = { .fail_vcpu_runs = 1,                        \
903                                     .uffd_faults = _uffd_faults },              \
904 }
905
906 static struct test_desc tests[] = {
907
908         /* Check that HW is setting the Access Flag (AF) (sanity checks). */
909         TEST_ACCESS(guest_read64, with_af, CMD_NONE),
910         TEST_ACCESS(guest_ld_preidx, with_af, CMD_NONE),
911         TEST_ACCESS(guest_cas, with_af, CMD_NONE),
912         TEST_ACCESS(guest_write64, with_af, CMD_NONE),
913         TEST_ACCESS(guest_st_preidx, with_af, CMD_NONE),
914         TEST_ACCESS(guest_dc_zva, with_af, CMD_NONE),
915         TEST_ACCESS(guest_exec, with_af, CMD_NONE),
916
917         /*
918          * Punch a hole in the data backing store, and then try multiple
919          * accesses: reads should return zeroes, and writes should
920          * re-populate the page. Moreover, the test also checks that no
921          * exception was generated in the guest.  Note that this
922          * reading/writing behavior is the same as reading/writing a
923          * punched page (with fallocate(FALLOC_FL_PUNCH_HOLE)) from
924          * userspace.
925          */
926         TEST_ACCESS(guest_read64, no_af, CMD_HOLE_DATA),
927         TEST_ACCESS(guest_cas, no_af, CMD_HOLE_DATA),
928         TEST_ACCESS(guest_ld_preidx, no_af, CMD_HOLE_DATA),
929         TEST_ACCESS(guest_write64, no_af, CMD_HOLE_DATA),
930         TEST_ACCESS(guest_st_preidx, no_af, CMD_HOLE_DATA),
931         TEST_ACCESS(guest_at, no_af, CMD_HOLE_DATA),
932         TEST_ACCESS(guest_dc_zva, no_af, CMD_HOLE_DATA),
933
934         /*
935          * Punch holes in the data and PT backing stores and mark them for
936          * userfaultfd handling. This should result in 2 faults: the access
937          * on the data backing store, and its respective S1 page table walk
938          * (S1PTW).
939          */
940         TEST_UFFD(guest_read64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
941                   uffd_data_handler, uffd_pt_handler, 2),
942         TEST_UFFD(guest_read64, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
943                   uffd_data_handler, uffd_pt_handler, 2),
944         TEST_UFFD(guest_cas, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
945                   uffd_data_handler, uffd_pt_handler, 2),
946         /*
947          * Can't test guest_at with_af as it's IMPDEF whether the AF is set.
948          * The S1PTW fault should still be marked as a write.
949          */
950         TEST_UFFD(guest_at, no_af, CMD_HOLE_DATA | CMD_HOLE_PT,
951                   uffd_no_handler, uffd_pt_handler, 1),
952         TEST_UFFD(guest_ld_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
953                   uffd_data_handler, uffd_pt_handler, 2),
954         TEST_UFFD(guest_write64, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
955                   uffd_data_handler, uffd_pt_handler, 2),
956         TEST_UFFD(guest_dc_zva, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
957                   uffd_data_handler, uffd_pt_handler, 2),
958         TEST_UFFD(guest_st_preidx, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
959                   uffd_data_handler, uffd_pt_handler, 2),
960         TEST_UFFD(guest_exec, with_af, CMD_HOLE_DATA | CMD_HOLE_PT,
961                   uffd_data_handler, uffd_pt_handler, 2),
962
963         /*
964          * Try accesses when the data and PT memory regions are both
965          * tracked for dirty logging.
966          */
967         TEST_DIRTY_LOG(guest_read64, with_af, guest_check_no_write_in_dirty_log,
968                        guest_check_s1ptw_wr_in_dirty_log),
969         TEST_DIRTY_LOG(guest_read64, no_af, guest_check_no_write_in_dirty_log,
970                        guest_check_no_s1ptw_wr_in_dirty_log),
971         TEST_DIRTY_LOG(guest_ld_preidx, with_af,
972                        guest_check_no_write_in_dirty_log,
973                        guest_check_s1ptw_wr_in_dirty_log),
974         TEST_DIRTY_LOG(guest_at, no_af, guest_check_no_write_in_dirty_log,
975                        guest_check_no_s1ptw_wr_in_dirty_log),
976         TEST_DIRTY_LOG(guest_exec, with_af, guest_check_no_write_in_dirty_log,
977                        guest_check_s1ptw_wr_in_dirty_log),
978         TEST_DIRTY_LOG(guest_write64, with_af, guest_check_write_in_dirty_log,
979                        guest_check_s1ptw_wr_in_dirty_log),
980         TEST_DIRTY_LOG(guest_cas, with_af, guest_check_write_in_dirty_log,
981                        guest_check_s1ptw_wr_in_dirty_log),
982         TEST_DIRTY_LOG(guest_dc_zva, with_af, guest_check_write_in_dirty_log,
983                        guest_check_s1ptw_wr_in_dirty_log),
984         TEST_DIRTY_LOG(guest_st_preidx, with_af, guest_check_write_in_dirty_log,
985                        guest_check_s1ptw_wr_in_dirty_log),
986
987         /*
988          * Access when the data and PT memory regions are both marked for
989          * dirty logging and UFFD at the same time. The expected result is
990          * that writes should mark the dirty log and trigger a userfaultfd
991          * write fault.  Reads/execs should result in a read userfaultfd
992          * fault, and nothing in the dirty log.  Any S1PTW should result in
993          * a write in the dirty log and a userfaultfd write.
994          */
995         TEST_UFFD_AND_DIRTY_LOG(guest_read64, with_af,
996                                 uffd_data_handler, 2,
997                                 guest_check_no_write_in_dirty_log,
998                                 guest_check_s1ptw_wr_in_dirty_log),
999         TEST_UFFD_AND_DIRTY_LOG(guest_read64, no_af,
1000                                 uffd_data_handler, 2,
1001                                 guest_check_no_write_in_dirty_log,
1002                                 guest_check_no_s1ptw_wr_in_dirty_log),
1003         TEST_UFFD_AND_DIRTY_LOG(guest_ld_preidx, with_af,
1004                                 uffd_data_handler,
1005                                 2, guest_check_no_write_in_dirty_log,
1006                                 guest_check_s1ptw_wr_in_dirty_log),
1007         TEST_UFFD_AND_DIRTY_LOG(guest_at, with_af, uffd_no_handler, 1,
1008                                 guest_check_no_write_in_dirty_log,
1009                                 guest_check_s1ptw_wr_in_dirty_log),
1010         TEST_UFFD_AND_DIRTY_LOG(guest_exec, with_af,
1011                                 uffd_data_handler, 2,
1012                                 guest_check_no_write_in_dirty_log,
1013                                 guest_check_s1ptw_wr_in_dirty_log),
1014         TEST_UFFD_AND_DIRTY_LOG(guest_write64, with_af,
1015                                 uffd_data_handler,
1016                                 2, guest_check_write_in_dirty_log,
1017                                 guest_check_s1ptw_wr_in_dirty_log),
1018         TEST_UFFD_AND_DIRTY_LOG(guest_cas, with_af,
1019                                 uffd_data_handler, 2,
1020                                 guest_check_write_in_dirty_log,
1021                                 guest_check_s1ptw_wr_in_dirty_log),
1022         TEST_UFFD_AND_DIRTY_LOG(guest_dc_zva, with_af,
1023                                 uffd_data_handler,
1024                                 2, guest_check_write_in_dirty_log,
1025                                 guest_check_s1ptw_wr_in_dirty_log),
1026         TEST_UFFD_AND_DIRTY_LOG(guest_st_preidx, with_af,
1027                                 uffd_data_handler, 2,
1028                                 guest_check_write_in_dirty_log,
1029                                 guest_check_s1ptw_wr_in_dirty_log),
1030         /*
1031          * Access when both the PT and data regions are marked read-only
1032          * (with KVM_MEM_READONLY). Writes with a syndrome result in an
1033          * MMIO exit, writes with no syndrome (e.g., CAS) result in a
1034          * failed vcpu run, and reads/execs with and without syndromes do
1035          * not fault.
1036          */
1037         TEST_RO_MEMSLOT(guest_read64, 0, 0),
1038         TEST_RO_MEMSLOT(guest_ld_preidx, 0, 0),
1039         TEST_RO_MEMSLOT(guest_at, 0, 0),
1040         TEST_RO_MEMSLOT(guest_exec, 0, 0),
1041         TEST_RO_MEMSLOT(guest_write64, mmio_on_test_gpa_handler, 1),
1042         TEST_RO_MEMSLOT_NO_SYNDROME(guest_dc_zva),
1043         TEST_RO_MEMSLOT_NO_SYNDROME(guest_cas),
1044         TEST_RO_MEMSLOT_NO_SYNDROME(guest_st_preidx),
1045
1046         /*
1047          * The PT and data regions are both read-only and marked
1048          * for dirty logging at the same time. The expected result is that
1049          * for writes there should be no write in the dirty log. The
1050          * readonly handling is the same as if the memslot was not marked
1051          * for dirty logging: writes with a syndrome result in an MMIO
1052          * exit, and writes with no syndrome result in a failed vcpu run.
1053          */
1054         TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_read64, 0, 0,
1055                                       guest_check_no_write_in_dirty_log),
1056         TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_ld_preidx, 0, 0,
1057                                       guest_check_no_write_in_dirty_log),
1058         TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_at, 0, 0,
1059                                       guest_check_no_write_in_dirty_log),
1060         TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_exec, 0, 0,
1061                                       guest_check_no_write_in_dirty_log),
1062         TEST_RO_MEMSLOT_AND_DIRTY_LOG(guest_write64, mmio_on_test_gpa_handler,
1063                                       1, guest_check_no_write_in_dirty_log),
1064         TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_dc_zva,
1065                                                   guest_check_no_write_in_dirty_log),
1066         TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_cas,
1067                                                   guest_check_no_write_in_dirty_log),
1068         TEST_RO_MEMSLOT_NO_SYNDROME_AND_DIRTY_LOG(guest_st_preidx,
1069                                                   guest_check_no_write_in_dirty_log),
1070
1071         /*
1072          * The PT and data regions are both read-only and punched with
1073          * holes tracked with userfaultfd.  The expected result is the
1074          * union of both userfaultfd and read-only behaviors. For example,
1075          * write accesses result in a userfaultfd write fault and an MMIO
1076          * exit.  Writes with no syndrome result in a failed vcpu run and
1077          * no userfaultfd write fault. Reads result in userfaultfd getting
1078          * triggered.
1079          */
1080         TEST_RO_MEMSLOT_AND_UFFD(guest_read64, 0, 0, uffd_data_handler, 2),
1081         TEST_RO_MEMSLOT_AND_UFFD(guest_ld_preidx, 0, 0, uffd_data_handler, 2),
1082         TEST_RO_MEMSLOT_AND_UFFD(guest_at, 0, 0, uffd_no_handler, 1),
1083         TEST_RO_MEMSLOT_AND_UFFD(guest_exec, 0, 0, uffd_data_handler, 2),
1084         TEST_RO_MEMSLOT_AND_UFFD(guest_write64, mmio_on_test_gpa_handler, 1,
1085                                  uffd_data_handler, 2),
1086         TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_cas, uffd_data_handler, 2),
1087         TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_dc_zva, uffd_no_handler, 1),
1088         TEST_RO_MEMSLOT_NO_SYNDROME_AND_UFFD(guest_st_preidx, uffd_no_handler, 1),
1089
1090         { 0 }
1091 };
1092
1093 static void for_each_test_and_guest_mode(enum vm_mem_backing_src_type src_type)
1094 {
1095         struct test_desc *t;
1096
1097         for (t = &tests[0]; t->name; t++) {
1098                 if (t->skip)
1099                         continue;
1100
1101                 struct test_params p = {
1102                         .src_type = src_type,
1103                         .test_desc = t,
1104                 };
1105
1106                 for_each_guest_mode(run_test, &p);
1107         }
1108 }
1109
1110 int main(int argc, char *argv[])
1111 {
1112         enum vm_mem_backing_src_type src_type;
1113         int opt;
1114
1115         src_type = DEFAULT_VM_MEM_SRC;
1116
1117         while ((opt = getopt(argc, argv, "hm:s:")) != -1) {
1118                 switch (opt) {
1119                 case 'm':
1120                         guest_modes_cmdline(optarg);
1121                         break;
1122                 case 's':
1123                         src_type = parse_backing_src_type(optarg);
1124                         break;
1125                 case 'h':
1126                 default:
1127                         help(argv[0]);
1128                         exit(0);
1129                 }
1130         }
1131
1132         for_each_test_and_guest_mode(src_type);
1133         return 0;
1134 }