1 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
2 ; RUN: opt < %s -msan -msan-check-access-address=0 -msan-track-origins=1 -S | FileCheck -check-prefix=CHECK-ORIGINS %s
3 ; RUN: opt < %s -msan -msan-check-access-address=1 -S | FileCheck %s -check-prefix=CHECK-AA
5 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
6 target triple = "x86_64-unknown-linux-gnu"
8 ; Check the presence of __msan_init
9 ; CHECK: @llvm.global_ctors {{.*}} @__msan_init
11 ; Check the presence and the linkage type of __msan_track_origins and
12 ; other interface symbols.
13 ; CHECK-NOT: @__msan_track_origins
14 ; CHECK-ORIGINS: @__msan_track_origins = weak_odr constant i32 1
15 ; CHECK-NOT: @__msan_keep_going = weak_odr constant i32 0
16 ; CHECK: @__msan_retval_tls = external thread_local(initialexec) global [{{.*}}]
17 ; CHECK: @__msan_retval_origin_tls = external thread_local(initialexec) global i32
18 ; CHECK: @__msan_param_tls = external thread_local(initialexec) global [{{.*}}]
19 ; CHECK: @__msan_param_origin_tls = external thread_local(initialexec) global [{{.*}}]
20 ; CHECK: @__msan_va_arg_tls = external thread_local(initialexec) global [{{.*}}]
21 ; CHECK: @__msan_va_arg_overflow_size_tls = external thread_local(initialexec) global i64
22 ; CHECK: @__msan_origin_tls = external thread_local(initialexec) global i32
25 ; Check instrumentation of stores
27 define void @Store(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
29 store i32 %x, i32* %p, align 4
34 ; CHECK: load {{.*}} @__msan_param_tls
38 ; CHECK-ORIGINS: @Store
39 ; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
40 ; CHECK-ORIGINS: store
42 ; CHECK-ORIGINS: br i1
43 ; CHECK-ORIGINS: <label>
44 ; CHECK-ORIGINS: store
45 ; CHECK-ORIGINS: br label
46 ; CHECK-ORIGINS: <label>
47 ; CHECK-ORIGINS: store
48 ; CHECK-ORIGINS: ret void
51 ; Check instrumentation of aligned stores
52 ; Shadow store has the same alignment as the original store; origin store
53 ; does not specify explicit alignment.
55 define void @AlignedStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
57 store i32 %x, i32* %p, align 32
61 ; CHECK: @AlignedStore
62 ; CHECK: load {{.*}} @__msan_param_tls
63 ; CHECK: store {{.*}} align 32
64 ; CHECK: store {{.*}} align 32
66 ; CHECK-ORIGINS: @AlignedStore
67 ; CHECK-ORIGINS: load {{.*}} @__msan_param_tls
68 ; CHECK-ORIGINS: store {{.*}} align 32
70 ; CHECK-ORIGINS: br i1
71 ; CHECK-ORIGINS: <label>
72 ; CHECK-ORIGINS: store {{.*}} align 32
73 ; CHECK-ORIGINS: br label
74 ; CHECK-ORIGINS: <label>
75 ; CHECK-ORIGINS: store {{.*}} align 32
76 ; CHECK-ORIGINS: ret void
79 ; load followed by cmp: check that we load the shadow and call __msan_warning.
80 define void @LoadAndCmp(i32* nocapture %a) nounwind uwtable sanitize_memory {
82 %0 = load i32* %a, align 4
83 %tobool = icmp eq i32 %0, 0
84 br i1 %tobool, label %if.end, label %if.then
86 if.then: ; preds = %entry
87 tail call void (...)* @foo() nounwind
90 if.end: ; preds = %entry, %if.then
94 declare void @foo(...)
99 ; CHECK: call void @__msan_warning_noreturn()
100 ; CHECK-NEXT: call void asm sideeffect
101 ; CHECK-NEXT: unreachable
104 ; Check that we store the shadow for the retval.
105 define i32 @ReturnInt() nounwind uwtable readnone sanitize_memory {
111 ; CHECK: store i32 0,{{.*}}__msan_retval_tls
114 ; Check that we get the shadow for the retval.
115 define void @CopyRetVal(i32* nocapture %a) nounwind uwtable sanitize_memory {
117 %call = tail call i32 @ReturnInt() nounwind
118 store i32 %call, i32* %a, align 4
123 ; CHECK: load{{.*}}__msan_retval_tls
129 ; Check that we generate PHIs for shadow.
130 define void @FuncWithPhi(i32* nocapture %a, i32* %b, i32* nocapture %c) nounwind uwtable sanitize_memory {
132 %tobool = icmp eq i32* %b, null
133 br i1 %tobool, label %if.else, label %if.then
135 if.then: ; preds = %entry
136 %0 = load i32* %b, align 4
139 if.else: ; preds = %entry
140 %1 = load i32* %c, align 4
143 if.end: ; preds = %if.else, %if.then
144 %t.0 = phi i32 [ %0, %if.then ], [ %1, %if.else ]
145 store i32 %t.0, i32* %a, align 4
149 ; CHECK: @FuncWithPhi
156 ; Compute shadow for "x << 10"
157 define void @ShlConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
159 %0 = load i32* %x, align 4
161 store i32 %1, i32* %x, align 4
174 ; Compute shadow for "10 << x": it should have 'sext i1'.
175 define void @ShlNonConst(i32* nocapture %x) nounwind uwtable sanitize_memory {
177 %0 = load i32* %x, align 4
179 store i32 %1, i32* %x, align 4
183 ; CHECK: @ShlNonConst
192 define void @SExt(i32* nocapture %a, i16* nocapture %b) nounwind uwtable sanitize_memory {
194 %0 = load i16* %b, align 2
195 %1 = sext i16 %0 to i32
196 store i32 %1, i32* %a, align 4
211 define void @MemSet(i8* nocapture %x) nounwind uwtable sanitize_memory {
213 call void @llvm.memset.p0i8.i64(i8* %x, i8 42, i64 10, i32 1, i1 false)
217 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
220 ; CHECK: call i8* @__msan_memset
225 define void @MemCpy(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
227 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
231 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
234 ; CHECK: call i8* @__msan_memcpy
238 ; memmove is lowered to a call
239 define void @MemMove(i8* nocapture %x, i8* nocapture %y) nounwind uwtable sanitize_memory {
241 call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 10, i32 1, i1 false)
245 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
248 ; CHECK: call i8* @__msan_memmove
252 ; Check that we propagate shadow for "select"
254 define i32 @Select(i32 %a, i32 %b, i32 %c) nounwind uwtable readnone sanitize_memory {
256 %tobool = icmp ne i32 %c, 0
257 %cond = select i1 %tobool, i32 %a, i32 %b
263 ; CHECK-NEXT: sext i1 {{.*}} to i32
269 ; Check that we propagate origin for "select" with vector condition.
270 ; Select condition is flattened to i1, which is then used to select one of the
273 define <8 x i16> @SelectVector(<8 x i16> %a, <8 x i16> %b, <8 x i1> %c) nounwind uwtable readnone sanitize_memory {
275 %cond = select <8 x i1> %c, <8 x i16> %a, <8 x i16> %b
279 ; CHECK: @SelectVector
280 ; CHECK: select <8 x i1>
281 ; CHECK-NEXT: sext <8 x i1> {{.*}} to <8 x i16>
282 ; CHECK-NEXT: or <8 x i16>
283 ; CHECK-NEXT: select <8 x i1>
284 ; CHECK: ret <8 x i16>
286 ; CHECK-ORIGINS: @SelectVector
287 ; CHECK-ORIGINS: bitcast <8 x i1> {{.*}} to i8
288 ; CHECK-ORIGINS: icmp ne i8
289 ; CHECK-ORIGINS: select i1
290 ; CHECK-ORIGINS: ret <8 x i16>
293 ; Check that we propagate origin for "select" with scalar condition and vector
294 ; arguments. Select condition shadow is sign-extended to the vector type and
295 ; mixed into the result shadow.
297 define <8 x i16> @SelectVector2(<8 x i16> %a, <8 x i16> %b, i1 %c) nounwind uwtable readnone sanitize_memory {
299 %cond = select i1 %c, <8 x i16> %a, <8 x i16> %b
303 ; CHECK: @SelectVector2
305 ; CHECK: sext i1 {{.*}} to i128
306 ; CHECK: bitcast i128 {{.*}} to <8 x i16>
307 ; CHECK: or <8 x i16>
309 ; CHECK: ret <8 x i16>
312 define { i64, i64 } @SelectStruct(i1 zeroext %x, { i64, i64 } %a, { i64, i64 } %b) readnone sanitize_memory {
314 %c = select i1 %x, { i64, i64 } %a, { i64, i64 } %b
318 ; CHECK: @SelectStruct
319 ; CHECK: select i1 {{.*}}, { i64, i64 }
320 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 } { i64 -1, i64 -1 }, { i64, i64 }
321 ; CHECK-NEXT: select i1 {{.*}}, { i64, i64 }
322 ; CHECK: ret { i64, i64 }
325 define i8* @IntToPtr(i64 %x) nounwind uwtable readnone sanitize_memory {
327 %0 = inttoptr i64 %x to i8*
332 ; CHECK: load i64*{{.*}}__msan_param_tls
333 ; CHECK-NEXT: inttoptr
334 ; CHECK-NEXT: store i64{{.*}}__msan_retval_tls
338 define i8* @IntToPtr_ZExt(i16 %x) nounwind uwtable readnone sanitize_memory {
340 %0 = inttoptr i16 %x to i8*
344 ; CHECK: @IntToPtr_ZExt
346 ; CHECK-NEXT: inttoptr
350 ; Check that we insert exactly one check on udiv
351 ; (2nd arg shadow is checked, 1st arg shadow is propagated)
353 define i32 @Div(i32 %a, i32 %b) nounwind uwtable readnone sanitize_memory {
355 %div = udiv i32 %a, %b
361 ; CHECK: call void @__msan_warning
368 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
370 define zeroext i1 @ICmpSLT(i32 %x) nounwind uwtable readnone sanitize_memory {
371 %1 = icmp slt i32 %x, 0
377 ; CHECK-NOT: call void @__msan_warning
379 ; CHECK-NOT: call void @__msan_warning
382 define zeroext i1 @ICmpSGE(i32 %x) nounwind uwtable readnone sanitize_memory {
383 %1 = icmp sge i32 %x, 0
389 ; CHECK-NOT: call void @__msan_warning
391 ; CHECK-NOT: call void @__msan_warning
394 define zeroext i1 @ICmpSGT(i32 %x) nounwind uwtable readnone sanitize_memory {
395 %1 = icmp sgt i32 0, %x
401 ; CHECK-NOT: call void @__msan_warning
403 ; CHECK-NOT: call void @__msan_warning
406 define zeroext i1 @ICmpSLE(i32 %x) nounwind uwtable readnone sanitize_memory {
407 %1 = icmp sle i32 0, %x
413 ; CHECK-NOT: call void @__msan_warning
415 ; CHECK-NOT: call void @__msan_warning
419 ; Check that we propagate shadow for x<0, x>=0, etc (i.e. sign bit tests)
420 ; of the vector arguments.
422 define <2 x i1> @ICmpSLT_vector(<2 x i32*> %x) nounwind uwtable readnone sanitize_memory {
423 %1 = icmp slt <2 x i32*> %x, zeroinitializer
427 ; CHECK: @ICmpSLT_vector
428 ; CHECK: icmp slt <2 x i64>
429 ; CHECK-NOT: call void @__msan_warning
430 ; CHECK: icmp slt <2 x i32*>
431 ; CHECK-NOT: call void @__msan_warning
432 ; CHECK: ret <2 x i1>
435 ; Check that we propagate shadow for unsigned relational comparisons with
438 define zeroext i1 @ICmpUGTConst(i32 %x) nounwind uwtable readnone sanitize_memory {
440 %cmp = icmp ugt i32 %x, 7
444 ; CHECK: @ICmpUGTConst
445 ; CHECK: icmp ugt i32
446 ; CHECK-NOT: call void @__msan_warning
447 ; CHECK: icmp ugt i32
448 ; CHECK-NOT: call void @__msan_warning
449 ; CHECK: icmp ugt i32
450 ; CHECK-NOT: call void @__msan_warning
454 ; Check that loads of shadow have the same alignment as the original loads.
455 ; Check that loads of origin have the alignment of max(4, original alignment).
457 define i32 @ShadowLoadAlignmentLarge() nounwind uwtable sanitize_memory {
458 %y = alloca i32, align 64
459 %1 = load volatile i32* %y, align 64
463 ; CHECK: @ShadowLoadAlignmentLarge
464 ; CHECK: load volatile i32* {{.*}} align 64
465 ; CHECK: load i32* {{.*}} align 64
468 define i32 @ShadowLoadAlignmentSmall() nounwind uwtable sanitize_memory {
469 %y = alloca i32, align 2
470 %1 = load volatile i32* %y, align 2
474 ; CHECK: @ShadowLoadAlignmentSmall
475 ; CHECK: load volatile i32* {{.*}} align 2
476 ; CHECK: load i32* {{.*}} align 2
479 ; CHECK-ORIGINS: @ShadowLoadAlignmentSmall
480 ; CHECK-ORIGINS: load volatile i32* {{.*}} align 2
481 ; CHECK-ORIGINS: load i32* {{.*}} align 2
482 ; CHECK-ORIGINS: load i32* {{.*}} align 4
483 ; CHECK-ORIGINS: ret i32
486 ; Test vector manipulation instructions.
487 ; Check that the same bit manipulation is applied to the shadow values.
488 ; Check that there is a zero test of the shadow of %idx argument, where present.
490 define i32 @ExtractElement(<4 x i32> %vec, i32 %idx) sanitize_memory {
491 %x = extractelement <4 x i32> %vec, i32 %idx
495 ; CHECK: @ExtractElement
496 ; CHECK: extractelement
497 ; CHECK: call void @__msan_warning
498 ; CHECK: extractelement
501 define <4 x i32> @InsertElement(<4 x i32> %vec, i32 %idx, i32 %x) sanitize_memory {
502 %vec1 = insertelement <4 x i32> %vec, i32 %x, i32 %idx
506 ; CHECK: @InsertElement
507 ; CHECK: insertelement
508 ; CHECK: call void @__msan_warning
509 ; CHECK: insertelement
510 ; CHECK: ret <4 x i32>
512 define <4 x i32> @ShuffleVector(<4 x i32> %vec, <4 x i32> %vec1) sanitize_memory {
513 %vec2 = shufflevector <4 x i32> %vec, <4 x i32> %vec1,
514 <4 x i32> <i32 0, i32 4, i32 1, i32 5>
518 ; CHECK: @ShuffleVector
519 ; CHECK: shufflevector
520 ; CHECK-NOT: call void @__msan_warning
521 ; CHECK: shufflevector
522 ; CHECK: ret <4 x i32>
525 ; Test bswap intrinsic instrumentation
526 define i32 @BSwap(i32 %x) nounwind uwtable readnone sanitize_memory {
527 %y = tail call i32 @llvm.bswap.i32(i32 %x)
531 declare i32 @llvm.bswap.i32(i32) nounwind readnone
534 ; CHECK-NOT: call void @__msan_warning
535 ; CHECK: @llvm.bswap.i32
536 ; CHECK-NOT: call void @__msan_warning
537 ; CHECK: @llvm.bswap.i32
538 ; CHECK-NOT: call void @__msan_warning
544 define void @StoreIntrinsic(i8* %p, <4 x float> %x) nounwind uwtable sanitize_memory {
545 call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
549 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
551 ; CHECK: @StoreIntrinsic
554 ; CHECK: store <4 x i32> {{.*}} align 1
555 ; CHECK: call void @llvm.x86.sse.storeu.ps
561 define <16 x i8> @LoadIntrinsic(i8* %p) nounwind uwtable sanitize_memory {
562 %call = call <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p)
566 declare <16 x i8> @llvm.x86.sse3.ldu.dq(i8* %p) nounwind
568 ; CHECK: @LoadIntrinsic
569 ; CHECK: load <16 x i8>* {{.*}} align 1
572 ; CHECK: call <16 x i8> @llvm.x86.sse3.ldu.dq
573 ; CHECK: store <16 x i8> {{.*}} @__msan_retval_tls
574 ; CHECK: ret <16 x i8>
576 ; CHECK-ORIGINS: @LoadIntrinsic
577 ; CHECK-ORIGINS: [[ORIGIN:%[01-9a-z]+]] = load i32* {{.*}}
578 ; CHECK-ORIGINS: call <16 x i8> @llvm.x86.sse3.ldu.dq
579 ; CHECK-ORIGINS: store i32 {{.*}}[[ORIGIN]], i32* @__msan_retval_origin_tls
580 ; CHECK-ORIGINS: ret <16 x i8>
583 ; Simple NoMem intrinsic
584 ; Check that shadow is OR'ed, and origin is Select'ed
585 ; And no shadow checks!
587 define <8 x i16> @Paddsw128(<8 x i16> %a, <8 x i16> %b) nounwind uwtable sanitize_memory {
588 %call = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b)
592 declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %a, <8 x i16> %b) nounwind
595 ; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
596 ; CHECK-NEXT: load <8 x i16>* {{.*}} @__msan_param_tls
597 ; CHECK-NEXT: = or <8 x i16>
598 ; CHECK-NEXT: call <8 x i16> @llvm.x86.sse2.padds.w
599 ; CHECK-NEXT: store <8 x i16> {{.*}} @__msan_retval_tls
600 ; CHECK-NEXT: ret <8 x i16>
602 ; CHECK-ORIGINS: @Paddsw128
603 ; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
604 ; CHECK-ORIGINS: load i32* {{.*}} @__msan_param_origin_tls
605 ; CHECK-ORIGINS: = bitcast <8 x i16> {{.*}} to i128
606 ; CHECK-ORIGINS-NEXT: = icmp ne i128 {{.*}}, 0
607 ; CHECK-ORIGINS-NEXT: = select i1 {{.*}}, i32 {{.*}}, i32
608 ; CHECK-ORIGINS: call <8 x i16> @llvm.x86.sse2.padds.w
609 ; CHECK-ORIGINS: store i32 {{.*}} @__msan_retval_origin_tls
610 ; CHECK-ORIGINS: ret <8 x i16>
613 ; Test handling of vectors of pointers.
614 ; Check that shadow of such vector is a vector of integers.
616 define <8 x i8*> @VectorOfPointers(<8 x i8*>* %p) nounwind uwtable sanitize_memory {
617 %x = load <8 x i8*>* %p
621 ; CHECK: @VectorOfPointers
622 ; CHECK: load <8 x i8*>*
623 ; CHECK: load <8 x i64>*
624 ; CHECK: store <8 x i64> {{.*}} @__msan_retval_tls
625 ; CHECK: ret <8 x i8*>
627 ; Test handling of va_copy.
629 declare void @llvm.va_copy(i8*, i8*) nounwind
631 define void @VACopy(i8* %p1, i8* %p2) nounwind uwtable sanitize_memory {
632 call void @llvm.va_copy(i8* %p1, i8* %p2) nounwind
637 ; CHECK: call void @llvm.memset.p0i8.i64({{.*}}, i8 0, i64 24, i32 8, i1 false)
641 ; Test that va_start instrumentation does not use va_arg_tls*.
642 ; It should work with a local stack copy instead.
644 %struct.__va_list_tag = type { i32, i32, i8*, i8* }
645 declare void @llvm.va_start(i8*) nounwind
647 ; Function Attrs: nounwind uwtable
648 define void @VAStart(i32 %x, ...) {
650 %x.addr = alloca i32, align 4
651 %va = alloca [1 x %struct.__va_list_tag], align 16
652 store i32 %x, i32* %x.addr, align 4
653 %arraydecay = getelementptr inbounds [1 x %struct.__va_list_tag]* %va, i32 0, i32 0
654 %arraydecay1 = bitcast %struct.__va_list_tag* %arraydecay to i8*
655 call void @llvm.va_start(i8* %arraydecay1)
660 ; CHECK: call void @llvm.va_start
661 ; CHECK-NOT: @__msan_va_arg_tls
662 ; CHECK-NOT: @__msan_va_arg_overflow_size_tls
666 ; Test handling of volatile stores.
667 ; Check that MemorySanitizer does not add a check of the value being stored.
669 define void @VolatileStore(i32* nocapture %p, i32 %x) nounwind uwtable sanitize_memory {
671 store volatile i32 %x, i32* %p, align 4
675 ; CHECK: @VolatileStore
676 ; CHECK-NOT: @__msan_warning
680 ; Test that checks are omitted but shadow propagation is kept if
681 ; sanitize_memory attribute is missing.
683 define i32 @NoSanitizeMemory(i32 %x) uwtable {
685 %tobool = icmp eq i32 %x, 0
686 br i1 %tobool, label %if.end, label %if.then
688 if.then: ; preds = %entry
689 tail call void @bar()
692 if.end: ; preds = %entry, %if.then
698 ; CHECK: @NoSanitizeMemory
699 ; CHECK-NOT: @__msan_warning
700 ; CHECK: load i32* {{.*}} @__msan_param_tls
701 ; CHECK-NOT: @__msan_warning
702 ; CHECK: store {{.*}} @__msan_retval_tls
703 ; CHECK-NOT: @__msan_warning
707 ; Test that stack allocations are unpoisoned in functions missing
708 ; sanitize_memory attribute
710 define i32 @NoSanitizeMemoryAlloca() {
712 %p = alloca i32, align 4
713 %x = call i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
717 declare i32 @NoSanitizeMemoryAllocaHelper(i32* %p)
719 ; CHECK: @NoSanitizeMemoryAlloca
720 ; CHECK: call void @llvm.memset.p0i8.i64(i8* {{.*}}, i8 0, i64 4, i32 4, i1 false)
721 ; CHECK: call i32 @NoSanitizeMemoryAllocaHelper(i32*
725 ; Test that undef is unpoisoned in functions missing
726 ; sanitize_memory attribute
728 define i32 @NoSanitizeMemoryUndef() {
730 %x = call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
734 declare i32 @NoSanitizeMemoryUndefHelper(i32 %x)
736 ; CHECK: @NoSanitizeMemoryAlloca
737 ; CHECK: store i32 0, i32* {{.*}} @__msan_param_tls
738 ; CHECK: call i32 @NoSanitizeMemoryUndefHelper(i32 undef)
742 ; Test argument shadow alignment
744 define <2 x i64> @ArgumentShadowAlignment(i64 %a, <2 x i64> %b) sanitize_memory {
749 ; CHECK: @ArgumentShadowAlignment
750 ; CHECK: load <2 x i64>* {{.*}} @__msan_param_tls {{.*}}, align 8
751 ; CHECK: store <2 x i64> {{.*}} @__msan_retval_tls {{.*}}, align 8
752 ; CHECK: ret <2 x i64>
755 ; Test byval argument shadow alignment
757 define <2 x i64> @ByValArgumentShadowLargeAlignment(<2 x i64>* byval %p) sanitize_memory {
759 %x = load <2 x i64>* %p
763 ; CHECK-AA: @ByValArgumentShadowLargeAlignment
764 ; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 16, i32 8, i1 false)
765 ; CHECK-AA: ret <2 x i64>
768 define i16 @ByValArgumentShadowSmallAlignment(i16* byval %p) sanitize_memory {
774 ; CHECK-AA: @ByValArgumentShadowSmallAlignment
775 ; CHECK-AA: call void @llvm.memcpy.p0i8.p0i8.i64(i8* {{.*}}, i8* {{.*}}, i64 2, i32 2, i1 false)
779 ; Test origin propagation for insertvalue
781 define { i64, i32 } @make_pair_64_32(i64 %x, i32 %y) sanitize_memory {
783 %a = insertvalue { i64, i32 } undef, i64 %x, 0
784 %b = insertvalue { i64, i32 } %a, i32 %y, 1
788 ; CHECK-ORIGINS: @make_pair_64_32
789 ; First element shadow
790 ; CHECK-ORIGINS: insertvalue { i64, i32 } { i64 -1, i32 -1 }, i64 {{.*}}, 0
791 ; First element origin
792 ; CHECK-ORIGINS: icmp ne i64
793 ; CHECK-ORIGINS: select i1
794 ; First element app value
795 ; CHECK-ORIGINS: insertvalue { i64, i32 } undef, i64 {{.*}}, 0
796 ; Second element shadow
797 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
798 ; Second element origin
799 ; CHECK-ORIGINS: icmp ne i32
800 ; CHECK-ORIGINS: select i1
801 ; Second element app value
802 ; CHECK-ORIGINS: insertvalue { i64, i32 } {{.*}}, i32 {{.*}}, 1
803 ; CHECK-ORIGINS: ret { i64, i32 }