; RUN: llc < %s -mtriple=i686-windows | FileCheck %s -check-prefix=NORMAL
; RUN: llc < %s -mtriple=x86_64-windows | FileCheck %s -check-prefix=X64
-; RUN: llc < %s -mtriple=i686-windows -force-align-stack -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
+; RUN: llc < %s -mtriple=i686-windows -stackrealign -stack-alignment=32 | FileCheck %s -check-prefix=ALIGNED
+
+%class.Class = type { i32 }
+%struct.s = type { i64 }
declare void @good(i32 %a, i32 %b, i32 %c, i32 %d)
declare void @inreg(i32 %a, i32 inreg %b, i32 %c, i32 %d)
+declare x86_thiscallcc void @thiscall(%class.Class* %class, i32 %a, i32 %b, i32 %c, i32 %d)
declare void @oneparam(i32 %a)
declare void @eightparams(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f, i32 %g, i32 %h)
-
+declare void @struct(%struct.s* byval %a, i32 %b, i32 %c, i32 %d)
; Here, we should have a reserved frame, so we don't expect pushes
; NORMAL-LABEL: test1:
ret void
}
-; We don't support weird calling conventions
+; We support weird calling conventions
; NORMAL-LABEL: test4:
-; NORMAL: subl $12, %esp
-; NORMAL-NEXT: movl $4, 8(%esp)
-; NORMAL-NEXT: movl $3, 4(%esp)
-; NORMAL-NEXT: movl $1, (%esp)
-; NORMAL-NEXT: movl $2, %eax
+; NORMAL: movl $2, %eax
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $12, %esp
define void @test4() optsize {
ret void
}
+; NORMAL-LABEL: test4b:
+; NORMAL: movl 4(%esp), %ecx
+; NORMAL-NEXT: pushl $4
+; NORMAL-NEXT: pushl $3
+; NORMAL-NEXT: pushl $2
+; NORMAL-NEXT: pushl $1
+; NORMAL-NEXT: call
+; NORMAL-NEXT: ret
+define void @test4b(%class.Class* %f) optsize {
+entry:
+  ; thiscall: per the CHECK lines above, %f is moved into ecx and only the
+  ; four i32 arguments are materialized as pushes
+  call x86_thiscallcc void @thiscall(%class.Class* %f, i32 1, i32 2, i32 3, i32 4)
+  ret void
+}
+
; When there is no reserved call frame, check that additional alignment
; is added when the pushes don't add up to the required alignment.
; ALIGNED-LABEL: test5:
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: call
; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $16, %esp
-; NORMAL-NEXT: leal 16(%esp), [[EAX:%e..]]
-; NORMAL-NEXT: movl [[EAX]], 12(%esp)
-; NORMAL-NEXT: movl $7, 8(%esp)
-; NORMAL-NEXT: movl $6, 4(%esp)
-; NORMAL-NEXT: movl $5, (%esp)
+; NORMAL-NEXT: subl $20, %esp
+; NORMAL-NEXT: movl 20(%esp), [[E1:%e..]]
+; NORMAL-NEXT: movl 24(%esp), [[E2:%e..]]
+; NORMAL-NEXT: movl [[E2]], 4(%esp)
+; NORMAL-NEXT: movl [[E1]], (%esp)
+; NORMAL-NEXT: leal 32(%esp), [[E3:%e..]]
+; NORMAL-NEXT: movl [[E3]], 16(%esp)
+; NORMAL-NEXT: leal 28(%esp), [[E4:%e..]]
+; NORMAL-NEXT: movl [[E4]], 12(%esp)
+; NORMAL-NEXT: movl $6, 8(%esp)
; NORMAL-NEXT: call
-; NORMAL-NEXT: addl $16, %esp
+; NORMAL-NEXT: addl $20, %esp
define void @test9() optsize {
entry:
  %p = alloca i32, align 4
+  %q = alloca i32, align 4
+  %s = alloca %struct.s, align 4
  call void @good(i32 1, i32 2, i32 3, i32 4)
-  %0 = ptrtoint i32* %p to i32
-  call void @good(i32 5, i32 6, i32 7, i32 %0)
+  %pv = ptrtoint i32* %p to i32
+  %qv = ptrtoint i32* %q to i32
+  ; second call mixes a byval struct argument with i32s derived from the
+  ; alloca addresses; the CHECK lines above expect movs (not pushes) here
+  call void @struct(%struct.s* byval %s, i32 6, i32 %qv, i32 %pv)
  ret void
}
; Converting one mov into a push isn't worth it when
; doing so forces too much overhead for other calls.
; NORMAL-LABEL: test12:
-; NORMAL: subl $16, %esp
-; NORMAL-NEXT: movl $4, 8(%esp)
-; NORMAL-NEXT: movl $3, 4(%esp)
-; NORMAL-NEXT: movl $1, (%esp)
-; NORMAL-NEXT: movl $2, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: movl $8, 12(%esp)
+; NORMAL: movl $8, 12(%esp)
; NORMAL-NEXT: movl $7, 8(%esp)
; NORMAL-NEXT: movl $6, 4(%esp)
; NORMAL-NEXT: movl $5, (%esp)
; NORMAL-NEXT: calll _good
-; NORMAL-NEXT: movl $12, 8(%esp)
-; NORMAL-NEXT: movl $11, 4(%esp)
-; NORMAL-NEXT: movl $9, (%esp)
-; NORMAL-NEXT: movl $10, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: addl $16, %esp
define void @test12() optsize {
entry:
-  call void @inreg(i32 1, i32 2, i32 3, i32 4)
+  %s = alloca %struct.s, align 4
+  ; surrounding the @good call with byval-struct calls: converting @good's
+  ; movs into pushes is not worth it here (see comment above the CHECK lines)
+  call void @struct(%struct.s* %s, i32 2, i32 3, i32 4)
  call void @good(i32 5, i32 6, i32 7, i32 8)
-  call void @inreg(i32 9, i32 10, i32 11, i32 12)
+  call void @struct(%struct.s* %s, i32 10, i32 11, i32 12)
  ret void
}
; NORMAL-NEXT: pushl $1
; NORMAL-NEXT: calll _good
; NORMAL-NEXT: addl $16, %esp
-; NORMAL-NEXT: subl $12, %esp
-; NORMAL-NEXT: movl $8, 8(%esp)
-; NORMAL-NEXT: movl $7, 4(%esp)
-; NORMAL-NEXT: movl $5, (%esp)
-; NORMAL-NEXT: movl $6, %eax
-; NORMAL-NEXT: calll _inreg
-; NORMAL-NEXT: addl $12, %esp
+; NORMAL-NEXT: subl $20, %esp
+; NORMAL: movl $8, 16(%esp)
+; NORMAL-NEXT: movl $7, 12(%esp)
+; NORMAL-NEXT: movl $6, 8(%esp)
+; NORMAL-NEXT: calll _struct
+; NORMAL-NEXT: addl $20, %esp
; NORMAL-NEXT: pushl $12
; NORMAL-NEXT: pushl $11
; NORMAL-NEXT: pushl $10
; NORMAL-NEXT: addl $16, %esp
define void @test12b() optsize {
entry:
-  call void @good(i32 1, i32 2, i32 3, i32 4)
-  call void @inreg(i32 5, i32 6, i32 7, i32 8)
+  %s = alloca %struct.s, align 4
+  call void @good(i32 1, i32 2, i32 3, i32 4)
+  ; middle call takes the struct byval; the CHECK lines above expect it to
+  ; use the sub/mov/add sequence while the @good calls still use pushes
+  call void @struct(%struct.s* %s, i32 6, i32 7, i32 8)
  call void @good(i32 9, i32 10, i32 11, i32 12)
  ret void
}
+
+; Make sure the add does not prevent folding loads into pushes.
+; val1 and val2 will not be folded into pushes since they have
+; an additional use, but val3 should be.
+; NORMAL-LABEL: test13:
+; NORMAL: movl ([[P1:%e..]]), [[V1:%e..]]
+; NORMAL-NEXT: movl ([[P2:%e..]]), [[V2:%e..]]
+; NORMAL-NEXT: , [[ADD:%e..]]
+; NORMAL-NEXT: pushl [[ADD]]
+; NORMAL-NEXT: pushl ([[P3:%e..]])
+; NORMAL-NEXT: pushl [[V2]]
+; NORMAL-NEXT: pushl [[V1]]
+; NORMAL-NEXT: calll _good
+; NORMAL: movl [[P3]], %eax
+define i32* @test13(i32* inreg %ptr1, i32* inreg %ptr2, i32* inreg %ptr3) optsize {
+entry:
+  %val1 = load i32, i32* %ptr1
+  %val2 = load i32, i32* %ptr2
+  %val3 = load i32, i32* %ptr3
+  %add = add i32 %val1, %val2
+  ; val1/val2 each have a second use (%add), so their loads cannot be folded
+  ; into pushes; val3 is single-use, so its load should fold (see CHECKs above)
+  call void @good(i32 %val1, i32 %val2, i32 %val3, i32 %add)
+  ret i32* %ptr3
+}