define i16 @cvt_f64_to_i16(double %a0) nounwind {
; ALL-LABEL: cvt_f64_to_i16:
; ALL: # %bb.0:
; ALL-NEXT: jmp __truncdfhf2 # TAILCALL
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2
; ALL-NEXT: subq $16, %rsp
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movl %eax, %ebx
; ALL-NEXT: shll $16, %ebx
; ALL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movzwl %ax, %eax
; ALL-NEXT: orl %ebx, %eax
; ALL-NEXT: vmovd %eax, %xmm0
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-SLOW-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movl %eax, %ebx
; AVX2-SLOW-NEXT: shll $16, %ebx
; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movzwl %ax, %r14d
; AVX2-SLOW-NEXT: orl %ebx, %r14d
; AVX2-SLOW-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movl %eax, %ebx
; AVX2-SLOW-NEXT: shll $16, %ebx
; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movzwl %ax, %eax
; AVX2-SLOW-NEXT: orl %ebx, %eax
; AVX2-SLOW-NEXT: shlq $32, %rax
; AVX2-FAST-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movl %eax, %ebx
; AVX2-FAST-NEXT: shll $16, %ebx
; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movzwl %ax, %r14d
; AVX2-FAST-NEXT: orl %ebx, %r14d
; AVX2-FAST-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movl %eax, %ebx
; AVX2-FAST-NEXT: shll $16, %ebx
; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movzwl %ax, %eax
; AVX2-FAST-NEXT: orl %ebx, %eax
; AVX2-FAST-NEXT: shlq $32, %rax
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %r14d
; AVX512F-NEXT: orl %ebx, %r14d
; AVX512F-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebx
; AVX512F-NEXT: shll $16, %ebx
; AVX512F-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: orl %ebx, %eax
; AVX512F-NEXT: shlq $32, %rax
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %r14d
; AVX512VL-NEXT: orl %ebx, %r14d
; AVX512VL-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebx
; AVX512VL-NEXT: shll $16, %ebx
; AVX512VL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %eax
; AVX512VL-NEXT: orl %ebx, %eax
; AVX512VL-NEXT: shlq $32, %rax
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r14d
; AVX1-NEXT: orl %ebx, %r14d
; AVX1-NEXT: shlq $32, %r14
; AVX1-NEXT: orq %r15, %r14
; AVX1-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %r15d
; AVX1-NEXT: orl %ebx, %r15d
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebx
; AVX1-NEXT: shll $16, %ebx
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebx, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: orl %ebx, %r15d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r14d
; AVX2-NEXT: orl %ebx, %r14d
; AVX2-NEXT: shlq $32, %r14
; AVX2-NEXT: orq %r15, %r14
; AVX2-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %r15d
; AVX2-NEXT: orl %ebx, %r15d
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebx
; AVX2-NEXT: shll $16, %ebx
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebx, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX512-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: orl %ebx, %r15d
; AVX512-NEXT: vmovupd (%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r14d
; AVX512-NEXT: orl %ebx, %r14d
; AVX512-NEXT: shlq $32, %r14
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %r15d
; AVX512-NEXT: orl %ebx, %r15d
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebx
; AVX512-NEXT: shll $16, %ebx
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebx, %eax
; AVX512-NEXT: shlq $32, %rax
; ALL: # %bb.0:
; ALL-NEXT: pushq %rbx
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movw %ax, (%rbx)
; ALL-NEXT: popq %rbx
; ALL-NEXT: retq
; ALL-NEXT: movq %rdi, %rbx
; ALL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; ALL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movl %eax, %ebp
; ALL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; ALL-NEXT: callq __truncdfhf2
; ALL-NEXT: movw %ax, (%rbx)
; ALL-NEXT: movw %bp, 2(%rbx)
; ALL-NEXT: addq $24, %rsp
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, 4(%rbx)
; AVX1-NEXT: movw %bp, (%rbx)
; AVX1-NEXT: movw %r15w, 6(%rbx)
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, 4(%rbx)
; AVX2-NEXT: movw %bp, (%rbx)
; AVX2-NEXT: movw %r15w, 6(%rbx)
; AVX512-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, 4(%rbx)
; AVX512-NEXT: movw %bp, (%rbx)
; AVX512-NEXT: movw %r15w, 6(%rbx)
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebp, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %ebx
; AVX2-NEXT: orl %ebp, %ebx
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: shll $16, %ebp
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movzwl %ax, %eax
; AVX2-NEXT: orl %ebp, %eax
; AVX2-NEXT: shlq $32, %rax
; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %ebx
; AVX512-NEXT: orl %ebp, %ebx
; AVX512-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: shll $16, %ebp
; AVX512-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movzwl %ax, %eax
; AVX512-NEXT: orl %ebp, %eax
; AVX512-NEXT: shlq $32, %rax
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %ebx
; AVX1-NEXT: orl %ebp, %ebx
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: shll $16, %ebp
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movzwl %ax, %eax
; AVX1-NEXT: orl %ebp, %eax
; AVX1-NEXT: shlq $32, %rax
; AVX2-SLOW-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movl %eax, %ebp
; AVX2-SLOW-NEXT: shll $16, %ebp
; AVX2-SLOW-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movzwl %ax, %ebx
; AVX2-SLOW-NEXT: orl %ebp, %ebx
; AVX2-SLOW-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-SLOW-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movl %eax, %ebp
; AVX2-SLOW-NEXT: shll $16, %ebp
; AVX2-SLOW-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-SLOW-NEXT: callq __truncdfhf2
; AVX2-SLOW-NEXT: movzwl %ax, %eax
; AVX2-SLOW-NEXT: orl %ebp, %eax
; AVX2-SLOW-NEXT: shlq $32, %rax
; AVX2-FAST-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movl %eax, %ebp
; AVX2-FAST-NEXT: shll $16, %ebp
; AVX2-FAST-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movzwl %ax, %ebx
; AVX2-FAST-NEXT: orl %ebp, %ebx
; AVX2-FAST-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-FAST-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movl %eax, %ebp
; AVX2-FAST-NEXT: shll $16, %ebp
; AVX2-FAST-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-FAST-NEXT: callq __truncdfhf2
; AVX2-FAST-NEXT: movzwl %ax, %eax
; AVX2-FAST-NEXT: orl %ebp, %eax
; AVX2-FAST-NEXT: shlq $32, %rax
; AVX512F-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %ebx
; AVX512F-NEXT: orl %ebp, %ebx
; AVX512F-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512F-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movl %eax, %ebp
; AVX512F-NEXT: shll $16, %ebp
; AVX512F-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512F-NEXT: callq __truncdfhf2
; AVX512F-NEXT: movzwl %ax, %eax
; AVX512F-NEXT: orl %ebp, %eax
; AVX512F-NEXT: shlq $32, %rax
; AVX512VL-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %ebx
; AVX512VL-NEXT: orl %ebp, %ebx
; AVX512VL-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX512VL-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movl %eax, %ebp
; AVX512VL-NEXT: shll $16, %ebp
; AVX512VL-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512VL-NEXT: callq __truncdfhf2
; AVX512VL-NEXT: movzwl %ax, %eax
; AVX512VL-NEXT: orl %ebp, %eax
; AVX512VL-NEXT: shlq $32, %rax
; AVX1-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r12d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r13d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %ebp
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r14d
; AVX1-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movl %eax, %r15d
; AVX1-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX1-NEXT: callq __truncdfhf2@PLT
+; AVX1-NEXT: callq __truncdfhf2
; AVX1-NEXT: movw %ax, 12(%rbx)
; AVX1-NEXT: movw %r15w, 8(%rbx)
; AVX1-NEXT: movw %r14w, 4(%rbx)
; AVX2-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[0-9]+}}(%rsp), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r12d
; AVX2-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r13d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %ebp
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r14d
; AVX2-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movl %eax, %r15d
; AVX2-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX2-NEXT: callq __truncdfhf2@PLT
+; AVX2-NEXT: callq __truncdfhf2
; AVX2-NEXT: movw %ax, 12(%rbx)
; AVX2-NEXT: movw %r15w, 8(%rbx)
; AVX2-NEXT: movw %r14w, 4(%rbx)
; AVX512-NEXT: vmovupd %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0
; AVX512-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r12d
; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r13d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %ebp
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r14d
; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movl %eax, %r15d
; AVX512-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
-; AVX512-NEXT: callq __truncdfhf2@PLT
+; AVX512-NEXT: callq __truncdfhf2
; AVX512-NEXT: movw %ax, 12(%rbx)
; AVX512-NEXT: movw %r15w, 8(%rbx)
; AVX512-NEXT: movw %r14w, 4(%rbx)