Index: include/llvm/Intrinsics.td
===================================================================
--- include/llvm/Intrinsics.td (revision 3174)
+++ include/llvm/Intrinsics.td (working copy)
 // Target-specific intrinsics
 //===----------------------------------------------------------------------===//
-include "llvm/IntrinsicsPowerPC.td"
+//include "llvm/IntrinsicsPowerPC.td"
 include "llvm/IntrinsicsX86.td"
-include "llvm/IntrinsicsARM.td"
-include "llvm/IntrinsicsCellSPU.td"
-include "llvm/IntrinsicsAlpha.td"
-include "llvm/IntrinsicsXCore.td"
+//include "llvm/IntrinsicsARM.td"
+//include "llvm/IntrinsicsCellSPU.td"
+//include "llvm/IntrinsicsAlpha.td"
+//include "llvm/IntrinsicsXCore.td"
Index: lib/Transforms/InstCombine/InstCombineCalls.cpp
===================================================================
--- lib/Transforms/InstCombine/InstCombineCalls.cpp (revision 3174)
+++ lib/Transforms/InstCombine/InstCombineCalls.cpp (working copy)
-  case Intrinsic::ppc_altivec_lvx:
-  case Intrinsic::ppc_altivec_lvxl:
+  //case Intrinsic::ppc_altivec_lvx:
+  //case Intrinsic::ppc_altivec_lvxl:
   case Intrinsic::x86_sse_loadu_ps:
   case Intrinsic::x86_sse2_loadu_pd:
   case Intrinsic::x86_sse2_loadu_dq:
       return new LoadInst(Ptr);
-  case Intrinsic::ppc_altivec_stvx:
-  case Intrinsic::ppc_altivec_stvxl:
+  //case Intrinsic::ppc_altivec_stvx:
+  //case Intrinsic::ppc_altivec_stvxl:
     // Turn stvx -> store if the pointer is known aligned.
     if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
-  case Intrinsic::ppc_altivec_vperm:
+  //case Intrinsic::ppc_altivec_vperm:
     // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
     if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
       assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
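For reference, the InstCombineCalls.cpp hunks above disable simplifications rather than change them. The first turns the "unaligned" vector load/store intrinsics (lvx/lvxl, stvx/stvxl, and the x86 loadu family) into plain IR loads and stores once the pointer is provably 16-byte aligned. A minimal sketch of that rule as it applies inside InstCombiner::visitCallInst, reusing only names visible in the diff context (II, Builder, GetOrEnforceKnownAlignment) and assuming the LLVM 2.x-era API this patch is written against:

    // Sketch, not the upstream code verbatim.  II is the intrinsic call
    // being visited; Builder and GetOrEnforceKnownAlignment are the
    // InstCombiner's existing helpers, as seen in the diff context.

    // lvx/lvxl and the x86 loadu intrinsics: if the pointer (operand 0)
    // is known 16-byte aligned, the call is an ordinary vector load.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(
          II->getArgOperand(0), PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }

    // stvx/stvxl: same idea, but the pointer is operand 1 and the value
    // being stored is operand 0, matching the hunk above.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
          PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }

Commenting out only the AltiVec case labels leaves the x86 paths of these rules intact, which is the apparent intent of the patch.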
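The last hunk disables the rewrite named in its comment: vperm selects 16 bytes from the 32-byte concatenation of its two vector operands, so a constant mask makes the call a compile-time byte shuffle. A hedged sketch of that rewrite under the same assumptions; the element-wise extract/insert form here is an illustration of the transformation, not necessarily the exact upstream implementation:

    // Sketch of vperm(V1, V2, constmask): each of the 16 mask bytes picks
    // one byte out of the 32-byte concatenation V1:V2.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Only rewrite when every mask byte is a plain integer constant.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i)
        if (!isa<ConstantInt>(Mask->getOperand(i)))
          AllEltsOk = false;

      if (AllEltsOk) {
        // Work on the inputs as <16 x i8> so mask indices address bytes.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        for (unsigned i = 0; i != 16; ++i) {
          unsigned Idx =
              cast<ConstantInt>(Mask->getOperand(i))->getZExtValue() & 31;
          Value *Src = Idx < 16 ? Op0 : Op1;  // byte from V1 or from V2
          Value *Byte = Builder->CreateExtractElement(
              Src, Builder->getInt32(Idx & 15));
          Result = Builder->CreateInsertElement(Result, Byte,
                                                Builder->getInt32(i));
        }
        // Result computes the same bytes as the vperm call; cast it back
        // to II->getType() before replacing the call with it.
      }
    }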