OSDN Git Service

Make atomic Swap work, 64-bit on x86-32.
author: Dale Johannesen <dalej@apple.com>
Fri, 3 Oct 2008 22:25:52 +0000 (22:25 +0000)
committer: Dale Johannesen <dalej@apple.com>
Fri, 3 Oct 2008 22:25:52 +0000 (22:25 +0000)
Make it all work in non-pic mode.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@57034 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86ISelDAGToDAG.cpp
lib/Target/X86/X86ISelLowering.cpp
lib/Target/X86/X86ISelLowering.h
lib/Target/X86/X86InstrInfo.td

index a782079..daa7f13 100644 (file)
@@ -1305,6 +1305,8 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
       return SelectAtomic64(Node, X86::ATOMNAND6432);
     case X86ISD::ATOMAND64_DAG:
       return SelectAtomic64(Node, X86::ATOMAND6432);
+    case X86ISD::ATOMSWAP64_DAG:
+      return SelectAtomic64(Node, X86::ATOMSWAP6432);
 
     case ISD::SMUL_LOHI:
     case ISD::UMUL_LOHI: {
index 47a1ed7..494ac1d 100644 (file)
@@ -6069,19 +6069,21 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
   case ISD::ATOMIC_LOAD_SUB_16: 
   case ISD::ATOMIC_LOAD_SUB_32: return LowerLOAD_SUB(Op,DAG);
   case ISD::ATOMIC_LOAD_SUB_64: return (Subtarget->is64Bit()) ?
-                                  LowerLOAD_SUB(Op,DAG) :
-                                  LowerATOMIC_BINARY_64(Op,DAG,
+                                        LowerLOAD_SUB(Op,DAG) :
+                                        LowerATOMIC_BINARY_64(Op,DAG,
                                         X86ISD::ATOMSUB64_DAG);
   case ISD::ATOMIC_LOAD_AND_64: return LowerATOMIC_BINARY_64(Op,DAG,
                                         X86ISD::ATOMAND64_DAG);
-  case ISD::ATOMIC_LOAD_OR_64: return LowerATOMIC_BINARY_64(Op, DAG,
+  case ISD::ATOMIC_LOAD_OR_64:  return LowerATOMIC_BINARY_64(Op, DAG,
                                         X86ISD::ATOMOR64_DAG);
   case ISD::ATOMIC_LOAD_XOR_64: return LowerATOMIC_BINARY_64(Op,DAG,
                                         X86ISD::ATOMXOR64_DAG);
-  case ISD::ATOMIC_LOAD_NAND_64: return LowerATOMIC_BINARY_64(Op,DAG,
+  case ISD::ATOMIC_LOAD_NAND_64:return LowerATOMIC_BINARY_64(Op,DAG,
                                         X86ISD::ATOMNAND64_DAG);
   case ISD::ATOMIC_LOAD_ADD_64: return LowerATOMIC_BINARY_64(Op,DAG,
                                         X86ISD::ATOMADD64_DAG);
+  case ISD::ATOMIC_SWAP_64:     return LowerATOMIC_BINARY_64(Op,DAG,
+                                        X86ISD::ATOMSWAP64_DAG);
   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
@@ -6433,6 +6435,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
   //   newMBB:
   //     out1, out2 = phi (thisMBB, t1/t2) (newMBB, t3/t4)
   //     op  t5, t6 <- out1, out2, [bitinstr.val]
+  //      (for SWAP, substitute:  mov t5, t6 <- [bitinstr.val])
   //     mov ECX, EBX <- t5, t6
   //     mov EAX, EDX <- t1, t2
   //     cmpxchg8b [bitinstr.addr]  [EAX, EDX, EBX, ECX implicit]
@@ -6486,10 +6489,14 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
     (*MIB).addOperand(*argOpers[i]);
   unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
   MIB = BuildMI(thisMBB, TII->get(LoadOpc), t2);
-  // add 4 to displacement.  getImm verifies it's immediate.
+  // add 4 to displacement.
   for (int i=0; i <= lastAddrIndx-1; ++i)
     (*MIB).addOperand(*argOpers[i]);
-  MachineOperand newOp3 = MachineOperand::CreateImm(argOpers[3]->getImm()+4);
+  MachineOperand newOp3 = *(argOpers[3]);
+  if (newOp3.isImm())
+    newOp3.setImm(newOp3.getImm()+4);
+  else
+    newOp3.setOffset(newOp3.getOffset()+4);
   (*MIB).addOperand(newOp3);
 
   // t3/4 are defined later, at the bottom of the loop
@@ -6518,7 +6525,8 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
     MIB = BuildMI(newMBB, TII->get(regOpcL), t5);
   else
     MIB = BuildMI(newMBB, TII->get(immOpcL), t5);
-  MIB.addReg(tt1);
+  if (regOpcL != X86::MOV32rr)
+    MIB.addReg(tt1);
   (*MIB).addOperand(*argOpers[4]);
   assert(argOpers[5]->isReg() == argOpers[4]->isReg());
   assert(argOpers[5]->isImm() == argOpers[4]->isImm());
@@ -6526,7 +6534,8 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
     MIB = BuildMI(newMBB, TII->get(regOpcH), t6);
   else
     MIB = BuildMI(newMBB, TII->get(immOpcH), t6);
-  MIB.addReg(tt2);
+  if (regOpcH != X86::MOV32rr)
+    MIB.addReg(tt2);
   (*MIB).addOperand(*argOpers[5]);
 
   MIB = BuildMI(newMBB, TII->get(copyOpc), X86::EAX);
@@ -6943,18 +6952,21 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                X86::AND32rr, X86::AND32rr,
                                                X86::AND32ri, X86::AND32ri,
                                                true);
-  // FIXME carry
   case X86::ATOMADD6432:
     return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                                X86::ADD32rr, X86::ADC32rr,
                                                X86::ADD32ri, X86::ADC32ri,
                                                false);
-  // FIXME carry
   case X86::ATOMSUB6432:
     return EmitAtomicBit6432WithCustomInserter(MI, BB, 
                                                X86::SUB32rr, X86::SBB32rr,
                                                X86::SUB32ri, X86::SBB32ri,
                                                false);
+  case X86::ATOMSWAP6432:
+    return EmitAtomicBit6432WithCustomInserter(MI, BB, 
+                                               X86::MOV32rr, X86::MOV32rr,
+                                               X86::MOV32ri, X86::MOV32ri,
+                                               false);
   }
 }
 
index f12ff70..56223d5 100644 (file)
@@ -200,13 +200,15 @@ namespace llvm {
       LCMPXCHG8_DAG,
 
       // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG, 
-      // ATOMXOR64_DAG, ATOMNAND64_DAG - Atomic 64-bit binary operations.
+      // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG - 
+      // Atomic 64-bit binary operations.
       ATOMADD64_DAG,
       ATOMSUB64_DAG,
       ATOMOR64_DAG,
       ATOMXOR64_DAG,
       ATOMAND64_DAG,
       ATOMNAND64_DAG,
+      ATOMSWAP64_DAG,
 
       // FNSTCW16m - Store FP control world into i16 memory.
       FNSTCW16m,
index d4bc05e..691a7cf 100644 (file)
@@ -99,6 +99,9 @@ def X86AtomAnd64 : SDNode<"X86ISD::ATOMAND64_DAG", SDTX86atomicBinary,
 def X86AtomNand64 : SDNode<"X86ISD::ATOMNAND64_DAG", SDTX86atomicBinary,
                         [SDNPHasChain, SDNPMayStore, 
                          SDNPMayLoad, SDNPMemOperand]>;
+def X86AtomSwap64 : SDNode<"X86ISD::ATOMSWAP64_DAG", SDTX86atomicBinary,
+                        [SDNPHasChain, SDNPMayStore, 
+                         SDNPMayLoad, SDNPMemOperand]>;
 def X86retflag : SDNode<"X86ISD::RET_FLAG", SDTX86Ret,
                         [SDNPHasChain, SDNPOptInFlag]>;
 
@@ -2772,6 +2775,9 @@ def ATOMADD6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
 def ATOMSUB6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
                                (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
                "#ATOMSUB6432 PSUEDO!", []>;
+def ATOMSWAP6432 : I<0, Pseudo, (outs GR32:$dst1, GR32:$dst2),
+                               (ins i64mem:$ptr, GR32:$val1, GR32:$val2),
+               "#ATOMSWAP6432 PSUEDO!", []>;
 }
 
 //===----------------------------------------------------------------------===//