Optimizing: Tag more arena allocations.
author    Vladimir Marko <vmarko@google.com>
          Thu, 17 Sep 2015 16:03:26 +0000 (17:03 +0100)
committer Vladimir Marko <vmarko@google.com>
          Fri, 25 Sep 2015 11:18:02 +0000 (12:18 +0100)

Replace GrowableArray with ArenaVector and tag arena
allocations with new allocation types.
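
Each container now passes an allocation kind to the arena, so per-pass
memory usage shows up in the allocator's statistics. A minimal sketch of
the pattern (the pass class below is hypothetical and assumes ART's
headers; ArenaVector, ArenaAllocator::Adapter() and the kArenaAlloc*
tags are the real ones used throughout this change):

    #include "base/arena_containers.h"
    #include "nodes.h"

    class MyPassVisitor {  // Hypothetical pass, for illustration only.
     public:
      explicit MyPassVisitor(ArenaAllocator* arena)
          // Every allocation made through this vector is attributed to
          // the kArenaAllocGvn bucket in the arena's per-tag statistics.
          : worklist_(arena->Adapter(kArenaAllocGvn)) {}

     private:
      ArenaVector<HInstruction*> worklist_;
    };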

As part of this, make the register allocator a bit more
efficient by doing bulk inserts and erases; some loops are
now O(n) instead of O(n^2).
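
The O(n) figure comes from the erase-remove idiom used below: instead of
deleting elements one by one while scanning (each deletion shifts the
tail, O(n^2) overall), survivors are compacted to the front in a single
pass and the junk tail is dropped with one bulk erase. A self-contained
sketch of the pattern, using std::vector and a hypothetical `keep`
predicate in place of IsDeadAt()/Covers():

    #include <vector>

    // Keep the elements for which `keep` returns true, in one O(n) pass.
    template <typename T, typename Pred>
    void BulkRemove(std::vector<T>* v, Pred keep) {
      auto kept_end = v->begin();
      for (auto it = v->begin(), end = v->end(); it != end; ++it) {
        if (keep(*it)) {
          *kept_end++ = *it;  // Compact survivors to the front.
        }
      }
      v->erase(kept_end, v->end());  // One erase instead of one per removal.
    }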

Change-Id: Ifac0871ffb34b121cc0447801a2d07eefd308c14

20 files changed:
compiler/optimizing/builder.cc
compiler/optimizing/builder.h
compiler/optimizing/gvn.cc
compiler/optimizing/locations.cc
compiler/optimizing/locations.h
compiler/optimizing/primitive_type_propagation.cc
compiler/optimizing/primitive_type_propagation.h
compiler/optimizing/reference_type_propagation.cc
compiler/optimizing/reference_type_propagation.h
compiler/optimizing/register_allocator.cc
compiler/optimizing/register_allocator.h
compiler/optimizing/register_allocator_test.cc
compiler/optimizing/side_effects_analysis.cc
compiler/optimizing/side_effects_analysis.h
compiler/optimizing/ssa_liveness_analysis.cc
compiler/optimizing/ssa_liveness_analysis.h
compiler/optimizing/ssa_phi_elimination.cc
compiler/optimizing/ssa_phi_elimination.h
runtime/base/arena_allocator.cc
runtime/base/arena_allocator.h

diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 274a2a6..f2bde89 100644
@@ -140,11 +140,11 @@ class SwitchTable : public ValueObject {
 
 void HGraphBuilder::InitializeLocals(uint16_t count) {
   graph_->SetNumberOfVRegs(count);
-  locals_.SetSize(count);
+  locals_.resize(count);
   for (int i = 0; i < count; i++) {
     HLocal* local = new (arena_) HLocal(i);
     entry_block_->AddInstruction(local);
-    locals_.Put(i, local);
+    locals_[i] = local;
   }
 }
 
@@ -156,7 +156,7 @@ void HGraphBuilder::InitializeParameters(uint16_t number_of_parameters) {
 
   graph_->SetNumberOfInVRegs(number_of_parameters);
   const char* shorty = dex_compilation_unit_->GetShorty();
-  int locals_index = locals_.Size() - number_of_parameters;
+  int locals_index = locals_.size() - number_of_parameters;
   int parameter_index = 0;
 
   if (!dex_compilation_unit_->IsStatic()) {
@@ -554,11 +554,11 @@ void HGraphBuilder::MaybeUpdateCurrentBlock(size_t dex_pc) {
 bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
                                          const uint16_t* code_end,
                                          size_t* number_of_branches) {
-  branch_targets_.SetSize(code_end - code_ptr);
+  branch_targets_.resize(code_end - code_ptr, nullptr);
 
   // Create the first block for the dex instructions, single successor of the entry block.
   HBasicBlock* block = new (arena_) HBasicBlock(graph_, 0);
-  branch_targets_.Put(0, block);
+  branch_targets_[0] = block;
   entry_block_->AddSuccessor(block);
 
   // Iterate over all instructions and find branching instructions. Create blocks for
@@ -602,7 +602,7 @@ bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
         // Create a block for the switch-case logic. The block gets the dex_pc
         // of the SWITCH instruction because it is part of its semantics.
         block = new (arena_) HBasicBlock(graph_, dex_pc);
-        branch_targets_.Put(table.GetDexPcForIndex(i), block);
+        branch_targets_[table.GetDexPcForIndex(i)] = block;
       }
 
       // Fall-through. Add a block if there is more code afterwards.
@@ -626,15 +626,15 @@ bool HGraphBuilder::ComputeBranchTargets(const uint16_t* code_ptr,
 
 HBasicBlock* HGraphBuilder::FindBlockStartingAt(int32_t dex_pc) const {
   DCHECK_GE(dex_pc, 0);
-  DCHECK_LT(static_cast<size_t>(dex_pc), branch_targets_.Size());
-  return branch_targets_.Get(dex_pc);
+  DCHECK_LT(static_cast<size_t>(dex_pc), branch_targets_.size());
+  return branch_targets_[dex_pc];
 }
 
 HBasicBlock* HGraphBuilder::FindOrCreateBlockStartingAt(int32_t dex_pc) {
   HBasicBlock* block = FindBlockStartingAt(dex_pc);
   if (block == nullptr) {
     block = new (arena_) HBasicBlock(graph_, dex_pc);
-    branch_targets_.Put(dex_pc, block);
+    branch_targets_[dex_pc] = block;
   }
   return block;
 }
@@ -2840,18 +2840,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
   return true;
 }  // NOLINT(readability/fn_size)
 
-HLocal* HGraphBuilder::GetLocalAt(int register_index) const {
-  return locals_.Get(register_index);
+HLocal* HGraphBuilder::GetLocalAt(uint32_t register_index) const {
+  DCHECK_LT(register_index, locals_.size());
+  return locals_[register_index];
 }
 
-void HGraphBuilder::UpdateLocal(int register_index,
+void HGraphBuilder::UpdateLocal(uint32_t register_index,
                                 HInstruction* instruction,
                                 uint32_t dex_pc) const {
   HLocal* local = GetLocalAt(register_index);
   current_block_->AddInstruction(new (arena_) HStoreLocal(local, instruction, dex_pc));
 }
 
-HInstruction* HGraphBuilder::LoadLocal(int register_index,
+HInstruction* HGraphBuilder::LoadLocal(uint32_t register_index,
                                        Primitive::Type type,
                                        uint32_t dex_pc) const {
   HLocal* local = GetLocalAt(register_index);

diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index ae452f2..8d40d69 100644
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
 #define ART_COMPILER_OPTIMIZING_BUILDER_H_
 
+#include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "dex_file.h"
 #include "dex_file-inl.h"
@@ -24,7 +25,6 @@
 #include "driver/dex_compilation_unit.h"
 #include "optimizing_compiler_stats.h"
 #include "primitive.h"
-#include "utils/growable_array.h"
 #include "nodes.h"
 
 namespace art {
@@ -43,8 +43,8 @@ class HGraphBuilder : public ValueObject {
                 const uint8_t* interpreter_metadata,
                 Handle<mirror::DexCache> dex_cache)
       : arena_(graph->GetArena()),
-        branch_targets_(graph->GetArena(), 0),
-        locals_(graph->GetArena(), 0),
+        branch_targets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
+        locals_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
         entry_block_(nullptr),
         exit_block_(nullptr),
         current_block_(nullptr),
@@ -64,8 +64,8 @@ class HGraphBuilder : public ValueObject {
   // Only for unit testing.
   HGraphBuilder(HGraph* graph, Primitive::Type return_type = Primitive::kPrimInt)
       : arena_(graph->GetArena()),
-        branch_targets_(graph->GetArena(), 0),
-        locals_(graph->GetArena(), 0),
+        branch_targets_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
+        locals_(graph->GetArena()->Adapter(kArenaAllocGraphBuilder)),
         entry_block_(nullptr),
         exit_block_(nullptr),
         current_block_(nullptr),
@@ -130,9 +130,9 @@ class HGraphBuilder : public ValueObject {
   uint16_t LookupQuickenedInfo(uint32_t dex_pc);
 
   void InitializeLocals(uint16_t count);
-  HLocal* GetLocalAt(int register_index) const;
-  void UpdateLocal(int register_index, HInstruction* instruction, uint32_t dex_pc) const;
-  HInstruction* LoadLocal(int register_index, Primitive::Type type, uint32_t dex_pc) const;
+  HLocal* GetLocalAt(uint32_t register_index) const;
+  void UpdateLocal(uint32_t register_index, HInstruction* instruction, uint32_t dex_pc) const;
+  HInstruction* LoadLocal(uint32_t register_index, Primitive::Type type, uint32_t dex_pc) const;
   void PotentiallyAddSuspendCheck(HBasicBlock* target, uint32_t dex_pc);
   void InitializeParameters(uint16_t number_of_parameters);
   bool NeedsAccessCheck(uint32_t type_index) const;
@@ -304,9 +304,9 @@ class HGraphBuilder : public ValueObject {
   // A list of the size of the dex code holding block information for
   // the method. If an entry contains a block, then the dex instruction
   // starting at that entry is the first instruction of a new block.
-  GrowableArray<HBasicBlock*> branch_targets_;
+  ArenaVector<HBasicBlock*> branch_targets_;
 
-  GrowableArray<HLocal*> locals_;
+  ArenaVector<HLocal*> locals_;
 
   HBasicBlock* entry_block_;
   HBasicBlock* exit_block_;

diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 1ee8648..5050e15 100644
  */
 
 #include "gvn.h"
+
+#include "base/arena_containers.h"
+#include "base/bit_vector-inl.h"
 #include "side_effects_analysis.h"
 #include "utils.h"
-
 #include "utils/arena_bit_vector.h"
-#include "base/bit_vector-inl.h"
 
 namespace art {
 
@@ -32,7 +33,7 @@ namespace art {
  * if there is one in the set. In GVN, we would say those instructions have the
  * same "number".
  */
-class ValueSet : public ArenaObject<kArenaAllocMisc> {
+class ValueSet : public ArenaObject<kArenaAllocGvn> {
  public:
   // Constructs an empty ValueSet which owns all its buckets.
   explicit ValueSet(ArenaAllocator* allocator)
@@ -143,7 +144,7 @@ class ValueSet : public ArenaObject<kArenaAllocMisc> {
   size_t GetNumberOfEntries() const { return num_entries_; }
 
  private:
-  class Node : public ArenaObject<kArenaAllocMisc> {
+  class Node : public ArenaObject<kArenaAllocGvn> {
    public:
     Node(HInstruction* instruction, size_t hash_code, Node* next)
         : instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -306,7 +307,7 @@ class GlobalValueNumberer : public ValueObject {
       : graph_(graph),
         allocator_(allocator),
         side_effects_(side_effects),
-        sets_(allocator, graph->GetBlocks().size(), nullptr) {}
+        sets_(graph->GetBlocks().size(), nullptr, allocator->Adapter(kArenaAllocGvn)) {}
 
   void Run();
 
@@ -322,14 +323,14 @@ class GlobalValueNumberer : public ValueObject {
   // ValueSet for blocks. Initially null, but for an individual block they
   // are allocated and populated by the dominator, and updated by all blocks
   // in the path from the dominator to the block.
-  GrowableArray<ValueSet*> sets_;
+  ArenaVector<ValueSet*> sets_;
 
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
 };
 
 void GlobalValueNumberer::Run() {
   DCHECK(side_effects_.HasRun());
-  sets_.Put(graph_->GetEntryBlock()->GetBlockId(), new (allocator_) ValueSet(allocator_));
+  sets_[graph_->GetEntryBlock()->GetBlockId()] = new (allocator_) ValueSet(allocator_);
 
   // Use the reverse post order to ensure the non back-edge predecessors of a block are
   // visited before the block itself.
@@ -348,7 +349,7 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
     set = new (allocator_) ValueSet(allocator_);
   } else {
     HBasicBlock* dominator = block->GetDominator();
-    ValueSet* dominator_set = sets_.Get(dominator->GetBlockId());
+    ValueSet* dominator_set = sets_[dominator->GetBlockId()];
     if (dominator->GetSuccessors().size() == 1) {
       DCHECK_EQ(dominator->GetSuccessor(0), block);
       set = dominator_set;
@@ -363,7 +364,7 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
         set->Kill(side_effects_.GetLoopEffects(block));
       } else if (predecessors.size() > 1) {
         for (HBasicBlock* predecessor : predecessors) {
-          set->IntersectWith(sets_.Get(predecessor->GetBlockId()));
+          set->IntersectWith(sets_[predecessor->GetBlockId()]);
           if (set->IsEmpty()) {
             break;
           }
@@ -372,7 +373,7 @@ void GlobalValueNumberer::VisitBasicBlock(HBasicBlock* block) {
     }
   }
 
-  sets_.Put(block->GetBlockId(), set);
+  sets_[block->GetBlockId()] = set;
 
   HInstruction* current = block->GetFirstInstruction();
   while (current != nullptr) {

diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index d14dfc1..ebdf7a2 100644
@@ -23,18 +23,15 @@ namespace art {
 LocationSummary::LocationSummary(HInstruction* instruction,
                                  CallKind call_kind,
                                  bool intrinsified)
-    : inputs_(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
-      temps_(instruction->GetBlock()->GetGraph()->GetArena(), 0),
+    : inputs_(instruction->InputCount(),
+              instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
+      temps_(instruction->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocLocationSummary)),
       output_overlaps_(Location::kOutputOverlap),
       call_kind_(call_kind),
       stack_mask_(nullptr),
       register_mask_(0),
       live_registers_(),
       intrinsified_(intrinsified) {
-  inputs_.SetSize(instruction->InputCount());
-  for (size_t i = 0; i < instruction->InputCount(); ++i) {
-    inputs_.Put(i, Location());
-  }
   instruction->SetLocations(this);
 
   if (NeedsSafepoint()) {

diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 2162ab9..2eeba18 100644
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
 #define ART_COMPILER_OPTIMIZING_LOCATIONS_H_
 
+#include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/bit_field.h"
 #include "base/bit_vector.h"
@@ -481,15 +482,17 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
                   bool intrinsified = false);
 
   void SetInAt(uint32_t at, Location location) {
-    inputs_.Put(at, location);
+    DCHECK_LT(at, GetInputCount());
+    inputs_[at] = location;
   }
 
   Location InAt(uint32_t at) const {
-    return inputs_.Get(at);
+    DCHECK_LT(at, GetInputCount());
+    return inputs_[at];
   }
 
   size_t GetInputCount() const {
-    return inputs_.Size();
+    return inputs_.size();
   }
 
   void SetOut(Location location, Location::OutputOverlap overlaps = Location::kOutputOverlap) {
@@ -508,23 +511,25 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
   }
 
   void AddTemp(Location location) {
-    temps_.Add(location);
+    temps_.push_back(location);
   }
 
   Location GetTemp(uint32_t at) const {
-    return temps_.Get(at);
+    DCHECK_LT(at, GetTempCount());
+    return temps_[at];
   }
 
   void SetTempAt(uint32_t at, Location location) {
-    DCHECK(temps_.Get(at).IsUnallocated() || temps_.Get(at).IsInvalid());
-    temps_.Put(at, location);
+    DCHECK_LT(at, GetTempCount());
+    DCHECK(temps_[at].IsUnallocated() || temps_[at].IsInvalid());
+    temps_[at] = location;
   }
 
   size_t GetTempCount() const {
-    return temps_.Size();
+    return temps_.size();
   }
 
-  bool HasTemps() const { return !temps_.IsEmpty(); }
+  bool HasTemps() const { return !temps_.empty(); }
 
   Location Out() const { return output_; }
 
@@ -576,7 +581,7 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
   }
 
   bool IsFixedInput(uint32_t input_index) const {
-    Location input = inputs_.Get(input_index);
+    Location input = inputs_[input_index];
     return input.IsRegister()
         || input.IsFpuRegister()
         || input.IsPair()
@@ -593,8 +598,8 @@ class LocationSummary : public ArenaObject<kArenaAllocMisc> {
   }
 
  private:
-  GrowableArray<Location> inputs_;
-  GrowableArray<Location> temps_;
+  ArenaVector<Location> inputs_;
+  ArenaVector<Location> temps_;
   // Whether the output overlaps with any of the inputs. If it overlaps, then it cannot
   // share the same register as the inputs.
   Location::OutputOverlap output_overlaps_;

diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index af93438..c98f43e 100644
@@ -108,8 +108,9 @@ void PrimitiveTypePropagation::VisitBasicBlock(HBasicBlock* block) {
 }
 
 void PrimitiveTypePropagation::ProcessWorklist() {
-  while (!worklist_.IsEmpty()) {
-    HPhi* instruction = worklist_.Pop();
+  while (!worklist_.empty()) {
+    HPhi* instruction = worklist_.back();
+    worklist_.pop_back();
     if (UpdateType(instruction)) {
       AddDependentInstructionsToWorklist(instruction);
     }
@@ -118,7 +119,7 @@ void PrimitiveTypePropagation::ProcessWorklist() {
 
 void PrimitiveTypePropagation::AddToWorklist(HPhi* instruction) {
   DCHECK(instruction->IsLive());
-  worklist_.Add(instruction);
+  worklist_.push_back(instruction);
 }
 
 void PrimitiveTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {

diff --git a/compiler/optimizing/primitive_type_propagation.h b/compiler/optimizing/primitive_type_propagation.h
index 6d370ed..212fcfc 100644
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_PRIMITIVE_TYPE_PROPAGATION_H_
 #define ART_COMPILER_OPTIMIZING_PRIMITIVE_TYPE_PROPAGATION_H_
 
+#include "base/arena_containers.h"
 #include "nodes.h"
 
 namespace art {
@@ -25,7 +26,9 @@ namespace art {
 class PrimitiveTypePropagation : public ValueObject {
  public:
   explicit PrimitiveTypePropagation(HGraph* graph)
-      : graph_(graph), worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+      : graph_(graph), worklist_(graph->GetArena()->Adapter(kArenaAllocPrimitiveTypePropagation)) {
+    worklist_.reserve(kDefaultWorklistSize);
+  }
 
   void Run();
 
@@ -37,7 +40,7 @@ class PrimitiveTypePropagation : public ValueObject {
   bool UpdateType(HPhi* phi);
 
   HGraph* const graph_;
-  GrowableArray<HPhi*> worklist_;
+  ArenaVector<HPhi*> worklist_;
 
   static constexpr size_t kDefaultWorklistSize = 8;
 
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index a88c543..fe837e4 100644
@@ -27,7 +27,7 @@ class RTPVisitor : public HGraphDelegateVisitor {
  public:
   RTPVisitor(HGraph* graph,
              StackHandleScopeCollection* handles,
-             GrowableArray<HInstruction*>* worklist,
+             ArenaVector<HInstruction*>* worklist,
              ReferenceTypeInfo::TypeHandle object_class_handle,
              ReferenceTypeInfo::TypeHandle class_class_handle,
              ReferenceTypeInfo::TypeHandle string_class_handle,
@@ -68,7 +68,7 @@ class RTPVisitor : public HGraphDelegateVisitor {
   ReferenceTypeInfo::TypeHandle class_class_handle_;
   ReferenceTypeInfo::TypeHandle string_class_handle_;
   ReferenceTypeInfo::TypeHandle throwable_class_handle_;
-  GrowableArray<HInstruction*>* worklist_;
+  ArenaVector<HInstruction*>* worklist_;
 
   static constexpr size_t kDefaultWorklistSize = 8;
 };
@@ -78,7 +78,8 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
                                                    const char* name)
     : HOptimization(graph, name),
       handles_(handles),
-      worklist_(graph->GetArena(), kDefaultWorklistSize) {
+      worklist_(graph->GetArena()->Adapter(kArenaAllocReferenceTypePropagation)) {
+  worklist_.reserve(kDefaultWorklistSize);
   // Mutator lock is required for NewHandle, but annotalysis ignores constructors.
   ScopedObjectAccess soa(Thread::Current());
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -649,7 +650,7 @@ void RTPVisitor::VisitArrayGet(HArrayGet* instr) {
   ScopedObjectAccess soa(Thread::Current());
   UpdateArrayGet(instr, handles_, object_class_handle_);
   if (!instr->GetReferenceTypeInfo().IsValid()) {
-    worklist_->Add(instr);
+    worklist_->push_back(instr);
   }
 }
 
@@ -718,8 +719,9 @@ bool ReferenceTypePropagation::UpdateNullability(HInstruction* instr) {
 }
 
 void ReferenceTypePropagation::ProcessWorklist() {
-  while (!worklist_.IsEmpty()) {
-    HInstruction* instruction = worklist_.Pop();
+  while (!worklist_.empty()) {
+    HInstruction* instruction = worklist_.back();
+    worklist_.pop_back();
     if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
       AddDependentInstructionsToWorklist(instruction);
     }
@@ -729,7 +731,7 @@ void ReferenceTypePropagation::ProcessWorklist() {
 void ReferenceTypePropagation::AddToWorklist(HInstruction* instruction) {
   DCHECK_EQ(instruction->GetType(), Primitive::kPrimNot)
       << instruction->DebugName() << ":" << instruction->GetType();
-  worklist_.Add(instruction);
+  worklist_.push_back(instruction);
 }
 
 void ReferenceTypePropagation::AddDependentInstructionsToWorklist(HInstruction* instruction) {

diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index 62f6ab8..5493601 100644
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_REFERENCE_TYPE_PROPAGATION_H_
 #define ART_COMPILER_OPTIMIZING_REFERENCE_TYPE_PROPAGATION_H_
 
+#include "base/arena_containers.h"
 #include "driver/dex_compilation_unit.h"
 #include "handle_scope-inl.h"
 #include "nodes.h"
@@ -57,7 +58,7 @@ class ReferenceTypePropagation : public HOptimization {
 
   StackHandleScopeCollection* handles_;
 
-  GrowableArray<HInstruction*> worklist_;
+  ArenaVector<HInstruction*> worklist_;
 
   ReferenceTypeInfo::TypeHandle object_class_handle_;
   ReferenceTypeInfo::TypeHandle class_class_handle_;

diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index a4f1f45..e6b2a8b 100644
@@ -43,21 +43,21 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
       : allocator_(allocator),
         codegen_(codegen),
         liveness_(liveness),
-        unhandled_core_intervals_(allocator, 0),
-        unhandled_fp_intervals_(allocator, 0),
+        unhandled_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        unhandled_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
         unhandled_(nullptr),
-        handled_(allocator, 0),
-        active_(allocator, 0),
-        inactive_(allocator, 0),
-        physical_core_register_intervals_(allocator, codegen->GetNumberOfCoreRegisters()),
-        physical_fp_register_intervals_(allocator, codegen->GetNumberOfFloatingPointRegisters()),
-        temp_intervals_(allocator, 4),
-        int_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
-        long_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
-        float_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
-        double_spill_slots_(allocator, kDefaultNumberOfSpillSlots),
+        handled_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        active_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        inactive_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        physical_core_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        physical_fp_register_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        int_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        long_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        float_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
+        double_spill_slots_(allocator->Adapter(kArenaAllocRegisterAllocator)),
         catch_phi_spill_slots_(0),
-        safepoints_(allocator, 0),
+        safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
         processing_core_registers_(false),
         number_of_registers_(-1),
         registers_array_(nullptr),
@@ -66,10 +66,16 @@ RegisterAllocator::RegisterAllocator(ArenaAllocator* allocator,
         reserved_out_slots_(0),
         maximum_number_of_live_core_registers_(0),
         maximum_number_of_live_fp_registers_(0) {
+  temp_intervals_.reserve(4);
+  int_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
+  long_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
+  float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
+  double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
+
   static constexpr bool kIsBaseline = false;
   codegen->SetupBlockedRegisters(kIsBaseline);
-  physical_core_register_intervals_.SetSize(codegen->GetNumberOfCoreRegisters());
-  physical_fp_register_intervals_.SetSize(codegen->GetNumberOfFloatingPointRegisters());
+  physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
+  physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
   // Always reserve for the current method and the graph's max out registers.
   // TODO: compute it instead.
   // ArtMethod* takes 2 vregs for 64 bits.
@@ -129,17 +135,17 @@ void RegisterAllocator::BlockRegister(Location location, size_t start, size_t en
   int reg = location.reg();
   DCHECK(location.IsRegister() || location.IsFpuRegister());
   LiveInterval* interval = location.IsRegister()
-      ? physical_core_register_intervals_.Get(reg)
-      : physical_fp_register_intervals_.Get(reg);
+      ? physical_core_register_intervals_[reg]
+      : physical_fp_register_intervals_[reg];
   Primitive::Type type = location.IsRegister()
       ? Primitive::kPrimInt
       : Primitive::kPrimFloat;
   if (interval == nullptr) {
     interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
     if (location.IsRegister()) {
-      physical_core_register_intervals_.Put(reg, interval);
+      physical_core_register_intervals_[reg] = interval;
     } else {
-      physical_fp_register_intervals_.Put(reg, interval);
+      physical_fp_register_intervals_[reg] = interval;
     }
   }
   DCHECK(interval->GetRegister() == reg);
@@ -184,34 +190,32 @@ void RegisterAllocator::AllocateRegistersInternal() {
   registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
   processing_core_registers_ = true;
   unhandled_ = &unhandled_core_intervals_;
-  for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
-    LiveInterval* fixed = physical_core_register_intervals_.Get(i);
+  for (LiveInterval* fixed : physical_core_register_intervals_) {
     if (fixed != nullptr) {
       // Fixed interval is added to inactive_ instead of unhandled_.
       // It's also the only type of inactive interval whose start position
       // can be after the current interval during linear scan.
       // Fixed interval is never split and never moves to unhandled_.
-      inactive_.Add(fixed);
+      inactive_.push_back(fixed);
     }
   }
   LinearScan();
 
-  inactive_.Reset();
-  active_.Reset();
-  handled_.Reset();
+  inactive_.clear();
+  active_.clear();
+  handled_.clear();
 
   number_of_registers_ = codegen_->GetNumberOfFloatingPointRegisters();
   registers_array_ = allocator_->AllocArray<size_t>(number_of_registers_);
   processing_core_registers_ = false;
   unhandled_ = &unhandled_fp_intervals_;
-  for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
-    LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
+  for (LiveInterval* fixed : physical_fp_register_intervals_) {
     if (fixed != nullptr) {
       // Fixed interval is added to inactive_ instead of unhandled_.
       // It's also the only type of inactive interval whose start position
       // can be after the current interval during linear scan.
       // Fixed interval is never split and never moves to unhandled_.
-      inactive_.Add(fixed);
+      inactive_.push_back(fixed);
     }
   }
   LinearScan();
@@ -236,24 +240,24 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
         case Location::kRequiresRegister: {
           LiveInterval* interval =
               LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
-          temp_intervals_.Add(interval);
+          temp_intervals_.push_back(interval);
           interval->AddTempUse(instruction, i);
-          unhandled_core_intervals_.Add(interval);
+          unhandled_core_intervals_.push_back(interval);
           break;
         }
 
         case Location::kRequiresFpuRegister: {
           LiveInterval* interval =
               LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
-          temp_intervals_.Add(interval);
+          temp_intervals_.push_back(interval);
           interval->AddTempUse(instruction, i);
           if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
             interval->AddHighInterval(/* is_temp */ true);
             LiveInterval* high = interval->GetHighInterval();
-            temp_intervals_.Add(high);
-            unhandled_fp_intervals_.Add(high);
+            temp_intervals_.push_back(high);
+            unhandled_fp_intervals_.push_back(high);
           }
-          unhandled_fp_intervals_.Add(interval);
+          unhandled_fp_intervals_.push_back(interval);
           break;
         }
 
@@ -276,7 +280,7 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
       instruction->GetBlock()->RemoveInstruction(instruction);
       return;
     }
-    safepoints_.Add(instruction);
+    safepoints_.push_back(instruction);
     if (locations->OnlyCallsOnSlowPath()) {
       // We add a synthesized range at this position to record the live registers
       // at this position. Ideally, we could just update the safepoints when locations
@@ -310,28 +314,28 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
   LiveInterval* current = instruction->GetLiveInterval();
   if (current == nullptr) return;
 
-  GrowableArray<LiveInterval*>& unhandled = core_register
+  ArenaVector<LiveInterval*>& unhandled = core_register
       ? unhandled_core_intervals_
       : unhandled_fp_intervals_;
 
-  DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
+  DCHECK(unhandled.empty() || current->StartsBeforeOrAt(unhandled.back()));
 
   if (codegen_->NeedsTwoRegisters(current->GetType())) {
     current->AddHighInterval();
   }
 
-  for (size_t safepoint_index = safepoints_.Size(); safepoint_index > 0; --safepoint_index) {
-    HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
+  for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
+    HInstruction* safepoint = safepoints_[safepoint_index - 1u];
     size_t safepoint_position = safepoint->GetLifetimePosition();
 
     // Test that safepoints are ordered in the optimal way.
-    DCHECK(safepoint_index == safepoints_.Size()
-           || safepoints_.Get(safepoint_index)->GetLifetimePosition() < safepoint_position);
+    DCHECK(safepoint_index == safepoints_.size() ||
+           safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);
 
     if (safepoint_position == current->GetStart()) {
       // The safepoint is for this instruction, so the location of the instruction
       // does not need to be saved.
-      DCHECK_EQ(safepoint_index, safepoints_.Size());
+      DCHECK_EQ(safepoint_index, safepoints_.size());
       DCHECK_EQ(safepoint, instruction);
       continue;
     } else if (current->IsDeadAt(safepoint_position)) {
@@ -437,34 +441,26 @@ class AllRangesIterator : public ValueObject {
 bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
   // To simplify unit testing, we eagerly create the array of intervals, and
   // call the helper method.
-  GrowableArray<LiveInterval*> intervals(allocator_, 0);
+  ArenaVector<LiveInterval*> intervals(allocator_->Adapter(kArenaAllocRegisterAllocator));
   for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
     HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
     if (ShouldProcess(processing_core_registers_, instruction->GetLiveInterval())) {
-      intervals.Add(instruction->GetLiveInterval());
+      intervals.push_back(instruction->GetLiveInterval());
     }
   }
 
-  if (processing_core_registers_) {
-    for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
-      LiveInterval* fixed = physical_core_register_intervals_.Get(i);
-      if (fixed != nullptr) {
-        intervals.Add(fixed);
-      }
-    }
-  } else {
-    for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
-      LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
-      if (fixed != nullptr) {
-        intervals.Add(fixed);
-      }
+  const ArenaVector<LiveInterval*>* physical_register_intervals = processing_core_registers_
+      ? &physical_core_register_intervals_
+      : &physical_fp_register_intervals_;
+  for (LiveInterval* fixed : *physical_register_intervals) {
+    if (fixed != nullptr) {
+      intervals.push_back(fixed);
     }
   }
 
-  for (size_t i = 0, e = temp_intervals_.Size(); i < e; ++i) {
-    LiveInterval* temp = temp_intervals_.Get(i);
+  for (LiveInterval* temp : temp_intervals_) {
     if (ShouldProcess(processing_core_registers_, temp)) {
-      intervals.Add(temp);
+      intervals.push_back(temp);
     }
   }
 
@@ -472,7 +468,7 @@ bool RegisterAllocator::ValidateInternal(bool log_fatal_on_failure) const {
                            allocator_, processing_core_registers_, log_fatal_on_failure);
 }
 
-bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
+bool RegisterAllocator::ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
                                           size_t number_of_spill_slots,
                                           size_t number_of_out_slots,
                                           const CodeGenerator& codegen,
@@ -482,26 +478,27 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in
   size_t number_of_registers = processing_core_registers
       ? codegen.GetNumberOfCoreRegisters()
       : codegen.GetNumberOfFloatingPointRegisters();
-  GrowableArray<ArenaBitVector*> liveness_of_values(
-      allocator, number_of_registers + number_of_spill_slots);
+  ArenaVector<ArenaBitVector*> liveness_of_values(
+      allocator->Adapter(kArenaAllocRegisterAllocator));
+  liveness_of_values.reserve(number_of_registers + number_of_spill_slots);
 
   // Allocate a bit vector per register. A live interval that has a register
   // allocated will populate the associated bit vector based on its live ranges.
   for (size_t i = 0; i < number_of_registers + number_of_spill_slots; ++i) {
-    liveness_of_values.Add(new (allocator) ArenaBitVector(allocator, 0, true));
+    liveness_of_values.push_back(new (allocator) ArenaBitVector(allocator, 0, true));
   }
 
-  for (size_t i = 0, e = intervals.Size(); i < e; ++i) {
-    for (AllRangesIterator it(intervals.Get(i)); !it.Done(); it.Advance()) {
+  for (LiveInterval* start_interval : intervals) {
+    for (AllRangesIterator it(start_interval); !it.Done(); it.Advance()) {
       LiveInterval* current = it.CurrentInterval();
       HInstruction* defined_by = current->GetParent()->GetDefinedBy();
       if (current->GetParent()->HasSpillSlot()
            // Parameters and current method have their own stack slot.
            && !(defined_by != nullptr && (defined_by->IsParameterValue()
                                           || defined_by->IsCurrentMethod()))) {
-        BitVector* liveness_of_spill_slot = liveness_of_values.Get(number_of_registers
+        BitVector* liveness_of_spill_slot = liveness_of_values[number_of_registers
             + current->GetParent()->GetSpillSlot() / kVRegSize
-            - number_of_out_slots);
+            - number_of_out_slots];
         for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
           if (liveness_of_spill_slot->IsBitSet(j)) {
             if (log_fatal_on_failure) {
@@ -523,7 +520,7 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in
           // and test code may not properly fill the right information to the code generator.
           CHECK(codegen.HasAllocatedRegister(processing_core_registers, current->GetRegister()));
         }
-        BitVector* liveness_of_register = liveness_of_values.Get(current->GetRegister());
+        BitVector* liveness_of_register = liveness_of_values[current->GetRegister()];
         for (size_t j = it.CurrentRange()->GetStart(); j < it.CurrentRange()->GetEnd(); ++j) {
           if (liveness_of_register->IsBitSet(j)) {
             if (current->IsUsingInputRegister() && current->CanUseInputRegister()) {
@@ -572,93 +569,101 @@ void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interva
 
 void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
   stream << "inactive: " << std::endl;
-  for (size_t i = 0; i < inactive_.Size(); i ++) {
-    DumpInterval(stream, inactive_.Get(i));
+  for (LiveInterval* inactive_interval : inactive_) {
+    DumpInterval(stream, inactive_interval);
   }
   stream << "active: " << std::endl;
-  for (size_t i = 0; i < active_.Size(); i ++) {
-    DumpInterval(stream, active_.Get(i));
+  for (LiveInterval* active_interval : active_) {
+    DumpInterval(stream, active_interval);
   }
   stream << "unhandled: " << std::endl;
   auto unhandled = (unhandled_ != nullptr) ?
       unhandled_ : &unhandled_core_intervals_;
-  for (size_t i = 0; i < unhandled->Size(); i ++) {
-    DumpInterval(stream, unhandled->Get(i));
+  for (LiveInterval* unhandled_interval : *unhandled) {
+    DumpInterval(stream, unhandled_interval);
   }
   stream << "handled: " << std::endl;
-  for (size_t i = 0; i < handled_.Size(); i ++) {
-    DumpInterval(stream, handled_.Get(i));
+  for (LiveInterval* handled_interval : handled_) {
+    DumpInterval(stream, handled_interval);
   }
 }
 
 // By the book implementation of a linear scan register allocator.
 void RegisterAllocator::LinearScan() {
-  while (!unhandled_->IsEmpty()) {
+  while (!unhandled_->empty()) {
     // (1) Remove interval with the lowest start position from unhandled.
-    LiveInterval* current = unhandled_->Pop();
+    LiveInterval* current = unhandled_->back();
+    unhandled_->pop_back();
 
     // Make sure the interval is an expected state.
     DCHECK(!current->IsFixed() && !current->HasSpillSlot());
     // Make sure we are going in the right order.
-    DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
+    DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() >= current->GetStart());
     // Make sure a low interval is always with a high.
-    DCHECK(!current->IsLowInterval() || unhandled_->Peek()->IsHighInterval());
+    DCHECK(!current->IsLowInterval() || unhandled_->back()->IsHighInterval());
     // Make sure a high interval is always with a low.
     DCHECK(current->IsLowInterval() ||
-           unhandled_->IsEmpty() ||
-           !unhandled_->Peek()->IsHighInterval());
+           unhandled_->empty() ||
+           !unhandled_->back()->IsHighInterval());
 
     size_t position = current->GetStart();
 
     // Remember the inactive_ size here since the ones moved to inactive_ from
     // active_ below shouldn't need to be re-checked.
-    size_t inactive_intervals_to_handle = inactive_.Size();
+    size_t inactive_intervals_to_handle = inactive_.size();
 
     // (2) Remove currently active intervals that are dead at this position.
     //     Move active intervals that have a lifetime hole at this position
     //     to inactive.
-    for (size_t i = 0; i < active_.Size(); ++i) {
-      LiveInterval* interval = active_.Get(i);
+    // Note: Copy elements we keep to the beginning, just like
+    //     v.erase(std::remove(v.begin(), v.end(), value), v.end());
+    auto active_kept_end = active_.begin();
+    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
+      LiveInterval* interval = *it;
       if (interval->IsDeadAt(position)) {
-        active_.Delete(interval);
-        --i;
-        handled_.Add(interval);
+        handled_.push_back(interval);
       } else if (!interval->Covers(position)) {
-        active_.Delete(interval);
-        --i;
-        inactive_.Add(interval);
+        inactive_.push_back(interval);
+      } else {
+        *active_kept_end++ = interval;  // Keep this interval.
       }
     }
+    // We have copied what we want to keep to [active_.begin(), active_kept_end),
+    // the rest of the data in active_ is junk - drop it.
+    active_.erase(active_kept_end, active_.end());
 
     // (3) Remove currently inactive intervals that are dead at this position.
     //     Move inactive intervals that cover this position to active.
-    for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
-      LiveInterval* interval = inactive_.Get(i);
+    // Note: Copy elements we keep to the beginning, just like
+    //     v.erase(std::remove(v.begin(), v.begin() + num, value), v.begin() + num);
+    auto inactive_kept_end = inactive_.begin();
+    auto inactive_to_handle_end = inactive_.begin() + inactive_intervals_to_handle;
+    for (auto it = inactive_.begin(); it != inactive_to_handle_end; ++it) {
+      LiveInterval* interval = *it;
       DCHECK(interval->GetStart() < position || interval->IsFixed());
       if (interval->IsDeadAt(position)) {
-        inactive_.Delete(interval);
-        --i;
-        --inactive_intervals_to_handle;
-        handled_.Add(interval);
+        handled_.push_back(interval);
       } else if (interval->Covers(position)) {
-        inactive_.Delete(interval);
-        --i;
-        --inactive_intervals_to_handle;
-        active_.Add(interval);
+        active_.push_back(interval);
+      } else {
+        *inactive_kept_end++ = interval;  // Keep this interval.
       }
     }
+    // We have copied what we want to keep to [inactive_.begin(), inactive_kept_end),
+    // the rest of the data in the processed interval is junk - drop it.
+    inactive_.erase(inactive_kept_end, inactive_to_handle_end);
 
     if (current->IsSlowPathSafepoint()) {
       // Synthesized interval to record the maximum number of live registers
       // at safepoints. No need to allocate a register for it.
       if (processing_core_registers_) {
         maximum_number_of_live_core_registers_ =
-          std::max(maximum_number_of_live_core_registers_, active_.Size());
+          std::max(maximum_number_of_live_core_registers_, active_.size());
       } else {
         maximum_number_of_live_fp_registers_ =
-          std::max(maximum_number_of_live_fp_registers_, active_.Size());
+          std::max(maximum_number_of_live_fp_registers_, active_.size());
       }
-      DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() > current->GetStart());
+      DCHECK(unhandled_->empty() || unhandled_->back()->GetStart() > current->GetStart());
       continue;
     }
 
@@ -683,7 +688,7 @@ void RegisterAllocator::LinearScan() {
       codegen_->AddAllocatedRegister(processing_core_registers_
           ? Location::RegisterLocation(current->GetRegister())
           : Location::FpuRegisterLocation(current->GetRegister()));
-      active_.Add(current);
+      active_.push_back(current);
       if (current->HasHighInterval() && !current->GetHighInterval()->HasRegister()) {
         current->GetHighInterval()->SetRegister(GetHighForLowRegister(current->GetRegister()));
       }
@@ -726,8 +731,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
   }
 
   // For each active interval, set its register to not free.
-  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
-    LiveInterval* interval = active_.Get(i);
+  for (LiveInterval* interval : active_) {
     DCHECK(interval->HasRegister());
     free_until[interval->GetRegister()] = 0;
   }
@@ -762,8 +766,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
 
   // For each inactive interval, set its register to be free until
   // the next intersection with `current`.
-  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
-    LiveInterval* inactive = inactive_.Get(i);
+  for (LiveInterval* inactive : inactive_) {
     // Temp/Slow-path-safepoint interval has no holes.
     DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
     if (!current->IsSplit() && !inactive->IsFixed()) {
@@ -923,11 +926,29 @@ int RegisterAllocator::FindAvailableRegister(size_t* next_use, LiveInterval* cur
   return reg;
 }
 
+// Remove interval and its other half if any. Return iterator to the following element.
+static ArenaVector<LiveInterval*>::iterator RemoveIntervalAndPotentialOtherHalf(
+    ArenaVector<LiveInterval*>* intervals, ArenaVector<LiveInterval*>::iterator pos) {
+  DCHECK(intervals->begin() <= pos && pos < intervals->end());
+  LiveInterval* interval = *pos;
+  if (interval->IsLowInterval()) {
+    DCHECK(pos + 1 < intervals->end());
+    DCHECK_EQ(*(pos + 1), interval->GetHighInterval());
+    return intervals->erase(pos, pos + 2);
+  } else if (interval->IsHighInterval()) {
+    DCHECK(intervals->begin() < pos);
+    DCHECK_EQ(*(pos - 1), interval->GetLowInterval());
+    return intervals->erase(pos - 1, pos + 1);
+  } else {
+    return intervals->erase(pos);
+  }
+}
+
 bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position,
                                                                  size_t first_register_use,
                                                                  size_t* next_use) {
-  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
-    LiveInterval* active = active_.Get(i);
+  for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
+    LiveInterval* active = *it;
     DCHECK(active->HasRegister());
     if (active->IsFixed()) continue;
     if (active->IsHighInterval()) continue;
@@ -941,11 +962,10 @@ bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position
         IsLowOfUnalignedPairInterval(active) ||
         !IsLowRegister(active->GetRegister())) {
       LiveInterval* split = Split(active, position);
-      active_.DeleteAt(i);
       if (split != active) {
-        handled_.Add(active);
+        handled_.push_back(active);
       }
-      PotentiallyRemoveOtherHalf(active, &active_, i);
+      RemoveIntervalAndPotentialOtherHalf(&active_, it);
       AddSorted(unhandled_, split);
       return true;
     }
@@ -953,23 +973,6 @@ bool RegisterAllocator::TrySplitNonPairOrUnalignedPairIntervalAt(size_t position
   return false;
 }
 
-bool RegisterAllocator::PotentiallyRemoveOtherHalf(LiveInterval* interval,
-                                                   GrowableArray<LiveInterval*>* intervals,
-                                                   size_t index) {
-  if (interval->IsLowInterval()) {
-    DCHECK_EQ(intervals->Get(index), interval->GetHighInterval());
-    intervals->DeleteAt(index);
-    return true;
-  } else if (interval->IsHighInterval()) {
-    DCHECK_GT(index, 0u);
-    DCHECK_EQ(intervals->Get(index - 1), interval->GetLowInterval());
-    intervals->DeleteAt(index - 1);
-    return true;
-  } else {
-    return false;
-  }
-}
-
 // Find the register that is used the last, and spill the interval
 // that holds it. If the first use of `current` is after that register
 // we spill `current` instead.
@@ -1001,8 +1004,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
 
   // For each active interval, find the next use of its register after the
   // start of current.
-  for (size_t i = 0, e = active_.Size(); i < e; ++i) {
-    LiveInterval* active = active_.Get(i);
+  for (LiveInterval* active : active_) {
     DCHECK(active->HasRegister());
     if (active->IsFixed()) {
       next_use[active->GetRegister()] = current->GetStart();
@@ -1016,8 +1018,7 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
 
   // For each inactive interval, find the next use of its register after the
   // start of current.
-  for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
-    LiveInterval* inactive = inactive_.Get(i);
+  for (LiveInterval* inactive : inactive_) {
     // Temp/Slow-path-safepoint interval has no holes.
     DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
     if (!current->IsSplit() && !inactive->IsFixed()) {
@@ -1087,10 +1088,10 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
                                                               first_register_use,
                                                               next_use);
       DCHECK(success);
-      LiveInterval* existing = unhandled_->Peek();
+      LiveInterval* existing = unhandled_->back();
       DCHECK(existing->IsHighInterval());
       DCHECK_EQ(existing->GetLowInterval(), current);
-      unhandled_->Add(current);
+      unhandled_->push_back(current);
     } else {
       // If the first use of that instruction is after the last use of the found
       // register, we split this interval just before its first register use.
@@ -1105,23 +1106,24 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
     // have that register.
     current->SetRegister(reg);
 
-    for (size_t i = 0, e = active_.Size(); i < e; ++i) {
-      LiveInterval* active = active_.Get(i);
+    for (auto it = active_.begin(), end = active_.end(); it != end; ++it) {
+      LiveInterval* active = *it;
       if (active->GetRegister() == reg) {
         DCHECK(!active->IsFixed());
         LiveInterval* split = Split(active, current->GetStart());
         if (split != active) {
-          handled_.Add(active);
+          handled_.push_back(active);
         }
-        active_.DeleteAt(i);
-        PotentiallyRemoveOtherHalf(active, &active_, i);
+        RemoveIntervalAndPotentialOtherHalf(&active_, it);
         AddSorted(unhandled_, split);
         break;
       }
     }
 
-    for (size_t i = 0; i < inactive_.Size(); ++i) {
-      LiveInterval* inactive = inactive_.Get(i);
+    // NOTE: Retrieve end() on each iteration because we're removing elements in the loop body.
+    for (auto it = inactive_.begin(); it != inactive_.end(); ) {
+      LiveInterval* inactive = *it;
+      bool erased = false;
       if (inactive->GetRegister() == reg) {
         if (!current->IsSplit() && !inactive->IsFixed()) {
           // Neither current nor inactive are fixed.
@@ -1129,43 +1131,43 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
           // inactive interval should never intersect with that inactive interval.
           // Only if it's not fixed though, because fixed intervals don't come from SSA.
           DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
-          continue;
-        }
-        size_t next_intersection = inactive->FirstIntersectionWith(current);
-        if (next_intersection != kNoLifetime) {
-          if (inactive->IsFixed()) {
-            LiveInterval* split = Split(current, next_intersection);
-            DCHECK_NE(split, current);
-            AddSorted(unhandled_, split);
-          } else {
-            // Split at the start of `current`, which will lead to splitting
-            // at the end of the lifetime hole of `inactive`.
-            LiveInterval* split = Split(inactive, current->GetStart());
-            // If it's inactive, it must start before the current interval.
-            DCHECK_NE(split, inactive);
-            inactive_.DeleteAt(i);
-            if (PotentiallyRemoveOtherHalf(inactive, &inactive_, i) && inactive->IsHighInterval()) {
-              // We have removed an entry prior to `inactive`. So we need to decrement.
-              --i;
+        } else {
+          size_t next_intersection = inactive->FirstIntersectionWith(current);
+          if (next_intersection != kNoLifetime) {
+            if (inactive->IsFixed()) {
+              LiveInterval* split = Split(current, next_intersection);
+              DCHECK_NE(split, current);
+              AddSorted(unhandled_, split);
+            } else {
+              // Split at the start of `current`, which will lead to splitting
+              // at the end of the lifetime hole of `inactive`.
+              LiveInterval* split = Split(inactive, current->GetStart());
+              // If it's inactive, it must start before the current interval.
+              DCHECK_NE(split, inactive);
+              it = RemoveIntervalAndPotentialOtherHalf(&inactive_, it);
+              erased = true;
+              handled_.push_back(inactive);
+              AddSorted(unhandled_, split);
             }
-            // Decrement because we have removed `inactive` from the list.
-            --i;
-            handled_.Add(inactive);
-            AddSorted(unhandled_, split);
           }
         }
       }
+      // If we have erased the element, `it` already points to the next element.
+      // Otherwise we need to move to the next element.
+      if (!erased) {
+        ++it;
+      }
     }
 
     return true;
   }
 }
 
-void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval) {
+void RegisterAllocator::AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval) {
   DCHECK(!interval->IsFixed() && !interval->HasSpillSlot());
   size_t insert_at = 0;
-  for (size_t i = array->Size(); i > 0; --i) {
-    LiveInterval* current = array->Get(i - 1);
+  for (size_t i = array->size(); i > 0; --i) {
+    LiveInterval* current = (*array)[i - 1u];
     // High intervals must be processed right after their low equivalent.
     if (current->StartsAfter(interval) && !current->IsHighInterval()) {
       insert_at = i;
@@ -1173,18 +1175,20 @@ void RegisterAllocator::AddSorted(GrowableArray<LiveInterval*>* array, LiveInter
     } else if ((current->GetStart() == interval->GetStart()) && current->IsSlowPathSafepoint()) {
       // Ensure the slow path interval is the last to be processed at its location: we want the
       // interval to know all live registers at this location.
-      DCHECK(i == 1 || array->Get(i - 2)->StartsAfter(current));
+      DCHECK(i == 1 || (*array)[i - 2u]->StartsAfter(current));
       insert_at = i;
       break;
     }
   }
 
-  array->InsertAt(insert_at, interval);
   // Insert the high interval before the low, to ensure the low is processed before.
+  auto insert_pos = array->begin() + insert_at;
   if (interval->HasHighInterval()) {
-    array->InsertAt(insert_at, interval->GetHighInterval());
+    array->insert(insert_pos, { interval->GetHighInterval(), interval });
   } else if (interval->HasLowInterval()) {
-    array->InsertAt(insert_at + 1, interval->GetLowInterval());
+    array->insert(insert_pos, { interval, interval->GetLowInterval() });
+  } else {
+    array->insert(insert_pos, interval);
   }
 }
 
@@ -1309,7 +1313,7 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
     return;
   }
 
-  GrowableArray<size_t>* spill_slots = nullptr;
+  ArenaVector<size_t>* spill_slots = nullptr;
   switch (interval->GetType()) {
     case Primitive::kPrimDouble:
       spill_slots = &double_spill_slots_;
@@ -1334,32 +1338,27 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
 
   // Find an available spill slot.
   size_t slot = 0;
-  for (size_t e = spill_slots->Size(); slot < e; ++slot) {
-    if (spill_slots->Get(slot) <= parent->GetStart()
-        && (slot == (e - 1) || spill_slots->Get(slot + 1) <= parent->GetStart())) {
+  for (size_t e = spill_slots->size(); slot < e; ++slot) {
+    if ((*spill_slots)[slot] <= parent->GetStart()
+        && (slot == (e - 1) || (*spill_slots)[slot + 1] <= parent->GetStart())) {
       break;
     }
   }
 
   size_t end = interval->GetLastSibling()->GetEnd();
   if (parent->NeedsTwoSpillSlots()) {
-    if (slot == spill_slots->Size()) {
+    if (slot + 2u > spill_slots->size()) {
       // We need a new spill slot.
-      spill_slots->Add(end);
-      spill_slots->Add(end);
-    } else if (slot == spill_slots->Size() - 1) {
-      spill_slots->Put(slot, end);
-      spill_slots->Add(end);
-    } else {
-      spill_slots->Put(slot, end);
-      spill_slots->Put(slot + 1, end);
+      spill_slots->resize(slot + 2u, end);
     }
+    (*spill_slots)[slot] = end;
+    (*spill_slots)[slot + 1] = end;
   } else {
-    if (slot == spill_slots->Size()) {
+    if (slot == spill_slots->size()) {
       // We need a new spill slot.
-      spill_slots->Add(end);
+      spill_slots->push_back(end);
     } else {
-      spill_slots->Put(slot, end);
+      (*spill_slots)[slot] = end;
     }
   }
 
@@ -1817,13 +1816,13 @@ void RegisterAllocator::Resolve() {
       size_t slot = current->GetSpillSlot();
       switch (current->GetType()) {
         case Primitive::kPrimDouble:
-          slot += long_spill_slots_.Size();
+          slot += long_spill_slots_.size();
           FALLTHROUGH_INTENDED;
         case Primitive::kPrimLong:
-          slot += float_spill_slots_.Size();
+          slot += float_spill_slots_.size();
           FALLTHROUGH_INTENDED;
         case Primitive::kPrimFloat:
-          slot += int_spill_slots_.Size();
+          slot += int_spill_slots_.size();
           FALLTHROUGH_INTENDED;
         case Primitive::kPrimNot:
         case Primitive::kPrimInt:
@@ -1906,8 +1905,7 @@ void RegisterAllocator::Resolve() {
   }
 
   // Assign temp locations.
-  for (size_t i = 0; i < temp_intervals_.Size(); ++i) {
-    LiveInterval* temp = temp_intervals_.Get(i);
+  for (LiveInterval* temp : temp_intervals_) {
     if (temp->IsHighInterval()) {
       // High intervals can be skipped, they are already handled by the low interval.
       continue;

diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index e030464..58600b7 100644
@@ -18,9 +18,9 @@
 #define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
 
 #include "arch/instruction_set.h"
+#include "base/arena_containers.h"
 #include "base/macros.h"
 #include "primitive.h"
-#include "utils/growable_array.h"
 
 namespace art {
 
@@ -59,7 +59,7 @@ class RegisterAllocator {
   }
 
   // Helper method for validation. Used by unit testing.
-  static bool ValidateIntervals(const GrowableArray<LiveInterval*>& intervals,
+  static bool ValidateIntervals(const ArenaVector<LiveInterval*>& intervals,
                                 size_t number_of_spill_slots,
                                 size_t number_of_out_slots,
                                 const CodeGenerator& codegen,
@@ -70,10 +70,10 @@ class RegisterAllocator {
   static bool CanAllocateRegistersFor(const HGraph& graph, InstructionSet instruction_set);
 
   size_t GetNumberOfSpillSlots() const {
-    return int_spill_slots_.Size()
-        + long_spill_slots_.Size()
-        + float_spill_slots_.Size()
-        + double_spill_slots_.Size()
+    return int_spill_slots_.size()
+        + long_spill_slots_.size()
+        + float_spill_slots_.size()
+        + double_spill_slots_.size()
         + catch_phi_spill_slots_;
   }
 
@@ -87,7 +87,7 @@ class RegisterAllocator {
   void Resolve();
 
   // Add `interval` in the given sorted list.
-  static void AddSorted(GrowableArray<LiveInterval*>* array, LiveInterval* interval);
+  static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
 
   // Split `interval` at the position `position`. The new interval starts at `position`.
   LiveInterval* Split(LiveInterval* interval, size_t position);
@@ -159,13 +159,6 @@ class RegisterAllocator {
                                                 size_t first_register_use,
                                                 size_t* next_use);
 
-  // If `interval` has another half, remove it from the list of `intervals`.
-  // `index` holds the index at which `interval` is in `intervals`.
-  // Returns whether there is another half.
-  bool PotentiallyRemoveOtherHalf(LiveInterval* interval,
-                                  GrowableArray<LiveInterval*>* intervals,
-                                  size_t index);
-
   ArenaAllocator* const allocator_;
   CodeGenerator* const codegen_;
   const SsaLivenessAnalysis& liveness_;
@@ -173,43 +166,43 @@ class RegisterAllocator {
   // List of intervals for core registers that must be processed, ordered by start
   // position. Last entry is the interval that has the lowest start position.
   // This list is initially populated before doing the linear scan.
-  GrowableArray<LiveInterval*> unhandled_core_intervals_;
+  ArenaVector<LiveInterval*> unhandled_core_intervals_;
 
   // List of intervals for floating-point registers. Same comments as above.
-  GrowableArray<LiveInterval*> unhandled_fp_intervals_;
+  ArenaVector<LiveInterval*> unhandled_fp_intervals_;
 
   // Currently processed list of unhandled intervals. Either `unhandled_core_intervals_`
   // or `unhandled_fp_intervals_`.
-  GrowableArray<LiveInterval*>* unhandled_;
+  ArenaVector<LiveInterval*>* unhandled_;
 
   // List of intervals that have been processed.
-  GrowableArray<LiveInterval*> handled_;
+  ArenaVector<LiveInterval*> handled_;
 
   // List of intervals that are currently active when processing a new live interval.
   // That is, they have a live range that spans the start of the new interval.
-  GrowableArray<LiveInterval*> active_;
+  ArenaVector<LiveInterval*> active_;
 
   // List of intervals that are currently inactive when processing a new live interval.
   // That is, they have a lifetime hole that spans the start of the new interval.
-  GrowableArray<LiveInterval*> inactive_;
+  ArenaVector<LiveInterval*> inactive_;
 
   // Fixed intervals for physical registers. Such intervals cover the positions
   // where an instruction requires a specific register.
-  GrowableArray<LiveInterval*> physical_core_register_intervals_;
-  GrowableArray<LiveInterval*> physical_fp_register_intervals_;
+  ArenaVector<LiveInterval*> physical_core_register_intervals_;
+  ArenaVector<LiveInterval*> physical_fp_register_intervals_;
 
   // Intervals for temporaries. Such intervals cover the positions
   // where an instruction requires a temporary.
-  GrowableArray<LiveInterval*> temp_intervals_;
+  ArenaVector<LiveInterval*> temp_intervals_;
 
   // The spill slots allocated for live intervals. We ensure spill slots
   // are typed to avoid (1) doing moves and swaps between two different kinds
   // of registers, and (2) swapping between a single stack slot and a double
   // stack slot. This simplifies the parallel move resolver.
-  GrowableArray<size_t> int_spill_slots_;
-  GrowableArray<size_t> long_spill_slots_;
-  GrowableArray<size_t> float_spill_slots_;
-  GrowableArray<size_t> double_spill_slots_;
+  ArenaVector<size_t> int_spill_slots_;
+  ArenaVector<size_t> long_spill_slots_;
+  ArenaVector<size_t> float_spill_slots_;
+  ArenaVector<size_t> double_spill_slots_;
 
   // Spill slots allocated to catch phis. This category is special-cased because
   // (1) slots are allocated prior to linear scan and in reverse linear order,
@@ -217,7 +210,7 @@ class RegisterAllocator {
   size_t catch_phi_spill_slots_;
 
   // Instructions that need a safepoint.
-  GrowableArray<HInstruction*> safepoints_;
+  ArenaVector<HInstruction*> safepoints_;
 
   // True if processing core registers. False if processing floating
   // point registers.
index b72df86..2bb5a8b 100644 (file)
@@ -64,83 +64,83 @@ TEST(RegisterAllocatorTest, ValidateIntervals) {
   std::unique_ptr<const X86InstructionSetFeatures> features_x86(
       X86InstructionSetFeatures::FromCppDefines());
   x86::CodeGeneratorX86 codegen(graph, *features_x86.get(), CompilerOptions());
-  GrowableArray<LiveInterval*> intervals(&allocator, 0);
+  ArenaVector<LiveInterval*> intervals(allocator.Adapter());
 
   // Test with two intervals of the same range.
   {
     static constexpr size_t ranges[][2] = {{0, 42}};
-    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
-    intervals.Add(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
+    intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 0));
+    intervals.push_back(BuildInterval(ranges, arraysize(ranges), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(1)->SetRegister(0);
+    intervals[1]->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
-    intervals.Reset();
+    intervals.clear();
   }
 
   // Test with two non-intersecting intervals.
   {
     static constexpr size_t ranges1[][2] = {{0, 42}};
-    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
     static constexpr size_t ranges2[][2] = {{42, 43}};
-    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(1)->SetRegister(0);
+    intervals[1]->SetRegister(0);
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
-    intervals.Reset();
+    intervals.clear();
   }
 
   // Test with two non-intersecting intervals, with one with a lifetime hole.
   {
     static constexpr size_t ranges1[][2] = {{0, 42}, {45, 48}};
-    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
     static constexpr size_t ranges2[][2] = {{42, 43}};
-    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(1)->SetRegister(0);
+    intervals[1]->SetRegister(0);
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
-    intervals.Reset();
+    intervals.clear();
   }
 
   // Test with intersecting intervals.
   {
     static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
-    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
     static constexpr size_t ranges2[][2] = {{42, 47}};
-    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(1)->SetRegister(0);
+    intervals[1]->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
-    intervals.Reset();
+    intervals.clear();
   }
 
   // Test with siblings.
   {
     static constexpr size_t ranges1[][2] = {{0, 42}, {44, 48}};
-    intervals.Add(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
-    intervals.Get(0)->SplitAt(43);
+    intervals.push_back(BuildInterval(ranges1, arraysize(ranges1), &allocator, 0));
+    intervals[0]->SplitAt(43);
     static constexpr size_t ranges2[][2] = {{42, 47}};
-    intervals.Add(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
+    intervals.push_back(BuildInterval(ranges2, arraysize(ranges2), &allocator, 1));
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(1)->SetRegister(0);
+    intervals[1]->SetRegister(0);
     // Sibling of the first interval has no register allocated to it.
     ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
 
-    intervals.Get(0)->GetNextSibling()->SetRegister(0);
+    intervals[0]->GetNextSibling()->SetRegister(0);
     ASSERT_FALSE(RegisterAllocator::ValidateIntervals(
         intervals, 0, 0, codegen, &allocator, true, false));
   }
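Throughout the test, Add/Get/Reset become push_back/operator[]/clear, and the vector is now constructed from the arena's STL adapter. A rough stand-alone analogue using std::pmr in place of ART's ArenaAllocator/Adapter pair (the correspondence is approximate and noted in the comments):

    #include <memory_resource>
    #include <vector>

    int main() {
      // monotonic_buffer_resource plays the role of ArenaAllocator here:
      // bump-pointer allocation, everything released at once on destruction.
      std::pmr::monotonic_buffer_resource arena;
      std::pmr::vector<int> intervals(&arena);  // ~ ArenaVector<T>(allocator.Adapter())

      intervals.push_back(1);   // ~ intervals.Add(...)
      intervals[0] = 2;         // ~ intervals.Put(0, ...)
      intervals.clear();        // ~ intervals.Reset()
      return 0;
    }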
@@ -429,7 +429,7 @@ TEST(RegisterAllocatorTest, FreeUntil) {
 
   // Populate the instructions in the liveness object, to please the register allocator.
   for (size_t i = 0; i < 60; ++i) {
-    liveness.instructions_from_lifetime_position_.Add(
+    liveness.instructions_from_lifetime_position_.push_back(
         graph->GetEntryBlock()->GetFirstInstruction());
   }
 
@@ -442,15 +442,15 @@ TEST(RegisterAllocatorTest, FreeUntil) {
   // we do not depend on an order.
   LiveInterval* interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
   interval->AddRange(40, 50);
-  register_allocator.inactive_.Add(interval);
+  register_allocator.inactive_.push_back(interval);
 
   interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
   interval->AddRange(20, 30);
-  register_allocator.inactive_.Add(interval);
+  register_allocator.inactive_.push_back(interval);
 
   interval = LiveInterval::MakeFixedInterval(&allocator, 0, Primitive::kPrimInt);
   interval->AddRange(60, 70);
-  register_allocator.inactive_.Add(interval);
+  register_allocator.inactive_.push_back(interval);
 
   register_allocator.number_of_registers_ = 1;
   register_allocator.registers_array_ = allocator.AllocArray<size_t>(1);
@@ -460,10 +460,10 @@ TEST(RegisterAllocatorTest, FreeUntil) {
   ASSERT_TRUE(register_allocator.TryAllocateFreeReg(unhandled));
 
   // Check that we have split the interval.
-  ASSERT_EQ(1u, register_allocator.unhandled_->Size());
+  ASSERT_EQ(1u, register_allocator.unhandled_->size());
   // Check that we now need to find a new register where the next interval
   // that uses the register starts.
-  ASSERT_EQ(20u, register_allocator.unhandled_->Get(0)->GetStart());
+  ASSERT_EQ(20u, register_allocator.unhandled_->front()->GetStart());
 }
 
 static HGraph* BuildIfElseWithPhi(ArenaAllocator* allocator,
@@ -678,7 +678,7 @@ TEST(RegisterAllocatorTest, ExpectedInRegisterHint) {
 
     // Check that the field gets put in the register expected by its use.
     // Don't use SetInAt because we are overriding an already allocated location.
-    ret->GetLocations()->inputs_.Put(0, Location::RegisterLocation(2));
+    ret->GetLocations()->inputs_[0] = Location::RegisterLocation(2);
 
     RegisterAllocator register_allocator(&allocator, &codegen, liveness);
     register_allocator.AllocateRegisters();
@@ -885,14 +885,14 @@ TEST(RegisterAllocatorTest, SpillInactive) {
   SsaLivenessAnalysis liveness(graph, &codegen);
   // Populate the instructions in the liveness object, to please the register allocator.
   for (size_t i = 0; i < 32; ++i) {
-    liveness.instructions_from_lifetime_position_.Add(user);
+    liveness.instructions_from_lifetime_position_.push_back(user);
   }
 
   RegisterAllocator register_allocator(&allocator, &codegen, liveness);
-  register_allocator.unhandled_core_intervals_.Add(fourth);
-  register_allocator.unhandled_core_intervals_.Add(third);
-  register_allocator.unhandled_core_intervals_.Add(second);
-  register_allocator.unhandled_core_intervals_.Add(first);
+  register_allocator.unhandled_core_intervals_.push_back(fourth);
+  register_allocator.unhandled_core_intervals_.push_back(third);
+  register_allocator.unhandled_core_intervals_.push_back(second);
+  register_allocator.unhandled_core_intervals_.push_back(first);
 
   // Set just one register available to make all intervals compete for the same.
   register_allocator.number_of_registers_ = 1;
@@ -902,11 +902,11 @@ TEST(RegisterAllocatorTest, SpillInactive) {
   register_allocator.LinearScan();
 
   // Test that there are no conflicts between intervals.
-  GrowableArray<LiveInterval*> intervals(&allocator, 0);
-  intervals.Add(first);
-  intervals.Add(second);
-  intervals.Add(third);
-  intervals.Add(fourth);
+  ArenaVector<LiveInterval*> intervals(allocator.Adapter());
+  intervals.push_back(first);
+  intervals.push_back(second);
+  intervals.push_back(third);
+  intervals.push_back(fourth);
   ASSERT_TRUE(RegisterAllocator::ValidateIntervals(
       intervals, 0, 0, codegen, &allocator, true, false));
 }
index 1956781..338a3aa 100644 (file)
@@ -21,8 +21,8 @@ namespace art {
 void SideEffectsAnalysis::Run() {
   // Inlining might have created more blocks, so the vectors may need to
   // grow accordingly.
-  block_effects_.SetSize(graph_->GetBlocks().size());
-  loop_effects_.SetSize(graph_->GetBlocks().size());
+  block_effects_.resize(graph_->GetBlocks().size());
+  loop_effects_.resize(graph_->GetBlocks().size());
 
   // In DEBUG mode, ensure side effects are properly initialized to empty.
   if (kIsDebugBuild) {
@@ -54,7 +54,7 @@ void SideEffectsAnalysis::Run() {
       }
     }
 
-    block_effects_.Put(block->GetBlockId(), effects);
+    block_effects_[block->GetBlockId()] = effects;
 
     if (block->IsLoopHeader()) {
       // The side effects of the loop header are part of the loop.
@@ -76,16 +76,19 @@ void SideEffectsAnalysis::Run() {
 
 SideEffects SideEffectsAnalysis::GetLoopEffects(HBasicBlock* block) const {
   DCHECK(block->IsLoopHeader());
-  return loop_effects_.Get(block->GetBlockId());
+  DCHECK_LT(block->GetBlockId(), loop_effects_.size());
+  return loop_effects_[block->GetBlockId()];
 }
 
 SideEffects SideEffectsAnalysis::GetBlockEffects(HBasicBlock* block) const {
-  return block_effects_.Get(block->GetBlockId());
+  DCHECK_LT(block->GetBlockId(), block_effects_.size());
+  return block_effects_[block->GetBlockId()];
 }
 
 void SideEffectsAnalysis::UpdateLoopEffects(HLoopInformation* info, SideEffects effects) {
-  int id = info->GetHeader()->GetBlockId();
-  loop_effects_.Put(id, loop_effects_.Get(id).Union(effects));
+  uint32_t id = info->GetHeader()->GetBlockId();
+  DCHECK_LT(id, loop_effects_.size());
+  loop_effects_[id] = loop_effects_[id].Union(effects);
 }
 
 }  // namespace art
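GrowableArray::Get() performed its own bounds check; ArenaVector's operator[] does not, so the conversion adds explicit DCHECK_LT guards that fire only in debug builds. A sketch of the idiom, with assert standing in for DCHECK_LT and a placeholder SideEffects type:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct SideEffects { uint64_t flags = 0; };  // placeholder for art::SideEffects

    SideEffects GetBlockEffects(const std::vector<SideEffects>& block_effects,
                                uint32_t block_id) {
      assert(block_id < block_effects.size());  // debug-only, like DCHECK_LT
      return block_effects[block_id];           // operator[] itself is unchecked
    }

    int main() {
      std::vector<SideEffects> effects(4);
      return GetBlockEffects(effects, 2).flags == 0 ? 0 : 1;
    }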
index 9888140..bac6088 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_SIDE_EFFECTS_ANALYSIS_H_
 #define ART_COMPILER_OPTIMIZING_SIDE_EFFECTS_ANALYSIS_H_
 
+#include "base/arena_containers.h"
 #include "nodes.h"
 #include "optimization.h"
 
@@ -27,8 +28,10 @@ class SideEffectsAnalysis : public HOptimization {
   explicit SideEffectsAnalysis(HGraph* graph)
       : HOptimization(graph, kSideEffectsAnalysisPassName),
         graph_(graph),
-        block_effects_(graph->GetArena(), graph->GetBlocks().size(), SideEffects::None()),
-        loop_effects_(graph->GetArena(), graph->GetBlocks().size(), SideEffects::None()) {}
+        block_effects_(graph->GetBlocks().size(),
+                       graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)),
+        loop_effects_(graph->GetBlocks().size(),
+                      graph->GetArena()->Adapter(kArenaAllocSideEffectsAnalysis)) {}
 
   SideEffects GetLoopEffects(HBasicBlock* block) const;
   SideEffects GetBlockEffects(HBasicBlock* block) const;
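The constructor now sizes both vectors up front and routes every allocation through an adapter tagged with kArenaAllocSideEffectsAnalysis, which is what lets arena statistics attribute memory to this pass. A simplified, hypothetical model of such a tagged allocator; ART's real adapter charges an ArenaAllocator rather than the global heap:

    #include <cstddef>
    #include <vector>

    enum ArenaAllocKind { kArenaAllocSideEffectsAnalysis, kNumArenaAllocKinds };

    size_t g_bytes_by_kind[kNumArenaAllocKinds] = {};  // per-kind statistics

    // Minimal tagged STL allocator: every allocation is attributed to a kind.
    template <typename T>
    struct TaggedAllocator {
      using value_type = T;
      ArenaAllocKind kind;
      explicit TaggedAllocator(ArenaAllocKind k) : kind(k) {}
      template <typename U>
      TaggedAllocator(const TaggedAllocator<U>& other) : kind(other.kind) {}
      T* allocate(size_t n) {
        g_bytes_by_kind[kind] += n * sizeof(T);
        return static_cast<T*>(::operator new(n * sizeof(T)));
      }
      void deallocate(T* p, size_t) { ::operator delete(p); }
    };
    template <typename T, typename U>
    bool operator==(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) {
      return a.kind == b.kind;
    }
    template <typename T, typename U>
    bool operator!=(const TaggedAllocator<T>& a, const TaggedAllocator<U>& b) {
      return !(a == b);
    }

    int main() {
      // ~ block_effects_(size, arena->Adapter(kArenaAllocSideEffectsAnalysis))
      std::vector<int, TaggedAllocator<int>> block_effects(
          16, 0, TaggedAllocator<int>(kArenaAllocSideEffectsAnalysis));
      return g_bytes_by_kind[kArenaAllocSideEffectsAnalysis] >= 16 * sizeof(int)
                 ? 0 : 1;
    }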
@@ -51,11 +54,11 @@ class SideEffectsAnalysis : public HOptimization {
 
   // Side effects of individual blocks, that is the union of the side effects
   // of the instructions in the block.
-  GrowableArray<SideEffects> block_effects_;
+  ArenaVector<SideEffects> block_effects_;
 
   // Side effects of loops, that is the union of the side effects of the
   // blocks contained in that loop.
-  GrowableArray<SideEffects> loop_effects_;
+  ArenaVector<SideEffects> loop_effects_;
 
   ART_FRIEND_TEST(GVNTest, LoopSideEffects);
   DISALLOW_COPY_AND_ASSIGN(SideEffectsAnalysis);
index 1e9a813..b869d57 100644 (file)
@@ -43,11 +43,11 @@ static bool IsInnerLoop(HLoopInformation* outer, HLoopInformation* inner) {
       && inner->IsIn(*outer);
 }
 
-static void AddToListForLinearization(GrowableArray<HBasicBlock*>* worklist, HBasicBlock* block) {
-  size_t insert_at = worklist->Size();
+static void AddToListForLinearization(ArenaVector<HBasicBlock*>* worklist, HBasicBlock* block) {
   HLoopInformation* block_loop = block->GetLoopInformation();
-  for (; insert_at > 0; --insert_at) {
-    HBasicBlock* current = worklist->Get(insert_at - 1);
+  auto insert_pos = worklist->rbegin();  // insert_pos.base() will be the actual position.
+  for (auto end = worklist->rend(); insert_pos != end; ++insert_pos) {
+    HBasicBlock* current = *insert_pos;
     HLoopInformation* current_loop = current->GetLoopInformation();
     if (InSameLoop(block_loop, current_loop)
         || !IsLoop(current_loop)
@@ -56,7 +56,7 @@ static void AddToListForLinearization(GrowableArray<HBasicBlock*>* worklist, HBa
       break;
     }
   }
-  worklist->InsertAt(insert_at, block);
+  worklist->insert(insert_pos.base(), block);
 }
 
 void SsaLivenessAnalysis::LinearizeGraph() {
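The index-based backwards walk becomes a reverse_iterator walk, with the insertion point recovered through insert_pos.base(). The subtlety: for a reverse_iterator rit, rit.base() is the forward iterator one element past *rit, which is exactly where insert() must be called to place a new element after the current one. A self-contained illustration with plain ints instead of HBasicBlock*:

    #include <cassert>
    #include <vector>

    // Scan from the back for the first element <= value and insert just
    // after it; rit.base() names that position as a forward iterator.
    void InsertFromBack(std::vector<int>* v, int value) {
      auto rit = v->rbegin();
      for (auto rend = v->rend(); rit != rend; ++rit) {
        if (*rit <= value) {
          break;  // insert after *rit
        }
      }
      v->insert(rit.base(), value);
    }

    int main() {
      std::vector<int> v = {1, 3, 5};
      InsertFromBack(&v, 4);  // scans 5, stops at 3, inserts between 3 and 5
      assert((v == std::vector<int>{1, 3, 4, 5}));
      return 0;
    }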
@@ -69,15 +69,15 @@ void SsaLivenessAnalysis::LinearizeGraph() {
   //      current reverse post order in the graph, but it would require making
   //      order queries to an ArenaVector, which is not the best data structure
   //      for it.
-  GrowableArray<uint32_t> forward_predecessors(graph_->GetArena(), graph_->GetBlocks().size());
-  forward_predecessors.SetSize(graph_->GetBlocks().size());
+  ArenaVector<uint32_t> forward_predecessors(graph_->GetBlocks().size(),
+                                             graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
     size_t number_of_forward_predecessors = block->GetPredecessors().size();
     if (block->IsLoopHeader()) {
       number_of_forward_predecessors -= block->GetLoopInformation()->NumberOfBackEdges();
     }
-    forward_predecessors.Put(block->GetBlockId(), number_of_forward_predecessors);
+    forward_predecessors[block->GetBlockId()] = number_of_forward_predecessors;
   }
 
   // (2): Following a worklist approach, first start with the entry block, and
@@ -85,20 +85,21 @@ void SsaLivenessAnalysis::LinearizeGraph() {
   //      successor block are visited, the successor block is added in the worklist
   //      following an order that satisfies the requirements to build our linear graph.
   graph_->linear_order_.reserve(graph_->GetReversePostOrder().size());
-  GrowableArray<HBasicBlock*> worklist(graph_->GetArena(), 1);
-  worklist.Add(graph_->GetEntryBlock());
+  ArenaVector<HBasicBlock*> worklist(graph_->GetArena()->Adapter(kArenaAllocSsaLiveness));
+  worklist.push_back(graph_->GetEntryBlock());
   do {
-    HBasicBlock* current = worklist.Pop();
+    HBasicBlock* current = worklist.back();
+    worklist.pop_back();
     graph_->linear_order_.push_back(current);
     for (HBasicBlock* successor : current->GetSuccessors()) {
       int block_id = successor->GetBlockId();
-      size_t number_of_remaining_predecessors = forward_predecessors.Get(block_id);
+      size_t number_of_remaining_predecessors = forward_predecessors[block_id];
       if (number_of_remaining_predecessors == 1) {
         AddToListForLinearization(&worklist, successor);
       }
-      forward_predecessors.Put(block_id, number_of_remaining_predecessors - 1);
+      forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
     }
-  } while (!worklist.IsEmpty());
+  } while (!worklist.empty());
 }
 
 void SsaLivenessAnalysis::NumberInstructions() {
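LinearizeGraph is a Kahn-style worklist traversal: each successor's remaining forward-predecessor count drops as its predecessors are emitted, and a block is enqueued exactly when the current predecessor is its last one. A generic sketch over a hypothetical four-block diamond (no loop back-edges, which the real pass subtracts beforehand):

    #include <cstdio>
    #include <vector>

    int main() {
      // successors[b] lists the forward successors of block b; 0 is the entry.
      std::vector<std::vector<int>> successors = {{1, 2}, {3}, {3}, {}};
      std::vector<int> remaining_preds = {0, 1, 1, 2};

      std::vector<int> worklist = {0};
      while (!worklist.empty()) {
        int current = worklist.back();  // ~ worklist.back() + pop_back()
        worklist.pop_back();
        std::printf("%d ", current);    // emits the linear order: 0 2 1 3
        for (int succ : successors[current]) {
          // The pass tests "== 1" before decrementing; decrement-then-test
          // against zero is the same condition.
          if (--remaining_preds[succ] == 0) {
            worklist.push_back(succ);
          }
        }
      }
      std::printf("\n");
      return 0;
    }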
@@ -122,7 +123,7 @@ void SsaLivenessAnalysis::NumberInstructions() {
       codegen_->AllocateLocations(current);
       LocationSummary* locations = current->GetLocations();
       if (locations != nullptr && locations->Out().IsValid()) {
-        instructions_from_ssa_index_.Add(current);
+        instructions_from_ssa_index_.push_back(current);
         current->SetSsaIndex(ssa_index++);
         current->SetLiveInterval(
             LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
@@ -132,7 +133,7 @@ void SsaLivenessAnalysis::NumberInstructions() {
     lifetime_position += 2;
 
     // Add a null marker to signal the start of a block.
-    instructions_from_lifetime_position_.Add(nullptr);
+    instructions_from_lifetime_position_.push_back(nullptr);
 
     for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
          inst_it.Advance()) {
@@ -140,12 +141,12 @@ void SsaLivenessAnalysis::NumberInstructions() {
       codegen_->AllocateLocations(current);
       LocationSummary* locations = current->GetLocations();
       if (locations != nullptr && locations->Out().IsValid()) {
-        instructions_from_ssa_index_.Add(current);
+        instructions_from_ssa_index_.push_back(current);
         current->SetSsaIndex(ssa_index++);
         current->SetLiveInterval(
             LiveInterval::MakeInterval(graph_->GetArena(), current->GetType(), current));
       }
-      instructions_from_lifetime_position_.Add(current);
+      instructions_from_lifetime_position_.push_back(current);
       current->SetLifetimePosition(lifetime_position);
       lifetime_position += 2;
     }
@@ -158,9 +159,9 @@ void SsaLivenessAnalysis::NumberInstructions() {
 void SsaLivenessAnalysis::ComputeLiveness() {
   for (HLinearOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
-    block_infos_.Put(
-        block->GetBlockId(),
-        new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_));
+    DCHECK_LT(block->GetBlockId(), block_infos_.size());
+    block_infos_[block->GetBlockId()] =
+        new (graph_->GetArena()) BlockInfo(graph_->GetArena(), *block, number_of_ssa_values_);
   }
 
   // Compute the live ranges, as well as the initial live_in, live_out, and kill sets.
@@ -212,7 +213,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
     // Add a range covering this block to all instructions that are live_in
     // because of successors; instructions defined in this block will have
     // the start of their range adjusted.
     for (uint32_t idx : live_in->Indexes()) {
-      HInstruction* current = instructions_from_ssa_index_.Get(idx);
+      HInstruction* current = GetInstructionFromSsaIndex(idx);
       current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
     }
 
@@ -277,7 +278,7 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
       // For all live_in instructions at the loop header, we need to create a range
       // that covers the full loop.
       for (uint32_t idx : live_in->Indexes()) {
-        HInstruction* current = instructions_from_ssa_index_.Get(idx);
+        HInstruction* current = GetInstructionFromSsaIndex(idx);
         current->GetLiveInterval()->AddLoopRange(block->GetLifetimeStart(), last_position);
       }
     }
index 3aedaa5..414cc7d 100644 (file)
@@ -206,7 +206,7 @@ class SafepointPosition : public ArenaObject<kArenaAllocMisc> {
  * An interval is a list of disjoint live ranges where an instruction is live.
  * Each instruction that has uses gets an interval.
  */
-class LiveInterval : public ArenaObject<kArenaAllocMisc> {
+class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> {
  public:
   static LiveInterval* MakeInterval(ArenaAllocator* allocator,
                                     Primitive::Type type,
@@ -1106,33 +1106,39 @@ class SsaLivenessAnalysis : public ValueObject {
   SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
       : graph_(graph),
         codegen_(codegen),
-        block_infos_(graph->GetArena(), graph->GetBlocks().size()),
-        instructions_from_ssa_index_(graph->GetArena(), 0),
-        instructions_from_lifetime_position_(graph->GetArena(), 0),
+        block_infos_(graph->GetBlocks().size(),
+                     nullptr,
+                     graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
+        instructions_from_ssa_index_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
+        instructions_from_lifetime_position_(graph->GetArena()->Adapter(kArenaAllocSsaLiveness)),
         number_of_ssa_values_(0) {
-    block_infos_.SetSize(graph->GetBlocks().size());
   }
 
   void Analyze();
 
   BitVector* GetLiveInSet(const HBasicBlock& block) const {
-    return &block_infos_.Get(block.GetBlockId())->live_in_;
+    DCHECK_LT(block.GetBlockId(), block_infos_.size());
+    return &block_infos_[block.GetBlockId()]->live_in_;
   }
 
   BitVector* GetLiveOutSet(const HBasicBlock& block) const {
-    return &block_infos_.Get(block.GetBlockId())->live_out_;
+    DCHECK_LT(block.GetBlockId(), block_infos_.size());
+    return &block_infos_[block.GetBlockId()]->live_out_;
   }
 
   BitVector* GetKillSet(const HBasicBlock& block) const {
-    return &block_infos_.Get(block.GetBlockId())->kill_;
+    DCHECK_LT(block.GetBlockId(), block_infos_.size());
+    return &block_infos_[block.GetBlockId()]->kill_;
   }
 
   HInstruction* GetInstructionFromSsaIndex(size_t index) const {
-    return instructions_from_ssa_index_.Get(index);
+    DCHECK_LT(index, instructions_from_ssa_index_.size());
+    return instructions_from_ssa_index_[index];
   }
 
   HInstruction* GetInstructionFromPosition(size_t index) const {
-    return instructions_from_lifetime_position_.Get(index);
+    DCHECK_LT(index, instructions_from_lifetime_position_.size());
+    return instructions_from_lifetime_position_[index];
   }
 
   HBasicBlock* GetBlockFromPosition(size_t index) const {
@@ -1163,7 +1169,7 @@ class SsaLivenessAnalysis : public ValueObject {
   }
 
   size_t GetMaxLifetimePosition() const {
-    return instructions_from_lifetime_position_.Size() * 2 - 1;
+    return instructions_from_lifetime_position_.size() * 2 - 1;
   }
 
   size_t GetNumberOfSsaValues() const {
@@ -1218,13 +1224,13 @@ class SsaLivenessAnalysis : public ValueObject {
 
   HGraph* const graph_;
   CodeGenerator* const codegen_;
-  GrowableArray<BlockInfo*> block_infos_;
+  ArenaVector<BlockInfo*> block_infos_;
 
   // Temporary array used when computing live_in, live_out, and kill sets.
-  GrowableArray<HInstruction*> instructions_from_ssa_index_;
+  ArenaVector<HInstruction*> instructions_from_ssa_index_;
 
   // Temporary array used when inserting moves in the graph.
-  GrowableArray<HInstruction*> instructions_from_lifetime_position_;
+  ArenaVector<HInstruction*> instructions_from_lifetime_position_;
   size_t number_of_ssa_values_;
 
   ART_FRIEND_TEST(RegisterAllocatorTest, SpillInactive);
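One conversion detail visible in this constructor: a sized ArenaVector must be given an explicit default value (nullptr for block_infos_) alongside the tagged adapter, replacing the separate SetSize() call that used to sit in the constructor body. The equivalent std::vector calls, assuming ArenaVector is essentially std::vector with an arena allocator (BlockInfo is only forward-declared here):

    #include <cstddef>
    #include <vector>

    struct BlockInfo;  // the real definition lives in ssa_liveness_analysis.h

    int main() {
      const size_t num_blocks = 8;
      // ~ block_infos_(graph->GetBlocks().size(), nullptr, adapter)
      std::vector<BlockInfo*> block_infos(num_blocks, nullptr);
      // ~ instructions_from_ssa_index_(adapter), grown later via push_back
      std::vector<BlockInfo*> instructions_from_ssa_index;
      return block_infos.size() == num_blocks ? 0 : 1;
    }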
index a9f04cd..72f9ddd 100644 (file)
@@ -35,7 +35,7 @@ void SsaDeadPhiElimination::MarkDeadPhis() {
         HUseListNode<HInstruction*>* current = use_it.Current();
         HInstruction* user = current->GetUser();
         if (!user->IsPhi()) {
-          worklist_.Add(phi);
+          worklist_.push_back(phi);
           phi->SetLive();
           break;
         }
@@ -44,12 +44,13 @@ void SsaDeadPhiElimination::MarkDeadPhis() {
   }
 
   // Process the worklist by propagating liveness to phi inputs.
-  while (!worklist_.IsEmpty()) {
-    HPhi* phi = worklist_.Pop();
+  while (!worklist_.empty()) {
+    HPhi* phi = worklist_.back();
+    worklist_.pop_back();
     for (HInputIterator it(phi); !it.Done(); it.Advance()) {
       HInstruction* input = it.Current();
       if (input->IsPhi() && input->AsPhi()->IsDead()) {
-        worklist_.Add(input->AsPhi());
+        worklist_.push_back(input->AsPhi());
         input->AsPhi()->SetLive();
       }
     }
@@ -103,12 +104,13 @@ void SsaRedundantPhiElimination::Run() {
   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
-      worklist_.Add(inst_it.Current()->AsPhi());
+      worklist_.push_back(inst_it.Current()->AsPhi());
     }
   }
 
-  while (!worklist_.IsEmpty()) {
-    HPhi* phi = worklist_.Pop();
+  while (!worklist_.empty()) {
+    HPhi* phi = worklist_.back();
+    worklist_.pop_back();
 
     // If the phi has already been processed, continue.
     if (!phi->IsInBlock()) {
@@ -155,7 +157,7 @@ void SsaRedundantPhiElimination::Run() {
       HUseListNode<HInstruction*>* current = it.Current();
       HInstruction* user = current->GetUser();
       if (user->IsPhi()) {
-        worklist_.Add(user->AsPhi());
+        worklist_.push_back(user->AsPhi());
       }
     }
 
index 67351f2..b48e820 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
 #define ART_COMPILER_OPTIMIZING_SSA_PHI_ELIMINATION_H_
 
+#include "base/arena_containers.h"
 #include "nodes.h"
 #include "optimization.h"
 
@@ -30,7 +31,9 @@ class SsaDeadPhiElimination : public HOptimization {
  public:
   explicit SsaDeadPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaDeadPhiEliminationPassName),
-        worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+        worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+    worklist_.reserve(kDefaultWorklistSize);
+  }
 
   void Run() OVERRIDE;
 
@@ -40,7 +43,7 @@ class SsaDeadPhiElimination : public HOptimization {
   static constexpr const char* kSsaDeadPhiEliminationPassName = "dead_phi_elimination";
 
  private:
-  GrowableArray<HPhi*> worklist_;
+  ArenaVector<HPhi*> worklist_;
 
   static constexpr size_t kDefaultWorklistSize = 8;
 
@@ -57,14 +60,16 @@ class SsaRedundantPhiElimination : public HOptimization {
  public:
   explicit SsaRedundantPhiElimination(HGraph* graph)
       : HOptimization(graph, kSsaRedundantPhiEliminationPassName),
-        worklist_(graph->GetArena(), kDefaultWorklistSize) {}
+        worklist_(graph->GetArena()->Adapter(kArenaAllocSsaPhiElimination)) {
+    worklist_.reserve(kDefaultWorklistSize);
+  }
 
   void Run() OVERRIDE;
 
   static constexpr const char* kSsaRedundantPhiEliminationPassName = "redundant_phi_elimination";
 
  private:
-  GrowableArray<HPhi*> worklist_;
+  ArenaVector<HPhi*> worklist_;
 
   static constexpr size_t kDefaultWorklistSize = 8;
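GrowableArray took an initial capacity as its second constructor argument; ArenaVector has no such parameter, so both elimination passes now call reserve() in the constructor body to keep small worklists reallocation-free. A minimal sketch of the pattern (WorklistOwner is a hypothetical stand-in for the passes):

    #include <cstddef>
    #include <vector>

    class WorklistOwner {
     public:
      WorklistOwner() { worklist_.reserve(kDefaultWorklistSize); }
      std::vector<int> worklist_;  // ~ ArenaVector<HPhi*>

     private:
      static constexpr size_t kDefaultWorklistSize = 8;
    };

    int main() {
      WorklistOwner owner;
      return owner.worklist_.capacity() >= 8 ? 0 : 1;
    }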
 
index 4e51f55..c1a1088 100644 (file)
@@ -55,6 +55,7 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
   "RegAlloc     ",
   "Data         ",
   "STL          ",
+  "GraphBuilder ",
   "Graph        ",
   "BasicBlock   ",
   "BlockList    ",
@@ -74,12 +75,20 @@ const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
   "Environment  ",
   "EnvVRegs     ",
   "EnvLocations ",
+  "LocSummary   ",
   "SsaBuilder   ",
   "MoveOperands ",
   "CodeBuffer   ",
   "StackMaps    ",
   "BaselineMaps ",
   "Optimization ",
+  "GVN          ",
+  "SsaLiveness  ",
+  "SsaPhiElim   ",
+  "RefTypeProp  ",
+  "PrimTypeProp ",
+  "SideEffects  ",
+  "RegAllocator ",
 };
 
 template <bool kCount>
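kAllocNames is indexed by ArenaAllocKind, so the seven new strings must stay in lockstep with the seven new enum values added in arena_allocator.h below. A compile-time guard of the kind one might add to catch drift (illustrative only; whether ART's file carries an equivalent assert is not shown in this diff):

    #include <cstddef>

    enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocGvn, kNumArenaAllocKinds };

    static const char* const kAllocNames[] = {
      "Misc         ",
      "GVN          ",
    };

    // Fails to compile if a kind is added without a matching name string.
    static_assert(sizeof(kAllocNames) / sizeof(kAllocNames[0]) ==
                      static_cast<size_t>(kNumArenaAllocKinds),
                  "kAllocNames must cover every ArenaAllocKind");

    int main() { return 0; }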
index c5eb741..be96862 100644 (file)
@@ -65,6 +65,7 @@ enum ArenaAllocKind {
   kArenaAllocRegAlloc,
   kArenaAllocData,
   kArenaAllocSTL,
+  kArenaAllocGraphBuilder,
   kArenaAllocGraph,
   kArenaAllocBasicBlock,
   kArenaAllocBlockList,
@@ -84,12 +85,20 @@ enum ArenaAllocKind {
   kArenaAllocEnvironment,
   kArenaAllocEnvironmentVRegs,
   kArenaAllocEnvironmentLocations,
+  kArenaAllocLocationSummary,
   kArenaAllocSsaBuilder,
   kArenaAllocMoveOperands,
   kArenaAllocCodeBuffer,
   kArenaAllocStackMaps,
   kArenaAllocBaselineMaps,
   kArenaAllocOptimization,
+  kArenaAllocGvn,
+  kArenaAllocSsaLiveness,
+  kArenaAllocSsaPhiElimination,
+  kArenaAllocReferenceTypePropagation,
+  kArenaAllocPrimitiveTypePropagation,
+  kArenaAllocSideEffectsAnalysis,
+  kArenaAllocRegisterAllocator,
   kNumArenaAllocKinds
 };
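Each new kind is consumed in two ways in this change: as the Adapter() argument for containers, and as the template argument of ArenaObject<> for objects placement-new'ed on the arena (see the LiveInterval base-class change above). A simplified, hypothetical model of the ArenaObject side, with a global byte counter in place of a real ArenaAllocator:

    #include <cstddef>
    #include <new>

    enum ArenaAllocKind { kArenaAllocSsaLiveness, kNumArenaAllocKinds };

    size_t g_bytes_by_kind[kNumArenaAllocKinds] = {};

    // Simplified ArenaObject: operator new tags the allocation with kKind.
    template <ArenaAllocKind kKind>
    struct ArenaObject {
      static void* operator new(size_t size) {
        g_bytes_by_kind[kKind] += size;  // the real class charges an arena
        return ::operator new(size);
      }
      static void operator delete(void* p) { ::operator delete(p); }
    };

    struct LiveIntervalLike : ArenaObject<kArenaAllocSsaLiveness> {
      int start = 0;
      int end = 0;
    };

    int main() {
      LiveIntervalLike* interval = new LiveIntervalLike();  // tagged allocation
      delete interval;
      return g_bytes_by_kind[kArenaAllocSsaLiveness] == sizeof(LiveIntervalLike)
                 ? 0 : 1;
    }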