return false;
}
-/*
- * BasicBlock Optimization pass implementation start.
- */
-void BBOptimizations::Start(PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(c_unit != nullptr);
- /*
- * This pass has a different ordering depEnding on the suppress exception,
- * so do the pass here for now:
- * - Later, the Start should just change the ordering and we can move the extended
- * creation into the pass driver's main job with a new iterator
- */
- c_unit->mir_graph->BasicBlockOptimization();
-}
-
} // namespace art
return ((c_unit->disable_opt & (1 << kBBOpt)) == 0);
}
- void Start(PassDataHolder* data) const;
+ void Start(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->BasicBlockOptimizationStart();
+
+ /*
+ * This pass has a different ordering depending on whether exception edges are
+ * suppressed, so do the pass here for now:
+ * - Later, the Start should just change the ordering and we can move the extended
+ * creation into the pass driver's main job with a new iterator
+ */
+ c_unit->mir_graph->BasicBlockOptimization();
+ }
+
+ void End(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ c_unit->mir_graph->BasicBlockOptimizationEnd();
+ }
};
/**
bb->data_flow_info->live_in_v = live_in_v_;
}
}
- cu_.mir_graph->num_blocks_ = count;
ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
skip_compilation = true;
*skip_message = "Huge method: " + std::to_string(GetNumDalvikInsns());
-// If we're got a huge number of basic blocks, don't bother with further analysis.
+// If we've got a huge number of basic blocks, don't bother with further analysis.
- if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
+ if (static_cast<size_t>(GetNumBlocks()) > (compiler_options.GetHugeMethodThreshold() / 2)) {
return true;
}
} else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
}
int MIRGraph::AddNewSReg(int v_reg) {
- int subscript = ++ssa_last_defs_[v_reg];
+ int subscript = ++temp_.ssa.ssa_last_defs_[v_reg];
uint32_t ssa_reg = GetNumSSARegs();
SetNumSSARegs(ssa_reg + 1);
ssa_base_vregs_.push_back(v_reg);
/* Find out the latest SSA register for a given Dalvik register */
void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) {
DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
- uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
+ uses[reg_index] = temp_.ssa.vreg_to_ssa_map_[dalvik_reg];
}
/* Setup a new SSA register for a given Dalvik register */
void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
int ssa_reg = AddNewSReg(dalvik_reg);
- vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
+ temp_.ssa.vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
defs[reg_index] = ssa_reg;
}
static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumOfCodeAndTempVRs(),
kArenaAllocDFInfo));
- memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
+ memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, temp_.ssa.vreg_to_ssa_map_,
sizeof(int) * GetNumOfCodeAndTempVRs());
return true;
}
* Initialize the DalvikToSSAMap map. There is one entry for each
* Dalvik register, and the SSA names for those are the same.
*/
- vreg_to_ssa_map_ =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
- kArenaAllocDFInfo));
+ temp_.ssa.vreg_to_ssa_map_ =
+ reinterpret_cast<int*>(temp_scoped_alloc_->Alloc(sizeof(int) * num_reg, kArenaAllocDFInfo));
/* Keep track of the higest def for each dalvik reg */
- ssa_last_defs_ =
- static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
- kArenaAllocDFInfo));
+ temp_.ssa.ssa_last_defs_ =
+ reinterpret_cast<int*>(temp_scoped_alloc_->Alloc(sizeof(int) * num_reg, kArenaAllocDFInfo));
for (unsigned int i = 0; i < num_reg; i++) {
- vreg_to_ssa_map_[i] = i;
- ssa_last_defs_[i] = 0;
+ temp_.ssa.vreg_to_ssa_map_[i] = i;
+ temp_.ssa.ssa_last_defs_[i] = 0;
}
// Create a compiler temporary for Method*. This is done after SSA initialization.
cu_(cu),
ssa_base_vregs_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
ssa_subscripts_(arena->Adapter(kArenaAllocSSAToDalvikMap)),
- vreg_to_ssa_map_(NULL),
- ssa_last_defs_(NULL),
is_constant_v_(NULL),
constant_values_(NULL),
use_counts_(arena->Adapter()),
num_reachable_blocks_(0),
max_num_reachable_blocks_(0),
dfs_orders_up_to_date_(false),
+ domination_up_to_date_(false),
+ mir_ssa_rep_up_to_date_(false),
+ topological_order_up_to_date_(false),
dfs_order_(arena->Adapter(kArenaAllocDfsPreOrder)),
dfs_post_order_(arena->Adapter(kArenaAllocDfsPostOrder)),
dom_post_order_traversal_(arena->Adapter(kArenaAllocDomPostOrder)),
try_block_addr_(NULL),
entry_block_(NULL),
exit_block_(NULL),
- num_blocks_(0),
current_code_item_(NULL),
dex_pc_to_block_map_(arena->Adapter()),
m_units_(arena->Adapter()),
if (current_method_ == 0) {
DCHECK(entry_block_ == NULL);
DCHECK(exit_block_ == NULL);
- DCHECK_EQ(num_blocks_, 0U);
+ DCHECK_EQ(GetNumBlocks(), 0U);
// Use id 0 to represent a null block.
BasicBlock* null_block = CreateNewBB(kNullBlock);
DCHECK_EQ(null_block->id, NullBasicBlockId);
// Update the maximum number of reachable blocks.
max_num_reachable_blocks_ = num_reachable_blocks_;
+
+ // Mark MIR SSA representations as up to date.
+ mir_ssa_rep_up_to_date_ = true;
}
size_t MIRGraph::GetNumDalvikInsns() const {
topological_order_loop_head_stack_.clear();
topological_order_loop_head_stack_.reserve(max_nested_loops);
max_nested_loops_ = max_nested_loops;
+ topological_order_up_to_date_ = true;
}
bool BasicBlock::IsExceptionBlock() const {
-// Create a new basic block with block_id as num_blocks_ that is
-// post-incremented.
+// Create a new basic block whose id is the current number of blocks, i.e. the
+// next index in block_list_.
BasicBlock* MIRGraph::CreateNewBB(BBType block_type) {
- BasicBlock* res = NewMemBB(block_type, num_blocks_++);
+ BasicBlockId id = static_cast<BasicBlockId>(block_list_.size());
+ BasicBlock* res = NewMemBB(block_type, id);
block_list_.push_back(res);
return res;
}
driver.Launch();
}
-void MIRGraph::InitializeBasicBlockData() {
- num_blocks_ = block_list_.size();
-}
-
int MIR::DecodedInstruction::FlagsOf() const {
// Calculate new index.
int idx = static_cast<int>(opcode) - kNumPackedOpcodes;
}
unsigned int GetNumBlocks() const {
- return num_blocks_;
+ return block_list_.size();
}
/**
void DumpRegLocTable(RegLocation* table, int count);
+ void BasicBlockOptimizationStart();
void BasicBlockOptimization();
+ void BasicBlockOptimizationEnd();
const ArenaVector<BasicBlockId>& GetTopologicalSortOrder() {
DCHECK(!topological_order_.empty());
void AllocateSSAUseData(MIR *mir, int num_uses);
void AllocateSSADefData(MIR *mir, int num_defs);
void CalculateBasicBlockInformation();
- void InitializeBasicBlockData();
void ComputeDFSOrders();
void ComputeDefBlockMatrix();
void ComputeDominators();
return dfs_orders_up_to_date_;
}
+ bool DominationUpToDate() const {
+ return domination_up_to_date_;
+ }
+
+ bool MirSsaRepUpToDate() const {
+ return mir_ssa_rep_up_to_date_;
+ }
+
+ bool TopologicalOrderUpToDate() const {
+ return topological_order_up_to_date_;
+ }
+
/*
* IsDebugBuild sanity check: keep track of the Dex PCs for catch entries so that later on
* we can verify that all catch entries have native PC entries.
CompilationUnit* const cu_;
ArenaVector<int> ssa_base_vregs_;
ArenaVector<int> ssa_subscripts_;
- // Map original Dalvik virtual reg i to the current SSA name.
- int* vreg_to_ssa_map_; // length == method->registers_size
- int* ssa_last_defs_; // length == method->registers_size
ArenaBitVector* is_constant_v_; // length == num_ssa_reg
int* constant_values_; // length == num_ssa_reg
// Use counts of ssa names.
unsigned int num_reachable_blocks_;
unsigned int max_num_reachable_blocks_;
bool dfs_orders_up_to_date_;
+ bool domination_up_to_date_;
+ bool mir_ssa_rep_up_to_date_;
+ bool topological_order_up_to_date_;
ArenaVector<BasicBlockId> dfs_order_;
ArenaVector<BasicBlockId> dfs_post_order_;
ArenaVector<BasicBlockId> dom_post_order_traversal_;
size_t num_vregs;
ArenaBitVector* work_live_vregs;
-ArenaBitVector** def_block_matrix; // num_vregs x num_blocks_.
+ArenaBitVector** def_block_matrix; // num_vregs x number of basic blocks.
+ // Map original Dalvik virtual reg i to the current SSA name.
+ int* vreg_to_ssa_map_; // length == method->registers_size
+ int* ssa_last_defs_; // length == method->registers_size
} ssa;
// Global value numbering.
struct {
ArenaBitVector* try_block_addr_;
BasicBlock* entry_block_;
BasicBlock* exit_block_;
- unsigned int num_blocks_;
const DexFile::CodeItem* current_code_item_;
ArenaVector<uint16_t> dex_pc_to_block_map_; // FindBlock lookup cache.
ArenaVector<DexCompilationUnit*> m_units_; // List of methods included in this graph
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
}
}
- cu_.mir_graph->num_blocks_ = count;
ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
child->UpdatePredecessor(bb_next->id, bb->id);
}
- // DFS orders are not up to date anymore.
+ // DFS orders, domination and topological order are not up to date anymore.
dfs_orders_up_to_date_ = false;
+ domination_up_to_date_ = false;
+ topological_order_up_to_date_ = false;
// Now, loop back and see if we can keep going
}
return false; // Not iterative - return value will be ignored
}
-void MIRGraph::BasicBlockOptimization() {
+void MIRGraph::BasicBlockOptimizationStart() {
if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
temp_.gvn.ifield_ids_ =
temp_.gvn.sfield_ids_ =
GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
}
+}
+void MIRGraph::BasicBlockOptimization() {
if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
ClearAllVisitedFlags();
PreOrderDfsIterator iter2(this);
BasicBlockOpt(bb);
}
}
+}
+void MIRGraph::BasicBlockOptimizationEnd() {
// Clean up after LVN.
temp_.gvn.ifield_ids_ = nullptr;
temp_.gvn.sfield_ids_ = nullptr;
cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
}
}
- cu_.mir_graph->num_blocks_ = count;
ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
// The initial list of passes to be used by the PassDriveMEPostOpt.
template<>
const Pass* const PassDriver<PassDriverMEPostOpt>::g_passes[] = {
- GetPassInstance<InitializeData>(),
- GetPassInstance<ClearPhiInstructions>(),
- GetPassInstance<DFSOrders>(),
- GetPassInstance<BuildDomination>(),
- GetPassInstance<TopologicalSortOrders>(),
- GetPassInstance<DefBlockMatrix>(),
- GetPassInstance<CreatePhiNodes>(),
- GetPassInstance<ClearVisitedFlag>(),
- GetPassInstance<SSAConversion>(),
- GetPassInstance<PhiNodeOperands>(),
- GetPassInstance<ConstantPropagation>(),
- GetPassInstance<PerformInitRegLocations>(),
- GetPassInstance<MethodUseCount>(),
- GetPassInstance<FreeData>(),
+ GetPassInstance<DFSOrders>(),
+ GetPassInstance<BuildDomination>(),
+ GetPassInstance<TopologicalSortOrders>(),
+ GetPassInstance<InitializeSSATransformation>(),
+ GetPassInstance<ClearPhiInstructions>(),
+ GetPassInstance<DefBlockMatrix>(),
+ GetPassInstance<CreatePhiNodes>(),
+ GetPassInstance<SSAConversion>(),
+ GetPassInstance<PhiNodeOperands>(),
+ GetPassInstance<ConstantPropagation>(),
+ GetPassInstance<PerformInitRegLocations>(),
+ GetPassInstance<MethodUseCount>(),
+ GetPassInstance<FinishSSATransformation>(),
};
// The number of the passes in the initial list of Passes (g_passes).
namespace art {
/**
- * @class InitializeData
+ * @class PassMEMirSsaRep
+ * @brief Convenience class for passes that check MIRGraph::MirSsaRepUpToDate().
+ */
+class PassMEMirSsaRep : public PassME {
+ public:
+ PassMEMirSsaRep(const char* name, DataFlowAnalysisMode type = kAllNodes)
+ : PassME(name, type) {
+ }
+
+ bool Gate(const PassDataHolder* data) const OVERRIDE {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return !c_unit->mir_graph->MirSsaRepUpToDate();
+ }
+};
+
+/**
+ * @class InitializeSSATransformation
* @brief There is some data that needs to be initialized before performing
* the post optimization passes.
*/
-class InitializeData : public PassME {
+class InitializeSSATransformation : public PassMEMirSsaRep {
public:
- InitializeData() : PassME("InitializeData", kNoNodes) {
+ InitializeSSATransformation() : PassMEMirSsaRep("InitializeSSATransformation", kNoNodes) {
}
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph.get()->InitializeBasicBlockData();
- c_unit->mir_graph.get()->SSATransformationStart();
+ c_unit->mir_graph->SSATransformationStart();
+ c_unit->mir_graph->CompilerInitializeSSAConversion();
}
};
* @class ClearPhiInformation
* @brief Clear the PHI nodes from the CFG.
*/
-class ClearPhiInstructions : public PassME {
+class ClearPhiInstructions : public PassMEMirSsaRep {
public:
- ClearPhiInstructions() : PassME("ClearPhiInstructions") {
+ ClearPhiInstructions() : PassMEMirSsaRep("ClearPhiInstructions") {
}
bool Worker(PassDataHolder* data) const;
BuildDomination() : PassME("BuildDomination", kNoNodes) {
}
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return !c_unit->mir_graph->DominationUpToDate();
+ }
+
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph.get()->ComputeDominators();
- c_unit->mir_graph.get()->CompilerInitializeSSAConversion();
+ c_unit->mir_graph->ComputeDominators();
}
void End(PassDataHolder* data) const {
TopologicalSortOrders() : PassME("TopologicalSortOrders", kNoNodes) {
}
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(c_unit != nullptr);
+ return !c_unit->mir_graph->TopologicalOrderUpToDate();
+ }
+
void Start(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
* @class DefBlockMatrix
* @brief Calculate the matrix of definition per basic block
*/
-class DefBlockMatrix : public PassME {
+class DefBlockMatrix : public PassMEMirSsaRep {
public:
- DefBlockMatrix() : PassME("DefBlockMatrix", kNoNodes) {
+ DefBlockMatrix() : PassMEMirSsaRep("DefBlockMatrix", kNoNodes) {
}
void Start(PassDataHolder* data) const {
* @class CreatePhiNodes
* @brief Pass to create the phi nodes after SSA calculation
*/
-class CreatePhiNodes : public PassME {
+class CreatePhiNodes : public PassMEMirSsaRep {
public:
- CreatePhiNodes() : PassME("CreatePhiNodes", kNoNodes) {
+ CreatePhiNodes() : PassMEMirSsaRep("CreatePhiNodes", kNoNodes) {
}
void Start(PassDataHolder* data) const {
};
/**
- * @class ClearVisitedFlag
- * @brief Pass to clear the visited flag for all basic blocks.
- */
-
-class ClearVisitedFlag : public PassME {
- public:
- ClearVisitedFlag() : PassME("ClearVisitedFlag", kNoNodes) {
- }
-
- void Start(PassDataHolder* data) const {
- DCHECK(data != nullptr);
- CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
- DCHECK(c_unit != nullptr);
- c_unit->mir_graph.get()->ClearAllVisitedFlags();
- }
-};
-
-/**
* @class SSAConversion
* @brief Pass for SSA conversion of MIRs
*/
-class SSAConversion : public PassME {
+class SSAConversion : public PassMEMirSsaRep {
public:
- SSAConversion() : PassME("SSAConversion", kNoNodes) {
+ SSAConversion() : PassMEMirSsaRep("SSAConversion", kNoNodes) {
}
void Start(PassDataHolder* data) const {
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
MIRGraph *mir_graph = c_unit->mir_graph.get();
+ mir_graph->ClearAllVisitedFlags();
mir_graph->DoDFSPreOrderSSARename(mir_graph->GetEntryBlock());
}
};
* @class PhiNodeOperands
* @brief Pass to insert the Phi node operands to basic blocks
*/
-class PhiNodeOperands : public PassME {
+class PhiNodeOperands : public PassMEMirSsaRep {
public:
- PhiNodeOperands() : PassME("PhiNodeOperands", kPreOrderDFSTraversal) {
+ PhiNodeOperands() : PassMEMirSsaRep("PhiNodeOperands", kPreOrderDFSTraversal) {
}
bool Worker(PassDataHolder* data) const {
* @class InitRegLocations
* @brief Initialize Register Locations.
*/
-class PerformInitRegLocations : public PassME {
+class PerformInitRegLocations : public PassMEMirSsaRep {
public:
- PerformInitRegLocations() : PassME("PerformInitRegLocation", kNoNodes) {
+ PerformInitRegLocations() : PassMEMirSsaRep("PerformInitRegLocation", kNoNodes) {
}
void Start(PassDataHolder* data) const {
* @class ConstantPropagation
* @brief Perform a constant propagation pass.
*/
-class ConstantPropagation : public PassME {
+class ConstantPropagation : public PassMEMirSsaRep {
public:
- ConstantPropagation() : PassME("ConstantPropagation") {
+ ConstantPropagation() : PassMEMirSsaRep("ConstantPropagation") {
}
bool Worker(PassDataHolder* data) const {
};
/**
- * @class FreeData
+ * @class FinishSSATransformation
* @brief There is some data that needs to be freed after performing the post optimization passes.
*/
-class FreeData : public PassME {
+class FinishSSATransformation : public PassMEMirSsaRep {
public:
- FreeData() : PassME("FreeData", kNoNodes) {
+ FinishSSATransformation() : PassMEMirSsaRep("FinishSSATransformation", kNoNodes) {
}
void End(PassDataHolder* data) const {
num_reachable_blocks_ = dfs_order_.size();
- if (num_reachable_blocks_ != num_blocks_) {
+ if (num_reachable_blocks_ != GetNumBlocks()) {
// Kill all unreachable blocks.
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
dom_post_order_traversal_.reserve(num_reachable_blocks_);
ClearAllVisitedFlags();
- DCHECK(temp_scoped_alloc_.get() != nullptr);
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
- temp_scoped_alloc_->Adapter());
+ allocator.Adapter());
bb->visited = true;
work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
while (!work_stack.empty()) {
for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
ComputeDominanceFrontier(bb);
}
+
+ domination_up_to_date_ = true;
}
/*
ScopedArenaAllocator allocator(&cu_->arena_stack);
int* saved_ssa_map =
static_cast<int*>(allocator.Alloc(map_size, kArenaAllocDalvikToSSAMap));
- memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
+ memcpy(saved_ssa_map, temp_.ssa.vreg_to_ssa_map_, map_size);
if (block->fall_through != NullBasicBlockId) {
DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
/* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+ memcpy(temp_.ssa.vreg_to_ssa_map_, saved_ssa_map, map_size);
}
if (block->taken != NullBasicBlockId) {
DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
/* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+ memcpy(temp_.ssa.vreg_to_ssa_map_, saved_ssa_map, map_size);
}
if (block->successor_block_list_type != kNotUsed) {
for (SuccessorBlockInfo* successor_block_info : block->successor_blocks) {
BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
DoDFSPreOrderSSARename(succ_bb);
/* Restore SSA map snapshot */
- memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
+ memcpy(temp_.ssa.vreg_to_ssa_map_, saved_ssa_map, map_size);
}
}
return;