Diffstat (limited to 'source/opt/loop_unswitch_pass.cpp')
-rw-r--r-- | source/opt/loop_unswitch_pass.cpp | 250
1 file changed, 121 insertions, 129 deletions
diff --git a/source/opt/loop_unswitch_pass.cpp b/source/opt/loop_unswitch_pass.cpp
index 12f82a8e..b9eb61bc 100644
--- a/source/opt/loop_unswitch_pass.cpp
+++ b/source/opt/loop_unswitch_pass.cpp
@@ -52,8 +52,8 @@ namespace {
 // - The loop invariant condition is not uniform.
 class LoopUnswitch {
  public:
-  LoopUnswitch(opt::IRContext* context, opt::Function* function,
-               opt::Loop* loop, opt::LoopDescriptor* loop_desc)
+  LoopUnswitch(IRContext* context, Function* function, Loop* loop,
+               LoopDescriptor* loop_desc)
       : function_(function),
         loop_(loop),
         loop_desc_(*loop_desc),
@@ -70,10 +70,10 @@ class LoopUnswitch {
     if (switch_block_) return true;
     if (loop_->IsSafeToClone()) return false;
 
-    opt::CFG& cfg = *context_->cfg();
+    CFG& cfg = *context_->cfg();
 
     for (uint32_t bb_id : loop_->GetBlocks()) {
-      opt::BasicBlock* bb = cfg.block(bb_id);
+      BasicBlock* bb = cfg.block(bb_id);
       if (bb->terminator()->IsBranch() &&
           bb->terminator()->opcode() != SpvOpBranch) {
         if (IsConditionLoopInvariant(bb->terminator())) {
@@ -87,8 +87,8 @@ class LoopUnswitch {
   }
 
   // Return the iterator to the basic block |bb|.
-  opt::Function::iterator FindBasicBlockPosition(opt::BasicBlock* bb_to_find) {
-    opt::Function::iterator it = function_->FindBlock(bb_to_find->id());
+  Function::iterator FindBasicBlockPosition(BasicBlock* bb_to_find) {
+    Function::iterator it = function_->FindBlock(bb_to_find->id());
     assert(it != function_->end() && "Basic Block not found");
     return it;
   }
@@ -96,13 +96,12 @@ class LoopUnswitch {
   // Creates a new basic block and insert it into the function |fn| at the
   // position |ip|. This function preserves the def/use and instr to block
   // managers.
-  opt::BasicBlock* CreateBasicBlock(opt::Function::iterator ip) {
+  BasicBlock* CreateBasicBlock(Function::iterator ip) {
     analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
-    opt::BasicBlock* bb =
-        &*ip.InsertBefore(std::unique_ptr<opt::BasicBlock>(new opt::BasicBlock(
-            std::unique_ptr<opt::Instruction>(new opt::Instruction(
-                context_, SpvOpLabel, 0, context_->TakeNextId(), {})))));
+    BasicBlock* bb = &*ip.InsertBefore(std::unique_ptr<BasicBlock>(
+        new BasicBlock(std::unique_ptr<Instruction>(new Instruction(
+            context_, SpvOpLabel, 0, context_->TakeNextId(), {})))));
     bb->SetParent(function_);
     def_use_mgr->AnalyzeInstDef(bb->GetLabelInst());
     context_->set_instr_block(bb->GetLabelInst(), bb);
@@ -117,7 +116,7 @@ class LoopUnswitch {
     assert(loop_->GetPreHeaderBlock() && "This loop has no pre-header block");
     assert(loop_->IsLCSSA() && "This loop is not in LCSSA form");
 
-    opt::CFG& cfg = *context_->cfg();
+    CFG& cfg = *context_->cfg();
     DominatorTree* dom_tree =
         &context_->GetDominatorAnalysis(function_)->GetDomTree();
     analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
@@ -133,29 +132,28 @@ class LoopUnswitch {
     //////////////////////////////////////////////////////////////////////////////
 
     // Get the merge block if it exists.
-    opt::BasicBlock* if_merge_block = loop_->GetMergeBlock();
+    BasicBlock* if_merge_block = loop_->GetMergeBlock();
     // The merge block is only created if the loop has a unique exit block. We
     // have this guarantee for structured loops, for compute loop it will
     // trivially help maintain both a structured-like form and LCSAA.
-    opt::BasicBlock* loop_merge_block =
+    BasicBlock* loop_merge_block =
         if_merge_block
             ? CreateBasicBlock(FindBasicBlockPosition(if_merge_block))
            : nullptr;
     if (loop_merge_block) {
       // Add the instruction and update managers.
-      opt::InstructionBuilder builder(
+      InstructionBuilder builder(
           context_, loop_merge_block,
-          opt::IRContext::kAnalysisDefUse |
-              opt::IRContext::kAnalysisInstrToBlockMapping);
+          IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping);
       builder.AddBranch(if_merge_block->id());
       builder.SetInsertPoint(&*loop_merge_block->begin());
       cfg.RegisterBlock(loop_merge_block);
       def_use_mgr->AnalyzeInstDef(loop_merge_block->GetLabelInst());
       // Update CFG.
       if_merge_block->ForEachPhiInst(
-          [loop_merge_block, &builder, this](opt::Instruction* phi) {
-            opt::Instruction* cloned = phi->Clone(context_);
-            builder.AddInstruction(std::unique_ptr<opt::Instruction>(cloned));
+          [loop_merge_block, &builder, this](Instruction* phi) {
+            Instruction* cloned = phi->Clone(context_);
+            builder.AddInstruction(std::unique_ptr<Instruction>(cloned));
             phi->SetInOperand(0, {cloned->result_id()});
             phi->SetInOperand(1, {loop_merge_block->id()});
             for (uint32_t j = phi->NumInOperands() - 1; j > 1; j--)
@@ -165,7 +163,7 @@ class LoopUnswitch {
       std::vector<uint32_t> preds = cfg.preds(if_merge_block->id());
       for (uint32_t pid : preds) {
         if (pid == loop_merge_block->id()) continue;
-        opt::BasicBlock* p_bb = cfg.block(pid);
+        BasicBlock* p_bb = cfg.block(pid);
         p_bb->ForEachSuccessorLabel(
             [if_merge_block, loop_merge_block](uint32_t* id) {
               if (*id == if_merge_block->id()) *id = loop_merge_block->id();
@@ -174,7 +172,7 @@
       }
       cfg.RemoveNonExistingEdges(if_merge_block->id());
       // Update loop descriptor.
-      if (opt::Loop* ploop = loop_->GetParent()) {
+      if (Loop* ploop = loop_->GetParent()) {
         ploop->AddBasicBlock(loop_merge_block);
         loop_desc_.SetBasicBlockToLoop(loop_merge_block->id(), ploop);
       }
@@ -199,20 +197,20 @@ class LoopUnswitch {
     // for the constant branch.
     ////////////////////////////////////////////////////////////////////////////
 
-    opt::BasicBlock* if_block = loop_->GetPreHeaderBlock();
+    BasicBlock* if_block = loop_->GetPreHeaderBlock();
     // If this preheader is the parent loop header,
     // we need to create a dedicated block for the if.
-    opt::BasicBlock* loop_pre_header =
+    BasicBlock* loop_pre_header =
         CreateBasicBlock(++FindBasicBlockPosition(if_block));
-    opt::InstructionBuilder(context_, loop_pre_header,
-                            opt::IRContext::kAnalysisDefUse |
-                                opt::IRContext::kAnalysisInstrToBlockMapping)
+    InstructionBuilder(
+        context_, loop_pre_header,
+        IRContext::kAnalysisDefUse | IRContext::kAnalysisInstrToBlockMapping)
         .AddBranch(loop_->GetHeaderBlock()->id());
     if_block->tail()->SetInOperand(0, {loop_pre_header->id()});
 
     // Update loop descriptor.
-    if (opt::Loop* ploop = loop_desc_[if_block]) {
+    if (Loop* ploop = loop_desc_[if_block]) {
       ploop->AddBasicBlock(loop_pre_header);
       loop_desc_.SetBasicBlockToLoop(loop_pre_header->id(), ploop);
     }
@@ -224,7 +222,7 @@ class LoopUnswitch {
     cfg.RemoveNonExistingEdges(loop_->GetHeaderBlock()->id());
 
     loop_->GetHeaderBlock()->ForEachPhiInst(
-        [loop_pre_header, if_block](opt::Instruction* phi) {
+        [loop_pre_header, if_block](Instruction* phi) {
           phi->ForEachInId([loop_pre_header, if_block](uint32_t* id) {
             if (*id == if_block->id()) {
               *id = loop_pre_header->id();
@@ -260,9 +258,9 @@ class LoopUnswitch {
     //  - Specialize the loop
     //
     /////////////////////////////
-    opt::Instruction* iv_condition = &*switch_block_->tail();
+    Instruction* iv_condition = &*switch_block_->tail();
     SpvOp iv_opcode = iv_condition->opcode();
-    opt::Instruction* condition =
+    Instruction* condition =
         def_use_mgr->GetDef(iv_condition->GetOperand(0).words[0]);
 
     analysis::ConstantManager* cst_mgr = context_->get_constant_mgr();
@@ -271,10 +269,10 @@ class LoopUnswitch {
 
     // Build the list of value for which we need to clone and specialize the
     // loop.
-    std::vector<std::pair<opt::Instruction*, opt::BasicBlock*>> constant_branch;
+    std::vector<std::pair<Instruction*, BasicBlock*>> constant_branch;
     // Special case for the original loop
-    opt::Instruction* original_loop_constant_value;
-    opt::BasicBlock* original_loop_target;
+    Instruction* original_loop_constant_value;
+    BasicBlock* original_loop_target;
     if (iv_opcode == SpvOpBranchConditional) {
       constant_branch.emplace_back(
           cst_mgr->GetDefiningInstruction(cst_mgr->GetConstant(cond_type, {0})),
@@ -309,13 +307,13 @@ class LoopUnswitch {
     }
 
     for (auto& specialisation_pair : constant_branch) {
-      opt::Instruction* specialisation_value = specialisation_pair.first;
+      Instruction* specialisation_value = specialisation_pair.first;
 
       //////////////////////////////////////////////////////////
       // Step 3: Duplicate |loop_|.
      //////////////////////////////////////////////////////////
       LoopUtils::LoopCloningResult clone_result;
-      opt::Loop* cloned_loop =
+      Loop* cloned_loop =
           loop_utils.CloneLoop(&clone_result, ordered_loop_blocks_);
 
       specialisation_pair.second = cloned_loop->GetPreHeaderBlock();
@@ -327,11 +325,11 @@ class LoopUnswitch {
       std::unordered_set<uint32_t> dead_blocks;
       std::unordered_set<uint32_t> unreachable_merges;
       SimplifyLoop(
-          opt::make_range(
-              opt::UptrVectorIterator<opt::BasicBlock>(
-                  &clone_result.cloned_bb_, clone_result.cloned_bb_.begin()),
-              opt::UptrVectorIterator<opt::BasicBlock>(
-                  &clone_result.cloned_bb_, clone_result.cloned_bb_.end())),
+          make_range(
+              UptrVectorIterator<BasicBlock>(&clone_result.cloned_bb_,
+                                             clone_result.cloned_bb_.begin()),
+              UptrVectorIterator<BasicBlock>(&clone_result.cloned_bb_,
+                                             clone_result.cloned_bb_.end())),
           cloned_loop, condition, specialisation_value, &dead_blocks);
 
       // We tagged dead blocks, create the loop before we invalidate any basic
@@ -339,8 +337,8 @@ class LoopUnswitch {
       cloned_loop = CleanLoopNest(cloned_loop, dead_blocks, &unreachable_merges);
 
       CleanUpCFG(
-          opt::UptrVectorIterator<opt::BasicBlock>(
-              &clone_result.cloned_bb_, clone_result.cloned_bb_.begin()),
+          UptrVectorIterator<BasicBlock>(&clone_result.cloned_bb_, clone_result.cloned_bb_.begin()),
           dead_blocks, unreachable_merges);
 
       ///////////////////////////////////////////////////////////
@@ -348,10 +346,10 @@ class LoopUnswitch {
       ///////////////////////////////////////////////////////////
 
       for (uint32_t merge_bb_id : if_merging_blocks) {
-        opt::BasicBlock* merge = context_->cfg()->block(merge_bb_id);
+        BasicBlock* merge = context_->cfg()->block(merge_bb_id);
         // We are in LCSSA so we only care about phi instructions.
         merge->ForEachPhiInst([is_from_original_loop, &dead_blocks,
-                               &clone_result](opt::Instruction* phi) {
+                               &clone_result](Instruction* phi) {
           uint32_t num_in_operands = phi->NumInOperands();
           for (uint32_t i = 0; i < num_in_operands; i += 2) {
             uint32_t pred = phi->GetSingleWordInOperand(i + 1);
@@ -382,11 +380,11 @@
     {
       std::unordered_set<uint32_t> dead_blocks;
       std::unordered_set<uint32_t> unreachable_merges;
-      SimplifyLoop(opt::make_range(function_->begin(), function_->end()), loop_,
+      SimplifyLoop(make_range(function_->begin(), function_->end()), loop_,
                    condition, original_loop_constant_value, &dead_blocks);
 
       for (uint32_t merge_bb_id : if_merging_blocks) {
-        opt::BasicBlock* merge = context_->cfg()->block(merge_bb_id);
+        BasicBlock* merge = context_->cfg()->block(merge_bb_id);
         // LCSSA, so we only care about phi instructions.
         // If we the phi is reduced to a single incoming branch, do not
         // propagate it to preserve LCSSA.
@@ -417,7 +415,7 @@ class LoopUnswitch {
     // Delete the old jump
     context_->KillInst(&*if_block->tail());
 
-    opt::InstructionBuilder builder(context_, if_block);
+    InstructionBuilder builder(context_, if_block);
     if (iv_opcode == SpvOpBranchConditional) {
       assert(constant_branch.size() == 1);
       builder.AddConditionalBranch(
@@ -425,7 +423,7 @@
           constant_branch[0].second->id(),
           if_merge_block ? if_merge_block->id() : kInvalidId);
    } else {
-      std::vector<std::pair<opt::Operand::OperandData, uint32_t>> targets;
+      std::vector<std::pair<Operand::OperandData, uint32_t>> targets;
       for (auto& t : constant_branch) {
         targets.emplace_back(t.first->GetInOperand(0).words, t.second->id());
       }
@@ -439,7 +437,7 @@ class LoopUnswitch {
     ordered_loop_blocks_.clear();
 
     context_->InvalidateAnalysesExceptFor(
-        opt::IRContext::Analysis::kAnalysisLoopAnalysis);
+        IRContext::Analysis::kAnalysisLoopAnalysis);
   }
 
   // Returns true if the unswitch killed the original |loop_|.
@@ -447,37 +445,37 @@ private:
   using ValueMapTy = std::unordered_map<uint32_t, uint32_t>;
-  using BlockMapTy = std::unordered_map<uint32_t, opt::BasicBlock*>;
+  using BlockMapTy = std::unordered_map<uint32_t, BasicBlock*>;
 
-  opt::Function* function_;
-  opt::Loop* loop_;
-  opt::LoopDescriptor& loop_desc_;
-  opt::IRContext* context_;
+  Function* function_;
+  Loop* loop_;
+  LoopDescriptor& loop_desc_;
+  IRContext* context_;
 
-  opt::BasicBlock* switch_block_;
+  BasicBlock* switch_block_;
 
   // Map between instructions and if they are dynamically uniform.
   std::unordered_map<uint32_t, bool> dynamically_uniform_;
 
   // The loop basic blocks in structured order.
-  std::vector<opt::BasicBlock*> ordered_loop_blocks_;
+  std::vector<BasicBlock*> ordered_loop_blocks_;
 
   // Returns the next usable id for the context.
   uint32_t TakeNextId() { return context_->TakeNextId(); }
 
   // Patches |bb|'s phi instruction by removing incoming value from unexisting
   // or tagged as dead branches.
-  void PatchPhis(opt::BasicBlock* bb,
+  void PatchPhis(BasicBlock* bb,
                  const std::unordered_set<uint32_t>& dead_blocks,
                  bool preserve_phi) {
-    opt::CFG& cfg = *context_->cfg();
+    CFG& cfg = *context_->cfg();
 
-    std::vector<opt::Instruction*> phi_to_kill;
+    std::vector<Instruction*> phi_to_kill;
     const std::vector<uint32_t>& bb_preds = cfg.preds(bb->id());
     auto is_branch_dead = [&bb_preds, &dead_blocks](uint32_t id) {
       return dead_blocks.count(id) ||
             std::find(bb_preds.begin(), bb_preds.end(), id) == bb_preds.end();
    };
     bb->ForEachPhiInst([&phi_to_kill, &is_branch_dead, preserve_phi,
-                        this](opt::Instruction* insn) {
+                        this](Instruction* insn) {
       uint32_t i = 0;
       while (i < insn->NumInOperands()) {
         uint32_t incoming_id = insn->GetSingleWordInOperand(i + 1);
@@ -498,7 +496,7 @@ class LoopUnswitch {
                         insn->GetSingleWordInOperand(0));
       }
     });
-    for (opt::Instruction* insn : phi_to_kill) {
+    for (Instruction* insn : phi_to_kill) {
       context_->KillInst(insn);
     }
   }
@@ -506,20 +504,20 @@ class LoopUnswitch {
   // Removes any block that is tagged as dead, if the block is in
   // |unreachable_merges| then all block's instructions are replaced by a
   // OpUnreachable.
-  void CleanUpCFG(opt::UptrVectorIterator<opt::BasicBlock> bb_it,
+  void CleanUpCFG(UptrVectorIterator<BasicBlock> bb_it,
                   const std::unordered_set<uint32_t>& dead_blocks,
                   const std::unordered_set<uint32_t>& unreachable_merges) {
-    opt::CFG& cfg = *context_->cfg();
+    CFG& cfg = *context_->cfg();
 
     while (bb_it != bb_it.End()) {
-      opt::BasicBlock& bb = *bb_it;
+      BasicBlock& bb = *bb_it;
 
       if (unreachable_merges.count(bb.id())) {
         if (bb.begin() != bb.tail() ||
            bb.terminator()->opcode() != SpvOpUnreachable) {
          // Make unreachable, but leave the label.
          bb.KillAllInsts(false);
-          opt::InstructionBuilder(context_, &bb).AddUnreachable();
+          InstructionBuilder(context_, &bb).AddUnreachable();
           cfg.RemoveNonExistingEdges(bb.id());
         }
         ++bb_it;
@@ -537,7 +535,7 @@ class LoopUnswitch {
 
   // Return true if |c_inst| is a Boolean constant and set |cond_val| with the
   // value that |c_inst|
-  bool GetConstCondition(const opt::Instruction* c_inst, bool* cond_val) {
+  bool GetConstCondition(const Instruction* c_inst, bool* cond_val) {
     bool cond_is_const;
     switch (c_inst->opcode()) {
       case SpvOpConstantFalse: {
@@ -564,35 +562,36 @@ class LoopUnswitch {
   //  - |loop| must be in the LCSSA form;
   //  - |cst_value| must be constant or null (to represent the default target
   //    of an OpSwitch).
-  void SimplifyLoop(
-      opt::IteratorRange<opt::UptrVectorIterator<opt::BasicBlock>> block_range,
-      opt::Loop* loop, opt::Instruction* to_version_insn,
-      opt::Instruction* cst_value, std::unordered_set<uint32_t>* dead_blocks) {
-    opt::CFG& cfg = *context_->cfg();
+  void SimplifyLoop(IteratorRange<UptrVectorIterator<BasicBlock>> block_range,
+                    Loop* loop, Instruction* to_version_insn,
+                    Instruction* cst_value,
+                    std::unordered_set<uint32_t>* dead_blocks) {
+    CFG& cfg = *context_->cfg();
     analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
 
     std::function<bool(uint32_t)> ignore_node;
     ignore_node = [loop](uint32_t bb_id) { return !loop->IsInsideLoop(bb_id); };
 
-    std::vector<std::pair<opt::Instruction*, uint32_t>> use_list;
-    def_use_mgr->ForEachUse(
-        to_version_insn, [&use_list, &ignore_node, this](
-                             opt::Instruction* inst, uint32_t operand_index) {
-          opt::BasicBlock* bb = context_->get_instr_block(inst);
+    std::vector<std::pair<Instruction*, uint32_t>> use_list;
+    def_use_mgr->ForEachUse(to_version_insn,
+                            [&use_list, &ignore_node, this](
+                                Instruction* inst, uint32_t operand_index) {
+                              BasicBlock* bb = context_->get_instr_block(inst);
 
-          if (!bb || ignore_node(bb->id())) {
-            // Out of the loop, the specialization does not apply any more.
-            return;
-          }
-          use_list.emplace_back(inst, operand_index);
-        });
+                              if (!bb || ignore_node(bb->id())) {
+                                // Out of the loop, the specialization does not
+                                // apply any more.
+                                return;
+                              }
+                              use_list.emplace_back(inst, operand_index);
+                            });
 
     // First pass: inject the specialized value into the loop (and only the
     // loop).
     for (auto use : use_list) {
-      opt::Instruction* inst = use.first;
+      Instruction* inst = use.first;
       uint32_t operand_index = use.second;
-      opt::BasicBlock* bb = context_->get_instr_block(inst);
+      BasicBlock* bb = context_->get_instr_block(inst);
 
       // If it is not a branch, simply inject the value.
       if (!inst->IsBranch()) {
@@ -628,9 +627,9 @@ class LoopUnswitch {
           live_target = inst->GetSingleWordInOperand(1);
           if (cst_value) {
             if (!cst_value->IsConstant()) break;
-            const opt::Operand& cst = cst_value->GetInOperand(0);
+            const Operand& cst = cst_value->GetInOperand(0);
             for (uint32_t i = 2; i < inst->NumInOperands(); i += 2) {
-              const opt::Operand& literal = inst->GetInOperand(i);
+              const Operand& literal = inst->GetInOperand(i);
               if (literal == cst) {
                 live_target = inst->GetSingleWordInOperand(i + 1);
                 break;
@@ -649,13 +648,11 @@ class LoopUnswitch {
         }
         if (live_target != 0) {
           // Check for the presence of the merge block.
-          if (opt::Instruction* merge = bb->GetMergeInst())
-            context_->KillInst(merge);
+          if (Instruction* merge = bb->GetMergeInst()) context_->KillInst(merge);
           context_->KillInst(&*bb->tail());
 
-          opt::InstructionBuilder builder(
-              context_, bb,
-              opt::IRContext::kAnalysisDefUse |
-                  opt::IRContext::kAnalysisInstrToBlockMapping);
+          InstructionBuilder builder(context_, bb,
+                                     IRContext::kAnalysisDefUse |
+                                         IRContext::kAnalysisInstrToBlockMapping);
           builder.AddBranch(live_target);
         }
       }
@@ -663,7 +660,7 @@ class LoopUnswitch {
     // Go through the loop basic block and tag all blocks that are obviously
     // dead.
     std::unordered_set<uint32_t> visited;
-    for (opt::BasicBlock& bb : block_range) {
+    for (BasicBlock& bb : block_range) {
       if (ignore_node(bb.id())) continue;
       visited.insert(bb.id());
@@ -678,12 +675,12 @@ class LoopUnswitch {
       }
       if (!has_live_pred) {
         dead_blocks->insert(bb.id());
-        const opt::BasicBlock& cbb = bb;
+        const BasicBlock& cbb = bb;
         // Patch the phis for any back-edge.
         cbb.ForEachSuccessorLabel(
             [dead_blocks, &visited, &cfg, this](uint32_t id) {
               if (!visited.count(id) || dead_blocks->count(id)) return;
-              opt::BasicBlock* succ = cfg.block(id);
+              BasicBlock* succ = cfg.block(id);
               PatchPhis(succ, *dead_blocks, false);
             });
         continue;
@@ -695,7 +692,7 @@ class LoopUnswitch {
 
   // Returns true if the header is not reachable or tagged as dead or if we
   // never loop back.
-  bool IsLoopDead(opt::BasicBlock* header, opt::BasicBlock* latch,
+  bool IsLoopDead(BasicBlock* header, BasicBlock* latch,
                   const std::unordered_set<uint32_t>& dead_blocks) {
     if (!header || dead_blocks.count(header->id())) return true;
     if (!latch || dead_blocks.count(latch->id())) return true;
@@ -715,15 +712,14 @@ class LoopUnswitch {
   // |unreachable_merges|.
   // The function returns the pointer to |loop| or nullptr if the loop was
   // killed.
-  opt::Loop* CleanLoopNest(opt::Loop* loop,
-                           const std::unordered_set<uint32_t>& dead_blocks,
-                           std::unordered_set<uint32_t>* unreachable_merges) {
+  Loop* CleanLoopNest(Loop* loop,
+                      const std::unordered_set<uint32_t>& dead_blocks,
+                      std::unordered_set<uint32_t>* unreachable_merges) {
     // This represent the pair of dead loop and nearest alive parent (nullptr if
     // no parent).
-    std::unordered_map<opt::Loop*, opt::Loop*> dead_loops;
-    auto get_parent = [&dead_loops](opt::Loop* l) -> opt::Loop* {
-      std::unordered_map<opt::Loop*, opt::Loop*>::iterator it =
-          dead_loops.find(l);
+    std::unordered_map<Loop*, Loop*> dead_loops;
+    auto get_parent = [&dead_loops](Loop* l) -> Loop* {
+      std::unordered_map<Loop*, Loop*>::iterator it = dead_loops.find(l);
       if (it != dead_loops.end()) return it->second;
       return nullptr;
     };
@@ -731,8 +727,7 @@ class LoopUnswitch {
     bool is_main_loop_dead =
         IsLoopDead(loop->GetHeaderBlock(), loop->GetLatchBlock(), dead_blocks);
     if (is_main_loop_dead) {
-      if (opt::Instruction* merge =
-              loop->GetHeaderBlock()->GetLoopMergeInst()) {
+      if (Instruction* merge = loop->GetHeaderBlock()->GetLoopMergeInst()) {
         context_->KillInst(merge);
       }
       dead_loops[loop] = loop->GetParent();
@@ -740,12 +735,11 @@ class LoopUnswitch {
       dead_loops[loop] = loop;
     // For each loop, check if we killed it. If we did, find a suitable parent
    // for its children.
-    for (opt::Loop& sub_loop :
-         opt::make_range(++opt::TreeDFIterator<opt::Loop>(loop),
-                         opt::TreeDFIterator<opt::Loop>())) {
+    for (Loop& sub_loop :
+         make_range(++TreeDFIterator<Loop>(loop), TreeDFIterator<Loop>())) {
       if (IsLoopDead(sub_loop.GetHeaderBlock(), sub_loop.GetLatchBlock(),
                      dead_blocks)) {
-        if (opt::Instruction* merge =
+        if (Instruction* merge =
                 sub_loop.GetHeaderBlock()->GetLoopMergeInst()) {
           context_->KillInst(merge);
         }
@@ -765,7 +759,7 @@ class LoopUnswitch {
 
     // Remove dead blocks from live loops.
     for (uint32_t bb_id : dead_blocks) {
-      opt::Loop* l = loop_desc_[bb_id];
+      Loop* l = loop_desc_[bb_id];
       if (l) {
         l->RemoveBasicBlock(bb_id);
         loop_desc_.ForgetBasicBlock(bb_id);
@@ -774,8 +768,8 @@ class LoopUnswitch {
 
     std::for_each(
         dead_loops.begin(), dead_loops.end(),
-        [&loop, this](std::unordered_map<opt::Loop*,
-                                         opt::Loop*>::iterator::reference it) {
+        [&loop,
+         this](std::unordered_map<Loop*, Loop*>::iterator::reference it) {
          if (it.first == loop) loop = nullptr;
          loop_desc_.RemoveLoop(it.first);
        });
@@ -785,7 +779,7 @@ class LoopUnswitch {
 
   // Returns true if |var| is dynamically uniform.
   // Note: this is currently approximated as uniform.
-  bool IsDynamicallyUniform(opt::Instruction* var, const opt::BasicBlock* entry,
+  bool IsDynamicallyUniform(Instruction* var, const BasicBlock* entry,
                             const DominatorTree& post_dom_tree) {
     assert(post_dom_tree.IsPostDominator());
     analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
@@ -800,7 +794,7 @@ class LoopUnswitch {
 
     is_uniform = false;
     dec_mgr->WhileEachDecoration(var->result_id(), SpvDecorationUniform,
-                                 [&is_uniform](const opt::Instruction&) {
+                                 [&is_uniform](const Instruction&) {
                                    is_uniform = true;
                                    return false;
                                  });
@@ -808,7 +802,7 @@ class LoopUnswitch {
       return is_uniform;
     }
 
-    opt::BasicBlock* parent = context_->get_instr_block(var);
+    BasicBlock* parent = context_->get_instr_block(var);
     if (!parent) {
       return is_uniform = true;
     }
@@ -819,7 +813,7 @@ class LoopUnswitch {
     if (var->opcode() == SpvOpLoad) {
       const uint32_t PtrTypeId =
           def_use_mgr->GetDef(var->GetSingleWordInOperand(0))->type_id();
-      const opt::Instruction* PtrTypeInst = def_use_mgr->GetDef(PtrTypeId);
+      const Instruction* PtrTypeInst = def_use_mgr->GetDef(PtrTypeId);
       uint32_t storage_class =
           PtrTypeInst->GetSingleWordInOperand(kTypePointerStorageClassInIdx);
       if (storage_class != SpvStorageClassUniform &&
@@ -840,13 +834,12 @@ class LoopUnswitch {
   }
 
   // Returns true if |insn| is constant and dynamically uniform within the loop.
-  bool IsConditionLoopInvariant(opt::Instruction* insn) {
+  bool IsConditionLoopInvariant(Instruction* insn) {
     assert(insn->IsBranch());
     assert(insn->opcode() != SpvOpBranch);
 
    analysis::DefUseManager* def_use_mgr = context_->get_def_use_mgr();
-    opt::Instruction* condition =
-        def_use_mgr->GetDef(insn->GetOperand(0).words[0]);
+    Instruction* condition = def_use_mgr->GetDef(insn->GetOperand(0).words[0]);
 
    return !loop_->IsInsideLoop(condition) &&
            IsDynamicallyUniform(
               condition, function_->entry().get(),
@@ -858,29 +851,28 @@ class LoopUnswitch {
 Pass::Status LoopUnswitchPass::Process() {
   bool modified = false;
-  opt::Module* module = context()->module();
+  Module* module = context()->module();
 
   // Process each function in the module
-  for (opt::Function& f : *module) {
+  for (Function& f : *module) {
     modified |= ProcessFunction(&f);
   }
 
   return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
 }
 
-bool LoopUnswitchPass::ProcessFunction(opt::Function* f) {
+bool LoopUnswitchPass::ProcessFunction(Function* f) {
   bool modified = false;
-  std::unordered_set<opt::Loop*> processed_loop;
+  std::unordered_set<Loop*> processed_loop;
 
-  opt::LoopDescriptor& loop_descriptor = *context()->GetLoopDescriptor(f);
+  LoopDescriptor& loop_descriptor = *context()->GetLoopDescriptor(f);
 
   bool loop_changed = true;
   while (loop_changed) {
     loop_changed = false;
 
-    for (opt::Loop& loop :
-         opt::make_range(++opt::TreeDFIterator<opt::Loop>(
-                             loop_descriptor.GetDummyRootLoop()),
-                         opt::TreeDFIterator<opt::Loop>())) {
+    for (Loop& loop :
+         make_range(++TreeDFIterator<Loop>(loop_descriptor.GetDummyRootLoop()),
+                    TreeDFIterator<Loop>())) {
       if (processed_loop.count(&loop)) continue;
       processed_loop.insert(&loop);
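The diff is mechanical: every opt:: qualifier is dropped, which only works if the surrounding code already sits inside the opt namespace (nested in spvtools), as the unchanged context lines suggest. A minimal sketch of the pattern the change relies on; Widget and the two factory functions are hypothetical stand-ins for the real IRContext/BasicBlock types, not code from the commit:

// Illustrative only -- hypothetical names, not part of the commit.
namespace spvtools {
namespace opt {

class Widget {};

// Before: the qualifier is legal but redundant, since opt names the
// enclosing namespace and opt::Widget resolves to spvtools::opt::Widget.
opt::Widget* MakeQualified() { return new opt::Widget(); }

// After: inside spvtools::opt the unqualified name denotes the same type.
Widget* Make() { return new Widget(); }

}  // namespace opt
}  // namespace spvtools

Because both spellings name the same entity, the rewrite cannot change behavior; it only shortens the code and keeps clang-format rewrapping the affected lines, which is why the diff touches 250 lines for a purely cosmetic cleanup.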