Eliminate function scope variables with one store, if possible.
// possible.
Optimizer::PassToken CreateLocalAccessChainConvertPass();
+// Creates a local single store elimination pass.
+// For each entry point function, this pass eliminates loads and stores of
+// function scope variables that are stored to only once, where possible. Only
+// whole-variable loads and stores are eliminated; access-chain references are
+// not optimized. All loads of such variables are replaced with the stored
+// value, and any resulting dead code is eliminated.
+//
+// Currently, the presence of access chains and function calls can inhibit this
+// pass; however, the Inlining and LocalAccessChainConvert passes can make it
+// more effective. In addition, many non-load/store memory operations are not
+// supported and will prohibit optimization of a function. Support for these
+// operations is future work.
+//
+// This pass reduces the work that LocalSingleBlockElim and LocalSSARewrite
+// need to do, and can improve the effectiveness of other passes such as
+// DeadBranchElimination, which depend on values for their analysis.
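+//
+// For example (this mirrors the pattern exercised by the tests added below),
+// a function scope variable that is written once as a whole and later read:
+//
+//     OpStore %v %20
+//     ...
+//     %26 = OpLoad %v4float %v
+//
+// has uses of %26 rewritten to use %20 directly, after which the load, and
+// the store if %v has no remaining loads, are removed.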
+Optimizer::PassToken CreateLocalSingleStoreElimPass();
+
// Creates a compact ids pass.
// The pass remaps result ids to a compact and gapless range starting from %1.
Optimizer::PassToken CreateCompactIdsPass();
ir_loader.h
local_access_chain_convert_pass.h
local_single_block_elim_pass.h
+ local_single_store_elim_pass.h
log.h
module.h
null_pass.h
ir_loader.cpp
local_access_chain_convert_pass.cpp
local_single_block_elim_pass.cpp
+ local_single_store_elim_pass.cpp
module.cpp
set_spec_constant_default_value_pass.cpp
optimizer.cpp
--- /dev/null
+// Copyright (c) 2017 The Khronos Group Inc.
+// Copyright (c) 2017 Valve Corporation
+// Copyright (c) 2017 LunarG Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "local_single_store_elim_pass.h"
+
+#include "cfa.h"
+#include "iterator.h"
+#include "spirv/1.0/GLSL.std.450.h"
+
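+// Operand indices used below when extracting specific ids (e.g. a store's
+// pointer or an OpTypePointer's storage class) from instructions.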
+static const int kSpvEntryPointFunctionId = 1;
+static const int kSpvStorePtrId = 0;
+static const int kSpvStoreValId = 1;
+static const int kSpvLoadPtrId = 0;
+static const int kSpvAccessChainPtrId = 0;
+static const int kSpvTypePointerStorageClass = 0;
+static const int kSpvTypePointerTypeId = 1;
+
+// Universal Limit of ResultID + 1
+static const int kInvalidId = 0x400000;
+
+namespace spvtools {
+namespace opt {
+
+bool LocalSingleStoreElimPass::IsNonPtrAccessChain(const SpvOp opcode) const {
+ return opcode == SpvOpAccessChain || opcode == SpvOpInBoundsAccessChain;
+}
+
+bool LocalSingleStoreElimPass::IsMathType(
+ const ir::Instruction* typeInst) const {
+ switch (typeInst->opcode()) {
+ case SpvOpTypeInt:
+ case SpvOpTypeFloat:
+ case SpvOpTypeBool:
+ case SpvOpTypeVector:
+ case SpvOpTypeMatrix:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+bool LocalSingleStoreElimPass::IsTargetType(
+ const ir::Instruction* typeInst) const {
+ if (IsMathType(typeInst))
+ return true;
+ if (typeInst->opcode() == SpvOpTypeArray)
+ return IsMathType(def_use_mgr_->GetDef(typeInst->GetSingleWordOperand(1)));
+ if (typeInst->opcode() != SpvOpTypeStruct)
+ return false;
+  // All struct members must be math types
+ int nonMathComp = 0;
+ typeInst->ForEachInId([&nonMathComp,this](const uint32_t* tid) {
+ ir::Instruction* compTypeInst = def_use_mgr_->GetDef(*tid);
+ if (!IsMathType(compTypeInst)) ++nonMathComp;
+ });
+ return nonMathComp == 0;
+}
+
+ir::Instruction* LocalSingleStoreElimPass::GetPtr(
+ ir::Instruction* ip, uint32_t* varId) {
+ *varId = ip->GetSingleWordInOperand(
+ ip->opcode() == SpvOpStore ? kSpvStorePtrId : kSpvLoadPtrId);
+ ir::Instruction* ptrInst = def_use_mgr_->GetDef(*varId);
+ ir::Instruction* varInst = ptrInst;
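+  // If the pointer is an access chain, walk back through the chain(s) so
+  // that |varId| names the base OpVariable; the original pointer instruction
+  // is what gets returned.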
+ while (IsNonPtrAccessChain(varInst->opcode())) {
+ *varId = varInst->GetSingleWordInOperand(kSpvAccessChainPtrId);
+ varInst = def_use_mgr_->GetDef(*varId);
+ }
+ return ptrInst;
+}
+
+bool LocalSingleStoreElimPass::IsTargetVar(uint32_t varId) {
+ if (seen_non_target_vars_.find(varId) != seen_non_target_vars_.end())
+ return false;
+ if (seen_target_vars_.find(varId) != seen_target_vars_.end())
+ return true;
+ const ir::Instruction* varInst = def_use_mgr_->GetDef(varId);
+ assert(varInst->opcode() == SpvOpVariable);
+ const uint32_t varTypeId = varInst->type_id();
+ const ir::Instruction* varTypeInst = def_use_mgr_->GetDef(varTypeId);
+ if (varTypeInst->GetSingleWordInOperand(kSpvTypePointerStorageClass) !=
+ SpvStorageClassFunction) {
+ seen_non_target_vars_.insert(varId);
+ return false;
+ }
+ const uint32_t varPteTypeId =
+ varTypeInst->GetSingleWordInOperand(kSpvTypePointerTypeId);
+ ir::Instruction* varPteTypeInst = def_use_mgr_->GetDef(varPteTypeId);
+ if (!IsTargetType(varPteTypeInst)) {
+ seen_non_target_vars_.insert(varId);
+ return false;
+ }
+ seen_target_vars_.insert(varId);
+ return true;
+}
+
+bool LocalSingleStoreElimPass::HasOnlySupportedRefs(uint32_t ptrId) {
+ if (supported_ref_ptrs_.find(ptrId) != supported_ref_ptrs_.end())
+ return true;
+ analysis::UseList* uses = def_use_mgr_->GetUses(ptrId);
+ assert(uses != nullptr);
+ for (auto u : *uses) {
+ SpvOp op = u.inst->opcode();
+ if (IsNonPtrAccessChain(op)) {
+ if (!HasOnlySupportedRefs(u.inst->result_id()))
+ return false;
+ }
+ else if (op != SpvOpStore && op != SpvOpLoad && op != SpvOpName)
+ return false;
+ }
+ supported_ref_ptrs_.insert(ptrId);
+ return true;
+}
+
+void LocalSingleStoreElimPass::SingleStoreAnalyze(ir::Function* func) {
+ ssa_var2store_.clear();
+ non_ssa_vars_.clear();
+ store2idx_.clear();
+ store2blk_.clear();
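+  // Scan every store in the function. A variable qualifies only if it is a
+  // function scope variable of target type, is referenced solely by loads,
+  // stores, names and non-ptr access chains, and is stored to exactly once
+  // as a whole variable; anything else is recorded in non_ssa_vars_.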
+ for (auto bi = func->begin(); bi != func->end(); ++bi) {
+ uint32_t instIdx = 0;
+ for (auto ii = bi->begin(); ii != bi->end(); ++ii, ++instIdx) {
+ switch (ii->opcode()) {
+ case SpvOpStore: {
+          // Check whether the store's base variable qualifies
+ uint32_t varId;
+ ir::Instruction* ptrInst = GetPtr(&*ii, &varId);
+ if (non_ssa_vars_.find(varId) != non_ssa_vars_.end())
+ continue;
+ if (!HasOnlySupportedRefs(varId)) {
+ non_ssa_vars_.insert(varId);
+ continue;
+ }
+ if (IsNonPtrAccessChain(ptrInst->opcode())) {
+ non_ssa_vars_.insert(varId);
+ ssa_var2store_.erase(varId);
+ continue;
+ }
+ // Verify target type and function storage class
+ if (!IsTargetVar(varId)) {
+ non_ssa_vars_.insert(varId);
+ continue;
+ }
+ // Ignore variables with multiple stores
+ if (ssa_var2store_.find(varId) != ssa_var2store_.end()) {
+ non_ssa_vars_.insert(varId);
+ ssa_var2store_.erase(varId);
+ continue;
+ }
+        // Remember the pointer to the variable's store and its
+        // ordinal position in the block
+ ssa_var2store_[varId] = &*ii;
+ store2idx_[&*ii] = instIdx;
+ store2blk_[&*ii] = &*bi;
+ } break;
+ default:
+ break;
+ } // switch
+ }
+ }
+}
+
+void LocalSingleStoreElimPass::ReplaceAndDeleteLoad(
+ ir::Instruction* loadInst, uint32_t replId) {
+ (void) def_use_mgr_->ReplaceAllUsesWith(loadInst->result_id(), replId);
+ DCEInst(loadInst);
+}
+
+LocalSingleStoreElimPass::GetBlocksFunction
+LocalSingleStoreElimPass::AugmentedCFGSuccessorsFunction() const {
+ return [this](const ir::BasicBlock* block) {
+ auto asmi = augmented_successors_map_.find(block);
+ if (asmi != augmented_successors_map_.end())
+ return &(*asmi).second;
+ auto smi = successors_map_.find(block);
+ return &(*smi).second;
+ };
+}
+
+LocalSingleStoreElimPass::GetBlocksFunction
+LocalSingleStoreElimPass::AugmentedCFGPredecessorsFunction() const {
+ return [this](const ir::BasicBlock* block) {
+ auto apmi = augmented_predecessors_map_.find(block);
+ if (apmi != augmented_predecessors_map_.end())
+ return &(*apmi).second;
+ auto pmi = predecessors_map_.find(block);
+ return &(*pmi).second;
+ };
+}
+
+void LocalSingleStoreElimPass::CalculateImmediateDominators(
+ ir::Function* func) {
+ // Compute CFG
+  std::vector<ir::BasicBlock*> ordered_blocks;
+ predecessors_map_.clear();
+ successors_map_.clear();
+ for (auto& blk : *func) {
+ ordered_blocks.push_back(&blk);
+ blk.ForEachSuccessorLabel([&blk, &ordered_blocks, this](uint32_t sbid) {
+ successors_map_[&blk].push_back(label2block_[sbid]);
+ predecessors_map_[label2block_[sbid]].push_back(&blk);
+ });
+ }
+ // Compute Augmented CFG
+ augmented_successors_map_.clear();
+ augmented_predecessors_map_.clear();
+ successors_map_[&pseudo_exit_block_] = {};
+ predecessors_map_[&pseudo_entry_block_] = {};
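+  // The augmented CFG wires the pseudo entry and exit blocks into the graph
+  // so that the dominator calculation below sees a single-entry, single-exit
+  // graph.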
+ auto succ_func = [this](const ir::BasicBlock* b)
+ { return &successors_map_[b]; };
+ auto pred_func = [this](const ir::BasicBlock* b)
+ { return &predecessors_map_[b]; };
+ CFA<ir::BasicBlock>::ComputeAugmentedCFG(
+ ordered_blocks,
+ &pseudo_entry_block_,
+ &pseudo_exit_block_,
+ &augmented_successors_map_,
+ &augmented_predecessors_map_,
+ succ_func,
+ pred_func);
+ // Compute Dominators
+  std::vector<const ir::BasicBlock*> postorder;
+ auto ignore_block = [](cbb_ptr) {};
+ auto ignore_edge = [](cbb_ptr, cbb_ptr) {};
+ spvtools::CFA<ir::BasicBlock>::DepthFirstTraversal(
+ ordered_blocks[0], AugmentedCFGSuccessorsFunction(),
+ ignore_block, [&](cbb_ptr b) { postorder.push_back(b); },
+ ignore_edge);
+ auto edges = spvtools::CFA<ir::BasicBlock>::CalculateDominators(
+ postorder, AugmentedCFGPredecessorsFunction());
+ idom_.clear();
+ for (auto edge : edges)
+ idom_[edge.first] = edge.second;
+}
+
+bool LocalSingleStoreElimPass::Dominates(
+ ir::BasicBlock* blk0, uint32_t idx0,
+ ir::BasicBlock* blk1, uint32_t idx1) {
+ if (blk0 == blk1)
+ return idx0 <= idx1;
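+  // Otherwise walk blk1's immediate dominator chain; blk0 dominates blk1
+  // exactly when it appears on that chain.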
+ ir::BasicBlock* b = blk1;
+ while (idom_[b] != b) {
+ b = idom_[b];
+ if (b == blk0)
+ return true;
+ }
+ return false;
+}
+
+bool LocalSingleStoreElimPass::SingleStoreProcess(ir::Function* func) {
+ CalculateImmediateDominators(func);
+ bool modified = false;
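+  // Walk all loads; a load of a qualifying variable is replaced only when
+  // the variable's single store dominates the load.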
+ for (auto bi = func->begin(); bi != func->end(); ++bi) {
+ uint32_t instIdx = 0;
+ for (auto ii = bi->begin(); ii != bi->end(); ++ii, ++instIdx) {
+ if (ii->opcode() != SpvOpLoad)
+ continue;
+ uint32_t varId;
+ ir::Instruction* ptrInst = GetPtr(&*ii, &varId);
+ // Skip access chain loads
+ if (IsNonPtrAccessChain(ptrInst->opcode()))
+ continue;
+ if (ptrInst->opcode() != SpvOpVariable)
+ continue;
+ const auto vsi = ssa_var2store_.find(varId);
+ if (vsi == ssa_var2store_.end())
+ continue;
+ if (non_ssa_vars_.find(varId) != non_ssa_vars_.end())
+ continue;
+ // store must dominate load
+      if (!Dominates(store2blk_[vsi->second], store2idx_[vsi->second],
+                     &*bi, instIdx))
+ continue;
+ // Use store value as replacement id
+ uint32_t replId = vsi->second->GetSingleWordInOperand(kSpvStoreValId);
+ // replace all instances of the load's id with the SSA value's id
+ ReplaceAndDeleteLoad(&*ii, replId);
+ modified = true;
+ }
+ }
+ return modified;
+}
+
+bool LocalSingleStoreElimPass::HasLoads(uint32_t varId) const {
+ analysis::UseList* uses = def_use_mgr_->GetUses(varId);
+ if (uses == nullptr)
+ return false;
+ for (auto u : *uses) {
+ SpvOp op = u.inst->opcode();
+    // TODO(): The following is slightly conservative. Handling of
+    // non-store/name references could be improved.
+ if (IsNonPtrAccessChain(op) || op == SpvOpCopyObject) {
+ if (HasLoads(u.inst->result_id()))
+ return true;
+ }
+ else if (op != SpvOpStore && op != SpvOpName)
+ return true;
+ }
+ return false;
+}
+
+bool LocalSingleStoreElimPass::IsLiveVar(uint32_t varId) const {
+ // non-function scope vars are live
+ const ir::Instruction* varInst = def_use_mgr_->GetDef(varId);
+ assert(varInst->opcode() == SpvOpVariable);
+ const uint32_t varTypeId = varInst->type_id();
+ const ir::Instruction* varTypeInst = def_use_mgr_->GetDef(varTypeId);
+ if (varTypeInst->GetSingleWordInOperand(kSpvTypePointerStorageClass) !=
+ SpvStorageClassFunction)
+ return true;
+ // test if variable is loaded from
+ return HasLoads(varId);
+}
+
+bool LocalSingleStoreElimPass::IsLiveStore(ir::Instruction* storeInst) {
+ // get store's variable
+ uint32_t varId;
+ (void) GetPtr(storeInst, &varId);
+ return IsLiveVar(varId);
+}
+
+void LocalSingleStoreElimPass::AddStores(
+ uint32_t ptr_id, std::queue<ir::Instruction*>* insts) {
+ analysis::UseList* uses = def_use_mgr_->GetUses(ptr_id);
+ if (uses != nullptr) {
+ for (auto u : *uses) {
+ if (IsNonPtrAccessChain(u.inst->opcode()))
+ AddStores(u.inst->result_id(), insts);
+ else if (u.inst->opcode() == SpvOpStore)
+ insts->push(u.inst);
+ }
+ }
+}
+
+void LocalSingleStoreElimPass::DCEInst(ir::Instruction* inst) {
+ std::queue<ir::Instruction*> deadInsts;
+ deadInsts.push(inst);
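+  // Worklist-style DCE: kill |inst|, then keep killing any operand-defining
+  // instruction whose last remaining use has just been removed.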
+ while (!deadInsts.empty()) {
+ ir::Instruction* di = deadInsts.front();
+ // Don't delete labels
+ if (di->opcode() == SpvOpLabel) {
+ deadInsts.pop();
+ continue;
+ }
+ // Remember operands
+ std::queue<uint32_t> ids;
+ di->ForEachInId([&ids](uint32_t* iid) {
+ ids.push(*iid);
+ });
+ uint32_t varId = 0;
+ // Remember variable if dead load
+ if (di->opcode() == SpvOpLoad)
+ (void) GetPtr(di, &varId);
+ def_use_mgr_->KillInst(di);
+ // For all operands with no remaining uses, add their instruction
+ // to the dead instruction queue.
+ while (!ids.empty()) {
+ uint32_t id = ids.front();
+ analysis::UseList* uses = def_use_mgr_->GetUses(id);
+ if (uses == nullptr)
+ deadInsts.push(def_use_mgr_->GetDef(id));
+ ids.pop();
+ }
+ // if a load was deleted and it was the variable's
+ // last load, add all its stores to dead queue
+ if (varId != 0 && !IsLiveVar(varId))
+ AddStores(varId, &deadInsts);
+ deadInsts.pop();
+ }
+}
+
+bool LocalSingleStoreElimPass::SingleStoreDCE() {
+ bool modified = false;
+ for (auto v : ssa_var2store_) {
+ // check that it hasn't already been DCE'd
+ if (v.second->opcode() != SpvOpStore)
+ continue;
+ if (non_ssa_vars_.find(v.first) != non_ssa_vars_.end())
+ continue;
+ if (!IsLiveStore(v.second)) {
+ DCEInst(v.second);
+ modified = true;
+ }
+ }
+ return modified;
+}
+
+bool LocalSingleStoreElimPass::LocalSingleStoreElim(ir::Function* func) {
+ bool modified = false;
+ SingleStoreAnalyze(func);
+ if (ssa_var2store_.empty())
+ return false;
+ modified |= SingleStoreProcess(func);
+ modified |= SingleStoreDCE();
+ return modified;
+}
+
+void LocalSingleStoreElimPass::Initialize(ir::Module* module) {
+ module_ = module;
+
+ // Initialize function and block maps
+ id2function_.clear();
+ label2block_.clear();
+ for (auto& fn : *module_) {
+ id2function_[fn.result_id()] = &fn;
+ for (auto& blk : fn) {
+ uint32_t bid = blk.id();
+ label2block_[bid] = &blk;
+ }
+ }
+
+  // Initialize target variable caches
+ seen_target_vars_.clear();
+ seen_non_target_vars_.clear();
+
+ // Initialize Supported Ref Pointer Cache
+ supported_ref_ptrs_.clear();
+
+ // TODO: Reuse def/use (and other state) from previous passes
+ def_use_mgr_.reset(new analysis::DefUseManager(consumer(), module_));
+
+ // Initialize next unused Id
+ next_id_ = module_->id_bound();
+}
+
+Pass::Status LocalSingleStoreElimPass::ProcessImpl() {
+ // Assumes logical addressing only
+ if (module_->HasCapability(SpvCapabilityAddresses))
+ return Status::SuccessWithoutChange;
+ bool modified = false;
+  // Process all entry point functions.
+ for (auto& e : module_->entry_points()) {
+ ir::Function* fn =
+ id2function_[e.GetSingleWordOperand(kSpvEntryPointFunctionId)];
+    modified = LocalSingleStoreElim(fn) || modified;
+ }
+ FinalizeNextId(module_);
+ return modified ? Status::SuccessWithChange : Status::SuccessWithoutChange;
+}
+
+LocalSingleStoreElimPass::LocalSingleStoreElimPass()
+ : module_(nullptr), def_use_mgr_(nullptr),
+ pseudo_entry_block_(std::unique_ptr<ir::Instruction>(
+ new ir::Instruction(SpvOpLabel, 0, 0, {}))),
+ pseudo_exit_block_(std::unique_ptr<ir::Instruction>(
+ new ir::Instruction(SpvOpLabel, 0, kInvalidId, {}))),
+ next_id_(0) {}
+
+Pass::Status LocalSingleStoreElimPass::Process(ir::Module* module) {
+ Initialize(module);
+ return ProcessImpl();
+}
+
+} // namespace opt
+} // namespace spvtools
+
--- /dev/null
+// Copyright (c) 2017 The Khronos Group Inc.
+// Copyright (c) 2017 Valve Corporation
+// Copyright (c) 2017 LunarG Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef LIBSPIRV_OPT_LOCAL_SINGLE_STORE_ELIM_PASS_H_
+#define LIBSPIRV_OPT_LOCAL_SINGLE_STORE_ELIM_PASS_H_
+
+#include <algorithm>
+#include <map>
+#include <queue>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+
+#include "basic_block.h"
+#include "def_use_manager.h"
+#include "module.h"
+#include "pass.h"
+
+namespace spvtools {
+namespace opt {
+
+// See optimizer.hpp for documentation.
+class LocalSingleStoreElimPass : public Pass {
+ using cbb_ptr = const ir::BasicBlock*;
+
+ public:
+ LocalSingleStoreElimPass();
+ const char* name() const override { return "eliminate-local-single-store"; }
+ Status Process(ir::Module*) override;
+
+ private:
+ // Returns true if |opcode| is a non-ptr access chain op
+ bool IsNonPtrAccessChain(const SpvOp opcode) const;
+
+  // Returns true if |typeInst| is a scalar (integer, float or boolean),
+  // vector or matrix type.
+  bool IsMathType(const ir::Instruction* typeInst) const;
+
+ // Returns true if |typeInst| is a math type or a struct or array
+ // of a math type.
+ bool IsTargetType(const ir::Instruction* typeInst) const;
+
+ // Given a load or store |ip|, return the pointer instruction.
+ // Also return the base variable's id in |varId|.
+ ir::Instruction* GetPtr(ir::Instruction* ip, uint32_t* varId);
+
+ // Return true if |varId| is a previously identified target variable.
+ // Return false if |varId| is a previously identified non-target variable.
+ // If variable is not cached, return true if variable is a function scope
+ // variable of target type, false otherwise. Updates caches of target
+ // and non-target variables.
+ bool IsTargetVar(uint32_t varId);
+
+  // Return true if all refs through |ptrId| are only loads, stores, names or
+  // non-ptr access chains (checked recursively), and cache |ptrId| in
+  // supported_ref_ptrs_.
+ bool HasOnlySupportedRefs(uint32_t ptrId);
+
+  // Find all function scope variables in |func| that are stored to
+  // only once (SSA) and map them to their single store. Only analyze
+  // variables of scalar, vector or matrix type, and struct and array
+  // types comprising only these types. Currently this analysis is
+  // not done in the presence of function calls. TODO(): Allow
+  // analysis in the presence of function calls.
+ void SingleStoreAnalyze(ir::Function* func);
+
+ // Replace all instances of |loadInst|'s id with |replId| and delete
+ // |loadInst|.
+ void ReplaceAndDeleteLoad(ir::Instruction* loadInst, uint32_t replId);
+
+ using GetBlocksFunction =
+ std::function<const std::vector<ir::BasicBlock*>*(const ir::BasicBlock*)>;
+
+ /// Returns the block successors function for the augmented CFG.
+ GetBlocksFunction AugmentedCFGSuccessorsFunction() const;
+
+ /// Returns the block predecessors function for the augmented CFG.
+ GetBlocksFunction AugmentedCFGPredecessorsFunction() const;
+
+ // Calculate immediate dominators for |func|'s CFG. Leaves result
+ // in idom_. Entries for augmented CFG (pseudo blocks) are not created.
+ void CalculateImmediateDominators(ir::Function* func);
+
+ // Return true if instruction in |blk0| at ordinal position |idx0|
+ // dominates instruction in |blk1| at position |idx1|.
+ bool Dominates(ir::BasicBlock* blk0, uint32_t idx0,
+ ir::BasicBlock* blk1, uint32_t idx1);
+
+ // For each load of an SSA variable in |func|, replace all uses of
+ // the load with the value stored if the store dominates the load.
+ // Assumes that SingleStoreAnalyze() has just been run. Return true
+ // if any instructions are modified.
+ bool SingleStoreProcess(ir::Function* func);
+
+ // Return true if any instruction loads from |varId|
+ bool HasLoads(uint32_t varId) const;
+
+  // Return true if |varId| is not a function scope variable or if it has
+  // a load.
+  bool IsLiveVar(uint32_t varId) const;
+
+  // Return true if the variable stored to by |storeInst| is not a function
+  // scope variable or if that variable has a load.
+  bool IsLiveStore(ir::Instruction* storeInst);
+
+ // Add stores using |ptr_id| to |insts|
+ void AddStores(uint32_t ptr_id, std::queue<ir::Instruction*>* insts);
+
+ // Delete |inst| and iterate DCE on all its operands if they are now
+ // useless. If a load is deleted and its variable has no other loads,
+ // delete all its variable's stores.
+ void DCEInst(ir::Instruction* inst);
+
+  // Remove all stores to useless SSA variables. Remove useless
+  // access chains and variables as well. Assumes SingleStoreAnalyze
+  // and SingleStoreProcess have been run.
+ bool SingleStoreDCE();
+
+ // Do "single-store" optimization of function variables defined only
+ // with a single non-access-chain store in |func|. Replace all their
+ // non-access-chain loads with the value that is stored and eliminate
+ // any resulting dead code.
+ bool LocalSingleStoreElim(ir::Function* func);
+
+ // Save next available id into |module|.
+ inline void FinalizeNextId(ir::Module* module) {
+ module->SetIdBound(next_id_);
+ }
+
+ // Return next available id and generate next.
+ inline uint32_t TakeNextId() {
+ return next_id_++;
+ }
+
+ void Initialize(ir::Module* module);
+ Pass::Status ProcessImpl();
+
+ // Module this pass is processing
+ ir::Module* module_;
+
+ // Def-Uses for the module we are processing
+ std::unique_ptr<analysis::DefUseManager> def_use_mgr_;
+
+ // Map from function's result id to function
+ std::unordered_map<uint32_t, ir::Function*> id2function_;
+
+ // Map from block's label id to block
+ std::unordered_map<uint32_t, ir::BasicBlock*> label2block_;
+
+ // Map from SSA Variable to its single store
+ std::unordered_map<uint32_t, ir::Instruction*> ssa_var2store_;
+
+ // Map from store to its ordinal position in its block.
+ std::unordered_map<ir::Instruction*, uint32_t> store2idx_;
+
+ // Map from store to its block.
+ std::unordered_map<ir::Instruction*, ir::BasicBlock*> store2blk_;
+
+ // Set of non-SSA Variables
+ std::unordered_set<uint32_t> non_ssa_vars_;
+
+  // Cache of previously seen target variables
+  std::unordered_set<uint32_t> seen_target_vars_;
+
+  // Cache of previously seen non-target variables
+  std::unordered_set<uint32_t> seen_non_target_vars_;
+
+  // Variables with only supported references, i.e. loads and stores using
+  // the variable directly or through non-ptr access chains.
+ std::unordered_set<uint32_t> supported_ref_ptrs_;
+
+ // Augmented CFG Entry Block
+ ir::BasicBlock pseudo_entry_block_;
+
+ // Augmented CFG Exit Block
+ ir::BasicBlock pseudo_exit_block_;
+
+ // CFG Predecessors
+ std::unordered_map<const ir::BasicBlock*, std::vector<ir::BasicBlock*>>
+ predecessors_map_;
+
+ // CFG Successors
+ std::unordered_map<const ir::BasicBlock*, std::vector<ir::BasicBlock*>>
+ successors_map_;
+
+ // CFG Augmented Predecessors
+ std::unordered_map<const ir::BasicBlock*, std::vector<ir::BasicBlock*>>
+ augmented_predecessors_map_;
+
+ // CFG Augmented Successors
+ std::unordered_map<const ir::BasicBlock*, std::vector<ir::BasicBlock*>>
+ augmented_successors_map_;
+
+ // Immediate Dominator Map
+ // If block has no idom it points to itself.
+ std::unordered_map<ir::BasicBlock*, ir::BasicBlock*> idom_;
+
+ // Next unused ID
+ uint32_t next_id_;
+};
+
+} // namespace opt
+} // namespace spvtools
+
+#endif // LIBSPIRV_OPT_LOCAL_SINGLE_STORE_ELIM_PASS_H_
+
MakeUnique<opt::LocalSingleBlockLoadStoreElimPass>());
}
+Optimizer::PassToken CreateLocalSingleStoreElimPass() {
+ return MakeUnique<Optimizer::PassToken::Impl>(
+ MakeUnique<opt::LocalSingleStoreElimPass>());
+}
+
Optimizer::PassToken CreateCompactIdsPass() {
return MakeUnique<Optimizer::PassToken::Impl>(
MakeUnique<opt::CompactIdsPass>());
#include "fold_spec_constant_op_and_composite_pass.h"
#include "inline_pass.h"
#include "local_single_block_elim_pass.h"
+#include "local_single_store_elim_pass.h"
#include "freeze_spec_constant_value_pass.h"
#include "local_access_chain_convert_pass.h"
#include "null_pass.h"
LIBS SPIRV-Tools-opt
)
+add_spvtools_unittest(TARGET pass_local_single_store_elim
+ SRCS local_single_store_elim_test.cpp pass_utils.cpp
+ LIBS SPIRV-Tools-opt
+)
+
add_spvtools_unittest(TARGET pass_eliminate_dead_const
SRCS eliminate_dead_const_test.cpp pass_utils.cpp
LIBS SPIRV-Tools-opt
--- /dev/null
+// Copyright (c) 2017 Valve Corporation
+// Copyright (c) 2017 LunarG Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "pass_fixture.h"
+#include "pass_utils.h"
+
+namespace {
+
+using namespace spvtools;
+
+using LocalSingleStoreElimTest = PassTest<::testing::Test>;
+
+TEST_F(LocalSingleStoreElimTest, PositiveAndNegative) {
+  // Single store to v is optimized. Multiple stores to
+  // f are not optimized.
+ //
+ // #version 140
+ //
+ // in vec4 BaseColor;
+ // in float fi;
+ //
+ // void main()
+ // {
+ // vec4 v = BaseColor;
+ // float f = fi;
+ // if (f < 0)
+ // f = 0.0;
+ // gl_FragColor = v + f;
+ // }
+
+ const std::string predefs =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %BaseColor %fi %gl_FragColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %v "v"
+OpName %BaseColor "BaseColor"
+OpName %f "f"
+OpName %fi "fi"
+OpName %gl_FragColor "gl_FragColor"
+%void = OpTypeVoid
+%9 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%v4float = OpTypeVector %float 4
+%_ptr_Function_v4float = OpTypePointer Function %v4float
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%_ptr_Function_float = OpTypePointer Function %float
+%_ptr_Input_float = OpTypePointer Input %float
+%fi = OpVariable %_ptr_Input_float Input
+%float_0 = OpConstant %float 0
+%bool = OpTypeBool
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+)";
+
+ const std::string before =
+ R"(%main = OpFunction %void None %9
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%f = OpVariable %_ptr_Function_float Function
+%20 = OpLoad %v4float %BaseColor
+OpStore %v %20
+%21 = OpLoad %float %fi
+OpStore %f %21
+%22 = OpLoad %float %f
+%23 = OpFOrdLessThan %bool %22 %float_0
+OpSelectionMerge %24 None
+OpBranchConditional %23 %25 %24
+%25 = OpLabel
+OpStore %f %float_0
+OpBranch %24
+%24 = OpLabel
+%26 = OpLoad %v4float %v
+%27 = OpLoad %float %f
+%28 = OpCompositeConstruct %v4float %27 %27 %27 %27
+%29 = OpFAdd %v4float %26 %28
+OpStore %gl_FragColor %29
+OpReturn
+OpFunctionEnd
+)";
+
+ const std::string after =
+ R"(%main = OpFunction %void None %9
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%f = OpVariable %_ptr_Function_float Function
+%20 = OpLoad %v4float %BaseColor
+%21 = OpLoad %float %fi
+OpStore %f %21
+%22 = OpLoad %float %f
+%23 = OpFOrdLessThan %bool %22 %float_0
+OpSelectionMerge %24 None
+OpBranchConditional %23 %25 %24
+%25 = OpLabel
+OpStore %f %float_0
+OpBranch %24
+%24 = OpLabel
+%27 = OpLoad %float %f
+%28 = OpCompositeConstruct %v4float %27 %27 %27 %27
+%29 = OpFAdd %v4float %20 %28
+OpStore %gl_FragColor %29
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(predefs + before,
+ predefs + after, true, true);
+}
+
+TEST_F(LocalSingleStoreElimTest, MultipleLoads) {
+  // Single store of v with multiple loads is optimized.
+  //
+  // #version 140
+  //
+  // in vec4 BaseColor;
+  // in float fi;
+  //
+  // void main()
+  // {
+  //     vec4 v = BaseColor;
+  //     vec4 r;
+  //     if (fi < 0)
+  //       r = v;
+  //     else
+  //       r = vec4(1.0) - v;
+  //     gl_FragColor = r;
+  // }
+
+ const std::string predefs =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %BaseColor %fi %gl_FragColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %v "v"
+OpName %BaseColor "BaseColor"
+OpName %fi "fi"
+OpName %r "r"
+OpName %gl_FragColor "gl_FragColor"
+%void = OpTypeVoid
+%9 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%v4float = OpTypeVector %float 4
+%_ptr_Function_v4float = OpTypePointer Function %v4float
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%_ptr_Input_float = OpTypePointer Input %float
+%fi = OpVariable %_ptr_Input_float Input
+%float_0 = OpConstant %float 0
+%bool = OpTypeBool
+%float_1 = OpConstant %float 1
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+)";
+
+ const std::string before =
+ R"(%main = OpFunction %void None %9
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%r = OpVariable %_ptr_Function_v4float Function
+%20 = OpLoad %v4float %BaseColor
+OpStore %v %20
+%21 = OpLoad %float %fi
+%22 = OpFOrdLessThan %bool %21 %float_0
+OpSelectionMerge %23 None
+OpBranchConditional %22 %24 %25
+%24 = OpLabel
+%26 = OpLoad %v4float %v
+OpStore %r %26
+OpBranch %23
+%25 = OpLabel
+%27 = OpLoad %v4float %v
+%28 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1
+%29 = OpFSub %v4float %28 %27
+OpStore %r %29
+OpBranch %23
+%23 = OpLabel
+%30 = OpLoad %v4float %r
+OpStore %gl_FragColor %30
+OpReturn
+OpFunctionEnd
+)";
+
+ const std::string after =
+ R"(%main = OpFunction %void None %9
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%r = OpVariable %_ptr_Function_v4float Function
+%20 = OpLoad %v4float %BaseColor
+%21 = OpLoad %float %fi
+%22 = OpFOrdLessThan %bool %21 %float_0
+OpSelectionMerge %23 None
+OpBranchConditional %22 %24 %25
+%24 = OpLabel
+OpStore %r %20
+OpBranch %23
+%25 = OpLabel
+%28 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1
+%29 = OpFSub %v4float %28 %20
+OpStore %r %29
+OpBranch %23
+%23 = OpLabel
+%30 = OpLoad %v4float %r
+OpStore %gl_FragColor %30
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(predefs + before,
+ predefs + after, true, true);
+}
+
+TEST_F(LocalSingleStoreElimTest, NoStoreElimWithInterveningAccessChainLoad) {
+  // The last load of v is eliminated, but the access chain load and the
+  // store of v are not.
+ //
+ // #version 140
+ //
+ // in vec4 BaseColor;
+ //
+ // void main()
+ // {
+ // vec4 v = BaseColor;
+ // float f = v[3];
+ // gl_FragColor = v * f;
+ // }
+
+ const std::string predefs =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %BaseColor %gl_FragColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %v "v"
+OpName %BaseColor "BaseColor"
+OpName %f "f"
+OpName %gl_FragColor "gl_FragColor"
+%void = OpTypeVoid
+%8 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%v4float = OpTypeVector %float 4
+%_ptr_Function_v4float = OpTypePointer Function %v4float
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%_ptr_Function_float = OpTypePointer Function %float
+%uint = OpTypeInt 32 0
+%uint_3 = OpConstant %uint 3
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+)";
+
+ const std::string before =
+ R"(%main = OpFunction %void None %8
+%17 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%f = OpVariable %_ptr_Function_float Function
+%18 = OpLoad %v4float %BaseColor
+OpStore %v %18
+%19 = OpAccessChain %_ptr_Function_float %v %uint_3
+%20 = OpLoad %float %19
+OpStore %f %20
+%21 = OpLoad %v4float %v
+%22 = OpLoad %float %f
+%23 = OpVectorTimesScalar %v4float %21 %22
+OpStore %gl_FragColor %23
+OpReturn
+OpFunctionEnd
+)";
+
+ const std::string after =
+ R"(%main = OpFunction %void None %8
+%17 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%f = OpVariable %_ptr_Function_float Function
+%18 = OpLoad %v4float %BaseColor
+OpStore %v %18
+%19 = OpAccessChain %_ptr_Function_float %v %uint_3
+%20 = OpLoad %float %19
+%23 = OpVectorTimesScalar %v4float %18 %20
+OpStore %gl_FragColor %23
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(predefs + before,
+ predefs + after, true, true);
+}
+
+TEST_F(LocalSingleStoreElimTest, NoReplaceOfDominatingPartialStore) {
+ // Note: SPIR-V hand edited to initialize v to vec4(0.0)
+ //
+ // #version 140
+ //
+ // in vec4 BaseColor;
+ //
+ // void main()
+ // {
+ // vec4 v;
+  //     v[1] = 1.0;
+ // gl_FragColor = v;
+ // }
+
+ const std::string assembly =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %gl_FragColor %BaseColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %v "v"
+OpName %gl_FragColor "gl_FragColor"
+OpName %BaseColor "BaseColor"
+%void = OpTypeVoid
+%7 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%v4float = OpTypeVector %float 4
+%_ptr_Function_v4float = OpTypePointer Function %v4float
+%float_0 = OpConstant %float 0
+%12 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0
+%float_1 = OpConstant %float 1
+%uint = OpTypeInt 32 0
+%uint_1 = OpConstant %uint 1
+%_ptr_Function_float = OpTypePointer Function %float
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%main = OpFunction %void None %7
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function %12
+%20 = OpAccessChain %_ptr_Function_float %v %uint_1
+OpStore %20 %float_1
+%21 = OpLoad %v4float %v
+OpStore %gl_FragColor %21
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(
+ assembly, assembly, true, true);
+}
+
+TEST_F(LocalSingleStoreElimTest, NoReplaceInPresenceOfUnsupportedInst) {
+  // Note: PositiveAndNegative test hand edited to insert OpCopyObject
+ //
+ // #version 140
+ //
+ // in vec4 BaseColor;
+ // in float fi;
+ //
+ // void main()
+ // {
+ // vec4 v = BaseColor;
+ // float f = fi;
+ // if (f < 0)
+ // f = 0.0;
+ // gl_FragColor = v + f;
+ // }
+
+ const std::string assembly =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %BaseColor %fi %gl_FragColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %v "v"
+OpName %BaseColor "BaseColor"
+OpName %f "f"
+OpName %fi "fi"
+OpName %gl_FragColor "gl_FragColor"
+%void = OpTypeVoid
+%9 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%v4float = OpTypeVector %float 4
+%_ptr_Function_v4float = OpTypePointer Function %v4float
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%_ptr_Function_float = OpTypePointer Function %float
+%_ptr_Input_float = OpTypePointer Input %float
+%fi = OpVariable %_ptr_Input_float Input
+%float_0 = OpConstant %float 0
+%bool = OpTypeBool
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+%main = OpFunction %void None %9
+%19 = OpLabel
+%v = OpVariable %_ptr_Function_v4float Function
+%f = OpVariable %_ptr_Function_float Function
+%20 = OpLoad %v4float %BaseColor
+OpStore %v %20
+%21 = OpLoad %float %fi
+OpStore %f %21
+%22 = OpLoad %float %f
+%23 = OpFOrdLessThan %bool %22 %float_0
+OpSelectionMerge %24 None
+OpBranchConditional %23 %25 %24
+%25 = OpLabel
+OpStore %f %float_0
+OpBranch %24
+%24 = OpLabel
+%26 = OpCopyObject %_ptr_Function_v4float %v
+%27 = OpLoad %v4float %26
+%28 = OpLoad %float %f
+%29 = OpCompositeConstruct %v4float %28 %28 %28 %28
+%30 = OpFAdd %v4float %27 %29
+OpStore %gl_FragColor %30
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(
+ assembly, assembly, true, true);
+}
+
+TEST_F(LocalSingleStoreElimTest, NoOptIfStoreNotDominating) {
+  // The single store to f is not optimized because it does not dominate
+  // the load.
+ //
+ // #version 140
+ //
+ // in vec4 BaseColor;
+ // in float fi;
+ //
+ // void main()
+ // {
+ // float f;
+ // if (fi < 0)
+ // f = 0.5;
+ // if (fi < 0)
+ // gl_FragColor = BaseColor * f;
+ // else
+ // gl_FragColor = BaseColor;
+ // }
+
+ const std::string assembly =
+ R"(OpCapability Shader
+%1 = OpExtInstImport "GLSL.std.450"
+OpMemoryModel Logical GLSL450
+OpEntryPoint Fragment %main "main" %fi %gl_FragColor %BaseColor
+OpExecutionMode %main OriginUpperLeft
+OpSource GLSL 140
+OpName %main "main"
+OpName %fi "fi"
+OpName %f "f"
+OpName %gl_FragColor "gl_FragColor"
+OpName %BaseColor "BaseColor"
+%void = OpTypeVoid
+%8 = OpTypeFunction %void
+%float = OpTypeFloat 32
+%_ptr_Input_float = OpTypePointer Input %float
+%fi = OpVariable %_ptr_Input_float Input
+%float_0 = OpConstant %float 0
+%bool = OpTypeBool
+%_ptr_Function_float = OpTypePointer Function %float
+%float_0_5 = OpConstant %float 0.5
+%v4float = OpTypeVector %float 4
+%_ptr_Output_v4float = OpTypePointer Output %v4float
+%gl_FragColor = OpVariable %_ptr_Output_v4float Output
+%_ptr_Input_v4float = OpTypePointer Input %v4float
+%BaseColor = OpVariable %_ptr_Input_v4float Input
+%main = OpFunction %void None %8
+%18 = OpLabel
+%f = OpVariable %_ptr_Function_float Function
+%19 = OpLoad %float %fi
+%20 = OpFOrdLessThan %bool %19 %float_0
+OpSelectionMerge %21 None
+OpBranchConditional %20 %22 %21
+%22 = OpLabel
+OpStore %f %float_0_5
+OpBranch %21
+%21 = OpLabel
+%23 = OpLoad %float %fi
+%24 = OpFOrdLessThan %bool %23 %float_0
+OpSelectionMerge %25 None
+OpBranchConditional %24 %26 %27
+%26 = OpLabel
+%28 = OpLoad %v4float %BaseColor
+%29 = OpLoad %float %f
+%30 = OpVectorTimesScalar %v4float %28 %29
+OpStore %gl_FragColor %30
+OpBranch %25
+%27 = OpLabel
+%31 = OpLoad %v4float %BaseColor
+OpStore %gl_FragColor %31
+OpBranch %25
+%25 = OpLabel
+OpReturn
+OpFunctionEnd
+)";
+
+ SinglePassRunAndCheck<opt::LocalSingleStoreElimPass>(assembly, assembly,
+ true, true);
+}
+
+// TODO(greg-lunarg): Add tests to verify handling of these cases:
+//
+// Other types
+// Others?
+
+} // anonymous namespace
optimizer.RegisterPass(CreateLocalAccessChainConvertPass());
} else if (0 == strcmp(cur_arg, "--eliminate-local-single-block")) {
optimizer.RegisterPass(CreateLocalSingleBlockLoadStoreElimPass());
+ } else if (0 == strcmp(cur_arg, "--eliminate-local-single-store")) {
+ optimizer.RegisterPass(CreateLocalSingleStoreElimPass());
} else if (0 == strcmp(cur_arg, "--eliminate-dead-const")) {
optimizer.RegisterPass(CreateEliminateDeadConstantPass());
} else if (0 == strcmp(cur_arg, "--fold-spec-const-op-composite")) {