These need to be modified to work directly with the JIT's allocator(s) instead of going through IAllocator. It may also be useful to adjust them to account for the fact that the JIT never releases memory.
The JIT is the primary user of these classes anyway: ExpandArray(Stack) isn't used anywhere else, and SimplerHashTable's only other user is the gcinfo library.
Renamed headers and classes to avoid potential conflicts with the old ones.
Also made the JIT's hash table behavior the default to avoid the need to specify it in hash table instantiations.
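For illustration, here is the shape of the rename for a node-to-count map (the typedef name is hypothetical; the pattern matches the real typedefs changed below). The fourth template argument is no longer needed because JitHashTableBehavior is now the default:

// Old spelling: the behavior class had to be named in every instantiation.
typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, unsigned, JitSimplerHashBehavior> NodeToCountMap;
// New spelling: JitHashTableBehavior is supplied by default.
typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, unsigned> NodeToCountMap;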
ASSERT_TP& Compiler::GetAssertionDep(unsigned lclNum)
{
- ExpandArray<ASSERT_TP>& dep = *optAssertionDep;
+ JitExpandArray<ASSERT_TP>& dep = *optAssertionDep;
if (dep[lclNum] == nullptr)
{
dep[lclNum] = BitVecOps::MakeEmpty(apTraits);
if (optAssertionDep == nullptr)
{
- optAssertionDep = new (this, CMK_AssertionProp) ExpandArray<ASSERT_TP>(getAllocator(), max(1, lvaCount));
+ optAssertionDep = new (this, CMK_AssertionProp) JitExpandArray<ASSERT_TP>(getAllocator(), max(1, lvaCount));
}
optAssertionTraitsInit(optMaxAssertionCount);
// Static vars.
BasicBlock::MemoryPhiArg* BasicBlock::EmptyMemoryPhiDef = (BasicBlock::MemoryPhiArg*)0x1;
-unsigned PtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr)
+unsigned JitPtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr)
{
#ifdef DEBUG
unsigned hash = SsaStressHashHelper();
#include "blockset.h"
#include "jitstd.h"
#include "bitvec.h"
-#include "simplerhash.h"
+#include "jithashtable.h"
/*****************************************************************************/
typedef BitVec EXPSET_TP;
};
template <>
-struct PtrKeyFuncs<BasicBlock> : public KeyFuncsDefEquals<const BasicBlock*>
+struct JitPtrKeyFuncs<BasicBlock> : public JitKeyFuncsDefEquals<const BasicBlock*>
{
public:
// Make sure hashing is deterministic and not on "ptr."
};
// A set of blocks.
-typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, bool, JitSimplerHashBehavior> BlkSet;
+typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, bool> BlkSet;
// A map of block -> set of blocks, can be used as sparse block trees.
-typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, BlkSet*, JitSimplerHashBehavior> BlkToBlkSetMap;
+typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkSet*> BlkToBlkSetMap;
// Map from Block to Block. Used for a variety of purposes.
-typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, BasicBlock*, JitSimplerHashBehavior> BlockToBlockMap;
+typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap;
// In compiler terminology the control flow between two BasicBlocks
// is typically referred to as an "edge". Most well known are the
m_condStr = other.m_condStr;
}
- // GetHashCode() and Equals() are needed by SimplerHashTable
+ // GetHashCode() and Equals() are needed by JitHashTable
static unsigned GetHashCode(FileLine fl)
{
}
};
-typedef SimplerHashTable<FileLine, FileLine, size_t, JitSimplerHashBehavior> FileLineToCountMap;
+typedef JitHashTable<FileLine, FileLine, size_t> FileLineToCountMap;
FileLineToCountMap* NowayAssertMap;
void Compiler::RecordNowayAssert(const char* filename, unsigned line, const char* condStr)
#endif // MEASURE_MEM_ALLOC
#ifdef LEGACY_BACKEND
- compQMarks = new (this, CMK_Unknown) ExpandArrayStack<GenTreePtr>(getAllocator());
+ compQMarks = new (this, CMK_Unknown) JitExpandArrayStack<GenTreePtr>(getAllocator());
#endif
}
// If this method were a real constructor for Compiler, these would
// become method initializations.
- impPendingBlockMembers = ExpandArray<BYTE>(getAllocator());
- impSpillCliquePredMembers = ExpandArray<BYTE>(getAllocator());
- impSpillCliqueSuccMembers = ExpandArray<BYTE>(getAllocator());
+ impPendingBlockMembers = JitExpandArray<BYTE>(getAllocator());
+ impSpillCliquePredMembers = JitExpandArray<BYTE>(getAllocator());
+ impSpillCliqueSuccMembers = JitExpandArray<BYTE>(getAllocator());
memset(&lvMemoryPerSsaData, 0, sizeof(PerSsaArray));
lvMemoryPerSsaData.Init(getAllocator());
#include "instr.h"
#include "regalloc.h"
#include "sm.h"
-#include "simplerhash.h"
+#include "jithashtable.h"
#include "cycletimer.h"
#include "blockset.h"
#include "jitstd.h"
#include "arraystack.h"
#include "hashbv.h"
#include "fp.h"
-#include "expandarray.h"
+#include "jitexpandarray.h"
#include "tinyarray.h"
#include "valuenum.h"
#include "reglist.h"
}
};
-typedef ExpandArray<LclSsaVarDsc> PerSsaArray;
+typedef JitExpandArray<LclSsaVarDsc> PerSsaArray;
class LclVarDsc
{
}
};
-typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, TestLabelAndNum, JitSimplerHashBehavior> NodeToTestDataMap;
+typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, TestLabelAndNum> NodeToTestDataMap;
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
flowList* BlockPredsWithEH(BasicBlock* blk);
// This table is useful for memoization of the method above.
- typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, flowList*, JitSimplerHashBehavior>
- BlockToFlowListMap;
+ typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, flowList*> BlockToFlowListMap;
BlockToFlowListMap* m_blockToEHPreds;
BlockToFlowListMap* GetBlockToEHPreds()
{
PendingDsc* impPendingFree; // Freed up dscs that can be reused
// We keep a byte-per-block map (dynamically extended) in the top-level Compiler object of a compilation.
- ExpandArray<BYTE> impPendingBlockMembers;
+ JitExpandArray<BYTE> impPendingBlockMembers;
// Return the byte for "b" (allocating/extending impPendingBlockMembers if necessary.)
// Operates on the map in the top-level ancestor.
// When we compute a "spill clique" (see above) these byte-maps are allocated to have a byte per basic
// block, and represent the predecessor and successor members of the clique currently being computed.
// *** Access to these will need to be locked in a parallel compiler.
- ExpandArray<BYTE> impSpillCliquePredMembers;
- ExpandArray<BYTE> impSpillCliqueSuccMembers;
+ JitExpandArray<BYTE> impSpillCliquePredMembers;
+ JitExpandArray<BYTE> impSpillCliqueSuccMembers;
enum SpillCliqueDir
{
// "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
// to record/recover the "def" SSA number, given the lcl var node for "x" in such a tree.
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, unsigned, JitSimplerHashBehavior> NodeToUnsignedMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, unsigned> NodeToUnsignedMap;
NodeToUnsignedMap* m_opAsgnVarDefSsaNums;
NodeToUnsignedMap* GetOpAsgnVarDefSsaNums()
{
{
}
};
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IndirectAssignmentAnnotation*, JitSimplerHashBehavior>
- NodeToIndirAssignMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, IndirectAssignmentAnnotation*> NodeToIndirAssignMap;
NodeToIndirAssignMap* m_indirAssignMap;
NodeToIndirAssignMap* GetIndirAssignMap()
{
void UpdateTarget(CompAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
- typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet, JitSimplerHashBehavior>
- BlockToSwitchDescMap;
+ typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet> BlockToSwitchDescMap;
private:
// Maps BasicBlock*'s that end in switch statements to SwitchUniqueSuccSets that allow
void optHoistLoopCode();
// To represent sets of VN's that have already been hoisted in outer loops.
- typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, bool, JitSimplerHashBehavior> VNToBoolMap;
+ typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, bool> VNToBoolMap;
typedef VNToBoolMap VNSet;
struct LoopHoistContext
int lpLoopVarFPCount; // The register count for the FP LclVars that are read/written inside this loop
int lpVarInOutFPCount; // The register count for the FP LclVars that are alive inside or accross this loop
- typedef SimplerHashTable<CORINFO_FIELD_HANDLE,
- PtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>,
- bool,
- JitSimplerHashBehavior>
- FieldHandleSet;
+ typedef JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<struct CORINFO_FIELD_STRUCT_>, bool> FieldHandleSet;
FieldHandleSet* lpFieldsModified; // This has entries (mappings to "true") for all static field and object
// instance fields modified
// in the loop.
- typedef SimplerHashTable<CORINFO_CLASS_HANDLE,
- PtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>,
- bool,
- JitSimplerHashBehavior>
- ClassHandleSet;
+ typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, bool> ClassHandleSet;
ClassHandleSet* lpArrayElemTypesModified; // Bits set indicate the set of sz array element types such that
// arrays of that type are modified
// in the loop.
CSEdsc** optCSEhash;
CSEdsc** optCSEtab;
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, GenTreePtr, JitSimplerHashBehavior> NodeToNodeMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, GenTreePtr> NodeToNodeMap;
NodeToNodeMap* optCseCheckedBoundMap; // Maps bound nodes to ancestor compares that should be
// re-numbered with the bound to improve range check elimination
public:
// VN based copy propagation.
typedef ArrayStack<GenTreePtr> GenTreePtrStack;
- typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, GenTreePtrStack*, JitSimplerHashBehavior>
- LclNumToGenTreePtrStack;
+ typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, GenTreePtrStack*> LclNumToGenTreePtrStack;
// Kill set to track variables with intervening definitions.
VARSET_TP optCopyPropKillSet;
#ifdef DEBUG
GenTreePtr optAssertionPropCurrentTree;
#endif
- AssertionIndex* optComplementaryAssertionMap;
- ExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
- // using the value of a local var) for each local var
- AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
- AssertionIndex optAssertionCount; // total number of assertions in the assertion table
+ AssertionIndex* optComplementaryAssertionMap;
+ JitExpandArray<ASSERT_TP>* optAssertionDep; // table that holds dependent assertions (assertions
+ // using the value of a local var) for each local var
+ AssertionDsc* optAssertionTabPrivate; // table that holds info about value assignments
+ AssertionIndex optAssertionCount; // total number of assertions in the assertion table
AssertionIndex optMaxAssertionCount;
public:
return optAssertionCount;
}
ASSERT_TP* bbJtrueAssertionOut;
- typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP, JitSimplerHashBehavior>
- ValueNumToAssertsMap;
+ typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ASSERT_TP> ValueNumToAssertsMap;
ValueNumToAssertsMap* optValueNumToAsserts;
// Assertion prop helpers.
// whose return type is other than TYP_VOID. 2) GT_CALL node is a frequently used
// structure and IL offset is needed only when generating debuggable code. Therefore
// it is desirable to avoid memory size penalty in retail scenarios.
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, IL_OFFSETX, JitSimplerHashBehavior>
- CallSiteILOffsetTable;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, IL_OFFSETX> CallSiteILOffsetTable;
CallSiteILOffsetTable* genCallSite2ILOffsetMap;
unsigned genReturnLocal; // Local number for the return value when applicable.
// the importing is completely finished.
#ifdef LEGACY_BACKEND
- ExpandArrayStack<GenTreePtr>* compQMarks; // The set of QMark nodes created in the current compilation, so
- // we can iterate over these efficiently.
+ JitExpandArrayStack<GenTreePtr>* compQMarks; // The set of QMark nodes created in the current compilation, so
+ // we can iterate over these efficiently.
#endif
#if CPU_USES_BLOCK_MOVE
// Max value of scope count for which we would use linear search; for larger values we would use hashtable lookup.
static const unsigned MAX_LINEAR_FIND_LCL_SCOPELIST = 32;
- typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*, JitSimplerHashBehavior>
- VarNumToScopeDscMap;
+ typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, VarScopeMapInfo*> VarNumToScopeDscMap;
    // Map to keep variables' scope indexed by varNum containing its scope dscs at the index.
VarNumToScopeDscMap* compVarScopeMap;
return compRoot->m_nodeTestData;
}
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, int, JitSimplerHashBehavior> NodeToIntMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
// Returns the set (i.e., the domain of the result map) of nodes that are keys in m_nodeTestData, and
// currently occur in the AST graph.
return compRoot->m_fieldSeqStore;
}
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, FieldSeqNode*, JitSimplerHashBehavior> NodeToFieldSeqMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
// Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
// the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
// CoreRT. Such case is handled same as the default case.
void fgAddFieldSeqForZeroOffset(GenTreePtr op1, FieldSeqNode* fieldSeq);
- typedef SimplerHashTable<const GenTree*, PtrKeyFuncs<GenTree>, ArrayInfo, JitSimplerHashBehavior>
- NodeToArrayInfoMap;
+ typedef JitHashTable<const GenTree*, JitPtrKeyFuncs<GenTree>, ArrayInfo> NodeToArrayInfoMap;
NodeToArrayInfoMap* m_arrayInfoMap;
NodeToArrayInfoMap* GetArrayInfoMap()
};
#endif // _HOST_64BIT_
-typedef SimplerHashTable<size_t, SizeTKeyFuncs<size_t>, CORINFO_METHOD_HANDLE, JitSimplerHashBehavior>
- AddrToMethodHandleMap;
-typedef SimplerHashTable<size_t, SizeTKeyFuncs<size_t>, size_t, JitSimplerHashBehavior> AddrToAddrMap;
+typedef JitHashTable<size_t, SizeTKeyFuncs<size_t>, CORINFO_METHOD_HANDLE> AddrToMethodHandleMap;
+typedef JitHashTable<size_t, SizeTKeyFuncs<size_t>, size_t> AddrToAddrMap;
class Compiler;
#else // !JIT32_GCENCODER
#include "gcinfoencoder.h"
-#include "simplerhash.h"
// Do explicit instantiation.
-template class SimplerHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId, JitSimplerHashBehavior>;
-template class SimplerHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId, JitSimplerHashBehavior>;
+template class JitHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId>;
+template class JitHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId>;
#ifdef DEBUG
#include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
#include "reglist.h"
#include "valuenumtype.h"
-#include "simplerhash.h"
+#include "jithashtable.h"
#include "nodeinfo.h"
#include "simd.h"
// This class canonicalizes field sequences.
class FieldSeqStore
{
- typedef SimplerHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*, JitSimplerHashBehavior>
- FieldSeqNodeCanonMap;
+ typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
CompAllocator* m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#pragma once
+
+#include "iallocator.h"
+
+// An array of T that expands (and never shrinks) to accommodate references (with default value T() for
+// elements newly created by expansion.)
+template <class T>
+class JitExpandArray
+{
+protected:
+ IAllocator* m_alloc; // The IAllocator object that should be used to allocate members.
+ T* m_members; // Pointer to the element array.
+ unsigned m_size; // The size of "m_members".
+ unsigned m_minSize; // The minimum array size to allocate.
+
+ // Ensures that "m_size" > "idx", and that "m_members" is at least large enough to be
+ // indexed by "idx".
+ void EnsureCoversInd(unsigned idx);
+
+ // Requires that m_members is not NULL, and that
+ // low <= high <= m_size. Sets elements low to high-1 of m_members to T().
+ void InitializeRange(unsigned low, unsigned high)
+ {
+ assert(m_members != NULL);
+ assert(low <= high && high <= m_size);
+ for (unsigned i = low; i < high; i++)
+ m_members[i] = T();
+ }
+
+public:
+ // Initializes "*this" to represent an empty array of size zero.
+ // Use "alloc" for allocation of internal objects. If "minSize" is specified,
+ // the allocated size of the internal representation will hold at least that many
+ // T's.
+ JitExpandArray(IAllocator* alloc, unsigned minSize = 1)
+ : m_alloc(alloc), m_members(NULL), m_size(0), m_minSize(minSize)
+ {
+ assert(minSize > 0);
+ }
+
+ ~JitExpandArray()
+ {
+ if (m_members != NULL)
+ m_alloc->Free(m_members);
+ }
+
+ // Like the constructor above, to re-initialize to the empty state.
+ void Init(IAllocator* alloc, unsigned minSize = 1)
+ {
+ if (m_members != NULL)
+ m_alloc->Free(m_members);
+ m_alloc = alloc;
+ m_members = NULL;
+ m_size = 0;
+ m_minSize = minSize;
+ }
+
+ // Resets "*this" to represent an array of size zero, with the given "minSize".
+ void Reset(unsigned minSize)
+ {
+ m_minSize = minSize;
+ Reset();
+ }
+
+ // Resets "*this" to represent an array of size zero, whose
+ // allocated representation can represent at least "m_minSize" T's.
+ void Reset()
+ {
+ if (m_minSize > m_size)
+ EnsureCoversInd(m_minSize - 1);
+ InitializeRange(0, m_size);
+ }
+
+ // Returns the T at index "idx". Expands the representation, if necessary,
+ // to contain "idx" in its domain, so the result will be an all-zero T if
+ // it had not previously been set.
+ T Get(unsigned idx)
+ {
+ EnsureCoversInd(idx);
+ return m_members[idx];
+ }
+
+ // Like "Get", but returns a reference, so suitable for use as the LHS of an assignment.
+ T& GetRef(unsigned idx)
+ {
+ EnsureCoversInd(idx);
+ return m_members[idx];
+ }
+
+ // Expands the representation, if necessary, to contain "idx" in its domain, and
+ // sets the value at "idx" to "val".
+ void Set(unsigned idx, T val)
+ {
+ EnsureCoversInd(idx);
+ m_members[idx] = val;
+ }
+
+ T& operator[](unsigned idx)
+ {
+ EnsureCoversInd(idx);
+ return m_members[idx];
+ }
+};
+
+template <class T>
+class JitExpandArrayStack : public JitExpandArray<T>
+{
+ unsigned m_used;
+
+public:
+ JitExpandArrayStack(IAllocator* alloc, unsigned minSize = 1) : JitExpandArray<T>(alloc, minSize), m_used(0)
+ {
+ }
+
+ void Set(unsigned idx, T val)
+ {
+ JitExpandArray<T>::Set(idx, val);
+ m_used = max((idx + 1), m_used);
+ }
+
+ // Resets "*this" to represent an array of size zero, whose
+ // allocated representation can represent at least "m_minSize" T's.
+ void Reset()
+ {
+ JitExpandArray<T>::Reset();
+ m_used = 0;
+ }
+
+ // Returns the index at which "val" is stored.
+ unsigned Push(T val)
+ {
+ unsigned res = m_used;
+ JitExpandArray<T>::Set(m_used, val);
+ m_used++;
+ return res;
+ }
+
+ // Requires Size() > 0
+ T Pop()
+ {
+ assert(Size() > 0);
+ m_used--;
+ return this->m_members[m_used];
+ }
+
+ // Requires Size() > 0
+ T Top()
+ {
+ assert(Size() > 0);
+ return this->m_members[m_used - 1];
+ }
+
+ // Requires Size() > 0
+ T& TopRef()
+ {
+ assert(Size() > 0);
+ return this->m_members[m_used - 1];
+ }
+
+ // Requires that "idx" < "m_used" (asserting this in debug), and returns
+ // "Get(idx)" (which is covered, by the invariant that all indices in "[0..m_used)" are
+ // covered).
+ T GetNoExpand(unsigned idx)
+ {
+ assert(idx < m_used);
+ return this->m_members[idx];
+ }
+
+ // Requires that "idx" < "m_used" (asserting this in debug).
+ // Removes the element at "idx" and shifts contents of the array beyond "idx", if any,
+ // to occupy the free slot created at "idx".
+ // O(n) worst case operation, no memory is allocated.
+ void Remove(unsigned idx)
+ {
+ assert(idx < m_used);
+ if (idx < m_used - 1)
+ {
+ memmove(&this->m_members[idx], &this->m_members[idx + 1], (m_used - idx - 1) * sizeof(T));
+ }
+ m_used--;
+ }
+
+ unsigned Size()
+ {
+ return m_used;
+ }
+};
+
+template <class T>
+void JitExpandArray<T>::EnsureCoversInd(unsigned idx)
+{
+ if (idx >= m_size)
+ {
+ unsigned oldSize = m_size;
+ T* oldMembers = m_members;
+ m_size = max(idx + 1, max(m_minSize, m_size * 2));
+ if (sizeof(T) < sizeof(int))
+ {
+ m_members = (T*)m_alloc->ArrayAlloc(ALIGN_UP(m_size * sizeof(T), sizeof(int)), sizeof(BYTE));
+ }
+ else
+ {
+ m_members = (T*)m_alloc->ArrayAlloc(m_size, sizeof(T));
+ }
+ if (oldMembers != NULL)
+ {
+ memcpy(m_members, oldMembers, oldSize * sizeof(T));
+ m_alloc->Free(oldMembers);
+ }
+ InitializeRange(oldSize, m_size);
+ }
+}
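A minimal usage sketch of the two classes above, assuming 'alloc' is an existing valid IAllocator* (in the JIT this would typically come from the compiler object):

// Sketch only; 'alloc' is an assumed IAllocator*.
JitExpandArray<unsigned> counts(alloc, /* minSize */ 8);
counts.Set(3, 42);               // expands as needed; untouched slots read back as unsigned()
unsigned zero = counts.Get(100); // expands further and returns the default value 0

JitExpandArrayStack<int> stack(alloc);
stack.Push(10);
stack.Push(20);
int top       = stack.Top();  // 20, element stays in place
int popped    = stack.Pop();  // 20, element removed
unsigned used = stack.Size(); // now 1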
}
};
-typedef SimplerHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId, JitSimplerHashBehavior> RegSlotMap;
-typedef SimplerHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId, JitSimplerHashBehavior> StackSlotMap;
+typedef JitHashTable<RegSlotIdKey, RegSlotIdKey, GcSlotId> RegSlotMap;
+typedef JitHashTable<StackSlotIdKey, StackSlotIdKey, GcSlotId> StackSlotMap;
#endif
-typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, VARSET_TP*, JitSimplerHashBehavior> NodeToVarsetPtrMap;
+typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, VARSET_TP*> NodeToVarsetPtrMap;
class GCInfo
{
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#pragma once
+
+#include "iallocator.h"
+
+// JitHashTable implements a mapping from a Key type to a Value type,
+// via a hash table.
+
+// Synchronization is the responsibility of the caller: if a
+// JitHashTable is used in a multithreaded context, the table should be
+// associated with a lock.
+
+// JitHashTable actually takes four template arguments: Key,
+// KeyFuncs, Value, and Behavior. We don't assume that Key has hash or equality
+// functions with specific names; rather, we assume that KeyFuncs has
+// static methods
+// int GetHashCode(Key)
+// and
+// bool Equals(Key, Key)
+// and use those. An
+// instantiator can thus make a small "adaptor class" to invoke
+// existing instance method hash and/or equality functions. If the
+// implementor of a candidate Key class K understands this convention,
+// these static methods can be implemented by K, so that K can be used
+// as the actual argument for both the Key and KeyFuncs classes.
+//
+// The "Behavior" argument provides the following static members:
+//
+// s_growth_factor_numerator
+// s_growth_factor_denominator Factor to grow allocation (numerator/denominator).
+// Typically inherited from default traits (3/2)
+//
+// s_density_factor_numerator
+// s_density_factor_denominator Maximum occupied density of table before growth
+// occurs (num/denom). Typically inherited (3/4).
+//
+// s_minimum_allocation Minimum table allocation count (size on first growth.) It is
+// probably preferable to call Reallocate on initialization rather
+// than override this from the default traits. (7)
+//
+// NoMemory() Called when the hash table is unable to grow due to potential
+// overflow or the lack of a sufficiently large prime.
+
+class JitHashTableBehavior
+{
+public:
+ static const unsigned s_growth_factor_numerator = 3;
+ static const unsigned s_growth_factor_denominator = 2;
+
+ static const unsigned s_density_factor_numerator = 3;
+ static const unsigned s_density_factor_denominator = 4;
+
+ static const unsigned s_minimum_allocation = 7;
+
+ inline static void DECLSPEC_NORETURN NoMemory()
+ {
+ NOMEM();
+ }
+};
+
+// Stores info about primes, including the magic number and shift amount needed
+// to implement a divide without using the divide instruction
+class JitPrimeInfo
+{
+public:
+ JitPrimeInfo() : prime(0), magic(0), shift(0)
+ {
+ }
+ JitPrimeInfo(unsigned p, unsigned m, unsigned s) : prime(p), magic(m), shift(s)
+ {
+ }
+ unsigned prime;
+ unsigned magic;
+ unsigned shift;
+
+ inline unsigned magicNumberDivide(unsigned numerator) const;
+ inline unsigned magicNumberRem(unsigned numerator) const;
+};
+
+// Hash table class definition
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior = JitHashTableBehavior>
+class JitHashTable
+{
+public:
+ // Forward declaration.
+ class KeyIterator;
+
+ // Constructor/destructor. Hash tables always start out empty, with no
+ // allocation overhead. Call Reallocate to prime with an initial size if
+ // desired. A non-null IAllocator* must be provided; the constructor asserts
+ // that it is not null.
+
+ JitHashTable(IAllocator* alloc);
+ ~JitHashTable();
+
+ // operators new/delete when an IAllocator is to be used.
+ void* operator new(size_t sz, IAllocator* alloc);
+ void* operator new[](size_t sz, IAllocator* alloc);
+ void operator delete(void* p, IAllocator* alloc);
+ void operator delete[](void* p, IAllocator* alloc);
+
+ // If the table contains a mapping for "key", returns "true" and
+ // sets "*pVal" to the value to which "key" maps. Otherwise,
+ // returns false, and does not modify "*pVal".
+ bool Lookup(Key k, Value* pVal = NULL) const;
+
+ Value* LookupPointer(Key k) const;
+
+ // Causes the table to map "key" to "val". Returns "true" if
+ // "key" had already been mapped by the table, "false" otherwise.
+ bool Set(Key k, Value val);
+
+ // Ensures that "key" is not mapped to a value by the "table."
+ // Returns "true" iff it had been mapped.
+ bool Remove(Key k);
+
+ // Remove all mappings in the table.
+ void RemoveAll();
+
+ // Begin and End pointers for iteration over entire table.
+ KeyIterator Begin() const;
+ KeyIterator End() const;
+
+ // Return the number of elements currently stored in the table
+ unsigned GetCount() const;
+
+private:
+ // Forward declaration of the linked-list node class.
+ struct Node;
+
+ unsigned GetIndexForKey(Key k) const;
+
+ // If the table has a mapping for "k", return the node containing
+ // that mapping, else "NULL".
+ Node* FindNode(Key k) const;
+
+ // Resizes a hash table for growth. The new size is computed based
+ // on the current population, growth factor, and maximum density factor.
+ void Grow();
+
+ // See if it is OK to grow the hash table by one element. If not, reallocate
+ // the hash table.
+ void CheckGrowth();
+
+public:
+ // Reallocates a hash table to a specific size. The size must be big enough
+ // to hold all elements in the table appropriately.
+ //
+ // Note that the actual table size must always be a prime number; the number
+ // passed in will be upward adjusted if necessary.
+ void Reallocate(unsigned newTableSize);
+
+ // For iteration, we use a pattern similar to the STL "forward
+ // iterator" pattern. It basically consists of wrapping an
+ // "iteration variable" in an object, and providing pointer-like
+ // operators on the iterator. Example usage:
+ //
+ // for (JitHashTable::KeyIterator iter = foo->Begin(), end = foo->End(); !iter.Equal(end); iter++)
+ // {
+ // // use foo, iter.
+ // }
+ // iter.Get() will yield (a reference to) the
+ // current key. It will assert the equivalent of "iter != end."
+ class KeyIterator
+ {
+ private:
+ friend class JitHashTable;
+
+ // The method implementations have to be here for portability.
+ // Some compilers won't compile the separate implementation in jithashtable.inl
+
+ Node** m_table;
+ Node* m_node;
+ unsigned m_tableSize;
+ unsigned m_index;
+
+ public:
+ KeyIterator(const JitHashTable* hash, BOOL begin)
+ : m_table(hash->m_table)
+ , m_node(NULL)
+ , m_tableSize(hash->m_tableSizeInfo.prime)
+ , m_index(begin ? 0 : m_tableSize)
+ {
+ if (begin && hash->m_tableCount > 0)
+ {
+ assert(m_table != NULL);
+ while (m_index < m_tableSize && m_table[m_index] == NULL)
+ m_index++;
+
+ if (m_index >= m_tableSize)
+ {
+ return;
+ }
+ else
+ {
+ m_node = m_table[m_index];
+ }
+ assert(m_node != NULL);
+ }
+ }
+
+ const Key& Get() const
+ {
+ assert(m_node != NULL);
+
+ return m_node->m_key;
+ }
+
+ const Value& GetValue() const
+ {
+ assert(m_node != NULL);
+
+ return m_node->m_val;
+ }
+
+ void SetValue(const Value& value) const
+ {
+ assert(m_node != NULL);
+
+ m_node->m_val = value;
+ }
+
+ void Next()
+ {
+ if (m_node != NULL)
+ {
+ m_node = m_node->m_next;
+ if (m_node != NULL)
+ {
+ return;
+ }
+
+ // Otherwise...
+ m_index++;
+ }
+ while (m_index < m_tableSize && m_table[m_index] == NULL)
+ m_index++;
+
+ if (m_index >= m_tableSize)
+ {
+ m_node = NULL;
+ return;
+ }
+ else
+ {
+ m_node = m_table[m_index];
+ }
+ assert(m_node != NULL);
+ }
+
+ bool Equal(const KeyIterator& i) const
+ {
+ return i.m_node == m_node;
+ }
+
+ void operator++()
+ {
+ Next();
+ }
+
+ void operator++(int)
+ {
+ Next();
+ }
+ };
+
+ // HashTableRef only exists to support operator[].
+ // operator[] returns a HashTableRef, which enables operator[] to support both reading and writing;
+ // in a normal array operator[] simply returns a reference to the actual element, which is not possible here.
+ class HashTableRef
+ {
+ public:
+ // this is really the getter for the array.
+ operator Value()
+ {
+
+ Value result;
+ table->Lookup(key, &result);
+ return result;
+ }
+
+ void operator=(const Value v)
+ {
+ table->Set(key, v);
+ }
+
+ friend class JitHashTable;
+
+ protected:
+ HashTableRef(JitHashTable* t, Key k)
+ {
+ table = t;
+ key = k;
+ }
+
+ JitHashTable* table;
+ Key key;
+ };
+
+ Value& operator[](Key k) const
+ {
+ Value* p = LookupPointer(k);
+ assert(p);
+ return *p;
+ }
+
+private:
+ // Find the next prime number >= the given value.
+ static JitPrimeInfo NextPrime(unsigned number);
+
+ // Instance members
+ IAllocator* m_alloc; // IAllocator to use in this
+ // table.
+ // The node type.
+ struct Node
+ {
+ Node* m_next; // Assume that the alignment requirements of Key and Value are no greater than Node*, so put m_next
+ // first to avoid unnecessary padding.
+ Key m_key;
+ Value m_val;
+
+ Node(Key k, Value v, Node* next) : m_next(next), m_key(k), m_val(v)
+ {
+ }
+
+ void* operator new(size_t sz, IAllocator* alloc)
+ {
+ return alloc->Alloc(sz);
+ }
+
+ void operator delete(void* p, IAllocator* alloc)
+ {
+ alloc->Free(p);
+ }
+ };
+
+ Node** m_table; // pointer to table
+ JitPrimeInfo m_tableSizeInfo; // size of table (a prime) and information about it
+ unsigned m_tableCount; // number of elements in table
+ unsigned m_tableMax; // maximum occupied count
+};
+
+#include "jithashtable.inl"
+
+// A few simple KeyFuncs types...
+
+// Base class for types whose equality function is the same as their "==".
+template <typename T>
+struct JitKeyFuncsDefEquals
+{
+ static bool Equals(const T& x, const T& y)
+ {
+ return x == y;
+ }
+};
+
+template <typename T>
+struct JitPtrKeyFuncs : public JitKeyFuncsDefEquals<const T*>
+{
+public:
+ static unsigned GetHashCode(const T* ptr)
+ {
+ // Hmm. Maybe (unsigned) ought to be "ssize_t" -- or this ought to be ifdef'd by size.
+ return static_cast<unsigned>(reinterpret_cast<uintptr_t>(ptr));
+ }
+};
+
+template <typename T> // Must be coercible to "unsigned" with no loss of information.
+struct JitSmallPrimitiveKeyFuncs : public JitKeyFuncsDefEquals<T>
+{
+ static unsigned GetHashCode(const T& val)
+ {
+ return static_cast<unsigned>(val);
+ }
+};
+
+template <typename T> // Assumed to be a primitive whose size is 8, 4, 2 or 1 bytes.
+struct JitLargePrimitiveKeyFuncs : public JitKeyFuncsDefEquals<T>
+{
+ static unsigned GetHashCode(const T val)
+ {
+ // A static cast when T is a float or a double converts the value (i.e. 0.25 converts to 0)
+ //
+ // Instead we want to use all of the bits of a float to create the hash value
+ // So we cast the address of val to a pointer to an equivalent sized unsigned int
+ // This allows us to read the actual bit representation of a float type
+ //
+ // We can't read beyond the end of val, so we use sizeof(T) to determine
+ // exactly how many bytes to read
+ //
+ if (sizeof(T) == 8)
+ {
+ // cast &val to (UINT64 *) then deref to get the bits
+ UINT64 asUINT64 = *(reinterpret_cast<const UINT64*>(&val));
+
+ // Get the upper and lower 32-bit values from the 64-bit value
+ UINT32 upper32 = static_cast<UINT32>(asUINT64 >> 32);
+ UINT32 lower32 = static_cast<UINT32>(asUINT64 & 0xFFFFFFFF);
+
+ // Exclusive-Or the upper32 and the lower32 values
+ return static_cast<unsigned>(upper32 ^ lower32);
+ }
+ else if (sizeof(T) == 4)
+ {
+ // cast &val to (UINT32 *) then deref to get the bits
+ UINT32 asUINT32 = *(reinterpret_cast<const UINT32*>(&val));
+
+ // Just return the 32-bit value
+ return static_cast<unsigned>(asUINT32);
+ }
+ else if ((sizeof(T) == 2) || (sizeof(T) == 1))
+ {
+ // For small sizes we must have an integer type
+ // so we can just use the static_cast.
+ //
+ return static_cast<unsigned>(val);
+ }
+ else
+ {
+ // Only support hashing for types that are 8, 4, 2 or 1 bytes in size
+ assert(!"Unsupported size");
+ return static_cast<unsigned>(val); // compile-time error here when we have an illegal size
+ }
+ }
+};
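A small sketch of the KeyFuncs convention described at the top of this header: a hypothetical adaptor class with static GetHashCode/Equals methods, used with the default JitHashTableBehavior. The key type, adaptor, function and allocator names are illustrative assumptions, not part of the change:

#include <string.h> // strcmp, used by the hypothetical adaptor below

struct NameKeyFuncs // adaptor providing the static methods JitHashTable expects
{
    static unsigned GetHashCode(const char* name)
    {
        unsigned hash = 0;
        for (; *name != '\0'; name++)
        {
            hash = hash * 31 + static_cast<unsigned>(*name);
        }
        return hash;
    }

    static bool Equals(const char* x, const char* y)
    {
        return strcmp(x, y) == 0;
    }
};

// Behavior defaults to JitHashTableBehavior, so only three arguments are needed.
typedef JitHashTable<const char*, NameKeyFuncs, unsigned> NameToCountMap;

void UseNameToCountMap(IAllocator* alloc) // 'alloc' is assumed to be a valid IAllocator*
{
    NameToCountMap* map = new (alloc) NameToCountMap(alloc);
    map->Set("lclVar", 1); // returns false: the key was not previously mapped
    unsigned count;
    if (map->Lookup("lclVar", &count)) // true; count == 1
    {
        map->Set("lclVar", count + 1); // returns true: the key was already mapped
    }
    map->Remove("lclVar"); // true: the mapping existed
}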
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// To implement magic-number divide with a 32-bit magic number,
+// multiply by the magic number, take the top 64 bits, and shift that
+// by the amount given in the table.
+
+unsigned JitPrimeInfo::magicNumberDivide(unsigned numerator) const
+{
+ unsigned __int64 num = numerator;
+ unsigned __int64 mag = magic;
+ unsigned __int64 product = (num * mag) >> (32 + shift);
+ return (unsigned)product;
+}
+
+unsigned JitPrimeInfo::magicNumberRem(unsigned numerator) const
+{
+ unsigned div = magicNumberDivide(numerator);
+ unsigned result = numerator - (div * prime);
+ assert(result == numerator % prime);
+ return result;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+JitHashTable<Key,KeyFuncs,Value,Behavior>::JitHashTable(IAllocator* alloc)
+ : m_alloc(alloc),
+ m_table(NULL),
+ m_tableSizeInfo(),
+ m_tableCount(0),
+ m_tableMax(0)
+{
+ assert(m_alloc != nullptr);
+
+#ifndef __GNUC__ // these crash GCC
+ static_assert_no_msg(Behavior::s_growth_factor_numerator > Behavior::s_growth_factor_denominator);
+ static_assert_no_msg(Behavior::s_density_factor_numerator < Behavior::s_density_factor_denominator);
+#endif
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+JitHashTable<Key,KeyFuncs,Value,Behavior>::~JitHashTable()
+{
+ RemoveAll();
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void * JitHashTable<Key,KeyFuncs,Value,Behavior>::operator new(size_t sz, IAllocator * alloc)
+{
+ return alloc->Alloc(sz);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void * JitHashTable<Key,KeyFuncs,Value,Behavior>::operator new[](size_t sz, IAllocator * alloc)
+{
+ return alloc->Alloc(sz);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::operator delete(void * p, IAllocator * alloc)
+{
+ alloc->Free(p);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::operator delete[](void * p, IAllocator * alloc)
+{
+ alloc->Free(p);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+unsigned JitHashTable<Key,KeyFuncs,Value,Behavior>::GetCount() const
+{
+ return m_tableCount;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+bool JitHashTable<Key,KeyFuncs,Value,Behavior>::Lookup(Key key, Value* pVal) const
+{
+ Node* pN = FindNode(key);
+
+ if (pN != NULL)
+ {
+ if (pVal != NULL)
+ {
+ *pVal = pN->m_val;
+ }
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+Value *JitHashTable<Key,KeyFuncs,Value,Behavior>::LookupPointer(Key key) const
+{
+ Node* pN = FindNode(key);
+
+ if (pN != NULL)
+ return &(pN->m_val);
+ else
+ return NULL;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+typename JitHashTable<Key,KeyFuncs,Value,Behavior>::Node*
+JitHashTable<Key,KeyFuncs,Value,Behavior>::FindNode(Key k) const
+{
+ if (m_tableSizeInfo.prime == 0)
+ return NULL;
+
+ unsigned index = GetIndexForKey(k);
+
+ Node* pN = m_table[index];
+ if (pN == NULL)
+ return NULL;
+
+ // Otherwise...
+ while (pN != NULL && !KeyFuncs::Equals(k, pN->m_key))
+ pN = pN->m_next;
+
+ assert(pN == NULL || KeyFuncs::Equals(k, pN->m_key));
+
+ // If pN != NULL, it's the node for the key, else the key isn't mapped.
+ return pN;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+unsigned JitHashTable<Key,KeyFuncs,Value,Behavior>::GetIndexForKey(Key k) const
+{
+ unsigned hash = KeyFuncs::GetHashCode(k);
+
+ unsigned index = m_tableSizeInfo.magicNumberRem(hash);
+
+ return index;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+bool JitHashTable<Key,KeyFuncs,Value,Behavior>::Set(Key k, Value v)
+{
+ CheckGrowth();
+
+ assert(m_tableSizeInfo.prime != 0);
+
+ unsigned index = GetIndexForKey(k);
+
+ Node* pN = m_table[index];
+ while (pN != NULL && !KeyFuncs::Equals(k, pN->m_key))
+ {
+ pN = pN->m_next;
+ }
+ if (pN != NULL)
+ {
+ pN->m_val = v;
+ return true;
+ }
+ else
+ {
+ Node* pNewNode = new (m_alloc) Node(k, v, m_table[index]);
+ m_table[index] = pNewNode;
+ m_tableCount++;
+ return false;
+ }
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+bool JitHashTable<Key,KeyFuncs,Value,Behavior>::Remove(Key k)
+{
+ unsigned index = GetIndexForKey(k);
+
+ Node* pN = m_table[index];
+ Node** ppN = &m_table[index];
+ while (pN != NULL && !KeyFuncs::Equals(k, pN->m_key))
+ {
+ ppN = &pN->m_next;
+ pN = pN->m_next;
+ }
+ if (pN != NULL)
+ {
+ *ppN = pN->m_next;
+ m_tableCount--;
+ Node::operator delete(pN, m_alloc);
+ return true;
+ }
+ else
+ {
+ return false;
+ }
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::RemoveAll()
+{
+ for (unsigned i = 0; i < m_tableSizeInfo.prime; i++)
+ {
+ for (Node* pN = m_table[i]; pN != NULL; )
+ {
+ Node* pNext = pN->m_next;
+ Node::operator delete(pN, m_alloc);
+ pN = pNext;
+ }
+ }
+ m_alloc->Free(m_table);
+
+ m_table = NULL;
+ m_tableSizeInfo = JitPrimeInfo();
+ m_tableCount = 0;
+ m_tableMax = 0;
+
+ return;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+typename JitHashTable<Key,KeyFuncs,Value,Behavior>::KeyIterator JitHashTable<Key,KeyFuncs,Value,Behavior>::Begin() const
+{
+ KeyIterator i(this, TRUE);
+ return i;
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+typename JitHashTable<Key,KeyFuncs,Value,Behavior>::KeyIterator JitHashTable<Key,KeyFuncs,Value,Behavior>::End() const
+{
+ return KeyIterator(this, FALSE);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::CheckGrowth()
+{
+ if (m_tableCount == m_tableMax)
+ {
+ Grow();
+ }
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::Grow()
+{
+ unsigned newSize = (unsigned) (m_tableCount
+ * Behavior::s_growth_factor_numerator / Behavior::s_growth_factor_denominator
+ * Behavior::s_density_factor_denominator / Behavior::s_density_factor_numerator);
+ if (newSize < Behavior::s_minimum_allocation)
+ newSize = Behavior::s_minimum_allocation;
+
+ // handle potential overflow
+ if (newSize < m_tableCount)
+ Behavior::NoMemory();
+
+ Reallocate(newSize);
+}
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+void JitHashTable<Key,KeyFuncs,Value,Behavior>::Reallocate(unsigned newTableSize)
+{
+ assert(newTableSize >= (GetCount() * Behavior::s_density_factor_denominator / Behavior::s_density_factor_numerator));
+
+ // Allocation size must be a prime number. This is necessary so that hashes uniformly
+ // distribute to all indices, and so that chaining will visit all indices in the hash table.
+ JitPrimeInfo newPrime = NextPrime(newTableSize);
+ newTableSize = newPrime.prime;
+
+ Node** newTable = (Node**)m_alloc->ArrayAlloc(newTableSize, sizeof(Node*));
+
+ for (unsigned i = 0; i < newTableSize; i++) {
+ newTable[i] = NULL;
+ }
+
+ // Move all entries over to new table (re-using the Node structures.)
+
+ for (unsigned i = 0; i < m_tableSizeInfo.prime; i++)
+ {
+ Node* pN = m_table[i];
+ while (pN != NULL)
+ {
+ Node* pNext = pN->m_next;
+
+ unsigned newIndex = newPrime.magicNumberRem(KeyFuncs::GetHashCode(pN->m_key));
+ pN->m_next = newTable[newIndex];
+ newTable[newIndex] = pN;
+
+ pN = pNext;
+ }
+ }
+
+ // @todo:
+ // We might want to try to delay this cleanup to allow asynchronous readers
+ if (m_table != NULL)
+ m_alloc->Free(m_table);
+
+ m_table = newTable;
+ m_tableSizeInfo = newPrime;
+ m_tableMax = (unsigned) (newTableSize * Behavior::s_density_factor_numerator / Behavior::s_density_factor_denominator);
+}
+
+// Table of primes and their magic-number-divide constant.
+// For more info see the book "Hacker's Delight" chapter 10.9 "Unsigned Division by Divisors >= 1"
+// These were selected by looking for primes, each roughly twice as big as the previous one, having
+// 32-bit magic numbers, (because the algorithm for using 33-bit magic numbers is slightly slower).
+//
+
+SELECTANY const JitPrimeInfo primeInfo[] =
+{
+ JitPrimeInfo(9, 0x38e38e39, 1),
+ JitPrimeInfo(23, 0xb21642c9, 4),
+ JitPrimeInfo(59, 0x22b63cbf, 3),
+ JitPrimeInfo(131, 0xfa232cf3, 7),
+ JitPrimeInfo(239, 0x891ac73b, 7),
+ JitPrimeInfo(433, 0x975a751, 4),
+ JitPrimeInfo(761, 0x561e46a5, 8),
+ JitPrimeInfo(1399, 0xbb612aa3, 10),
+ JitPrimeInfo(2473, 0x6a009f01, 10),
+ JitPrimeInfo(4327, 0xf2555049, 12),
+ JitPrimeInfo(7499, 0x45ea155f, 11),
+ JitPrimeInfo(12973, 0x1434f6d3, 10),
+ JitPrimeInfo(22433, 0x2ebe18db, 12),
+ JitPrimeInfo(46559, 0xb42bebd5, 15),
+ JitPrimeInfo(96581, 0xadb61b1b, 16),
+ JitPrimeInfo(200341, 0x29df2461, 15),
+ JitPrimeInfo(415517, 0xa181c46d, 18),
+ JitPrimeInfo(861719, 0x4de0bde5, 18),
+ JitPrimeInfo(1787021, 0x9636c46f, 20),
+ JitPrimeInfo(3705617, 0x4870adc1, 20),
+ JitPrimeInfo(7684087, 0x8bbc5b83, 22),
+ JitPrimeInfo(15933877, 0x86c65361, 23),
+ JitPrimeInfo(33040633, 0x40fec79b, 23),
+ JitPrimeInfo(68513161, 0x7d605cd1, 25),
+ JitPrimeInfo(142069021, 0xf1da390b, 27),
+ JitPrimeInfo(294594427, 0x74a2507d, 27),
+ JitPrimeInfo(733045421, 0x5dbec447, 28),
+};
+
+template <typename Key, typename KeyFuncs, typename Value, typename Behavior>
+JitPrimeInfo JitHashTable<Key,KeyFuncs,Value,Behavior>::NextPrime(unsigned number)
+{
+ for (int i = 0; i < (int) (sizeof(primeInfo) / sizeof(primeInfo[0])); i++) {
+ if (primeInfo[i].prime >= number)
+ return primeInfo[i];
+ }
+
+ // overflow
+ Behavior::NoMemory();
+}
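As a quick sanity check of the magic-number scheme implemented above, a hedged example using the first entry of the prime table (prime 9, magic 0x38e38e39, shift 1):

JitPrimeInfo nine(9, 0x38e38e39, 1);
unsigned q = nine.magicNumberDivide(20); // (20 * 0x38e38e39) >> (32 + 1) == 2, i.e. 20 / 9
unsigned r = nine.magicNumberRem(20);    // 20 - 2 * 9 == 2, i.e. 20 % 9
assert((q == 2) && (r == 2));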
// Return Values:
// Return the optInfo array member. The method doesn't allocate memory.
//
-ExpandArrayStack<LcOptInfo*>* LoopCloneContext::GetLoopOptInfo(unsigned loopNum)
+JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::GetLoopOptInfo(unsigned loopNum)
{
return optInfo[loopNum];
}
// Return Values:
// The array of optimization candidates for the loop.
//
-ExpandArrayStack<LcOptInfo*>* LoopCloneContext::EnsureLoopOptInfo(unsigned loopNum)
+JitExpandArrayStack<LcOptInfo*>* LoopCloneContext::EnsureLoopOptInfo(unsigned loopNum)
{
if (optInfo[loopNum] == nullptr)
{
- optInfo[loopNum] = new (alloc) ExpandArrayStack<LcOptInfo*>(alloc, 4);
+ optInfo[loopNum] = new (alloc) JitExpandArrayStack<LcOptInfo*>(alloc, 4);
}
return optInfo[loopNum];
}
// Return Values:
// The array of cloning conditions for the loop.
//
-ExpandArrayStack<LC_Condition>* LoopCloneContext::EnsureConditions(unsigned loopNum)
+JitExpandArrayStack<LC_Condition>* LoopCloneContext::EnsureConditions(unsigned loopNum)
{
if (conditions[loopNum] == nullptr)
{
- conditions[loopNum] = new (alloc) ExpandArrayStack<LC_Condition>(alloc, 4);
+ conditions[loopNum] = new (alloc) JitExpandArrayStack<LC_Condition>(alloc, 4);
}
return conditions[loopNum];
}
// Return Values:
// The array of cloning conditions for the loop.
//
-ExpandArrayStack<LC_Condition>* LoopCloneContext::GetConditions(unsigned loopNum)
+JitExpandArrayStack<LC_Condition>* LoopCloneContext::GetConditions(unsigned loopNum)
{
return conditions[loopNum];
}
// Return Values:
// The array of dereferences for the loop.
//
-ExpandArrayStack<LC_Array>* LoopCloneContext::EnsureDerefs(unsigned loopNum)
+JitExpandArrayStack<LC_Array>* LoopCloneContext::EnsureDerefs(unsigned loopNum)
{
if (derefs[loopNum] == nullptr)
{
- derefs[loopNum] = new (alloc) ExpandArrayStack<LC_Array>(alloc, 4);
+ derefs[loopNum] = new (alloc) JitExpandArrayStack<LC_Array>(alloc, 4);
}
return derefs[loopNum];
}
//
bool LoopCloneContext::HasBlockConditions(unsigned loopNum)
{
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
if (levelCond == nullptr)
{
return false;
// Return Values:
// Return block conditions.
//
-ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* LoopCloneContext::GetBlockConditions(unsigned loopNum)
+JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::GetBlockConditions(unsigned loopNum)
{
assert(HasBlockConditions(loopNum));
return blockConditions[loopNum];
// Return Values:
// Return block conditions.
//
-ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum,
- unsigned condBlocks)
+JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* LoopCloneContext::EnsureBlockConditions(unsigned loopNum,
+ unsigned condBlocks)
{
if (blockConditions[loopNum] == nullptr)
{
- blockConditions[loopNum] = new (alloc) ExpandArrayStack<ExpandArrayStack<LC_Condition>*>(alloc, condBlocks);
+ blockConditions[loopNum] =
+ new (alloc) JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>(alloc, condBlocks);
}
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
for (unsigned i = 0; i < condBlocks; ++i)
{
- levelCond->Set(i, new (alloc) ExpandArrayStack<LC_Condition>(alloc));
+ levelCond->Set(i, new (alloc) JitExpandArrayStack<LC_Condition>(alloc));
}
return levelCond;
}
#ifdef DEBUG
void LoopCloneContext::PrintBlockConditions(unsigned loopNum)
{
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
if (levelCond == nullptr || levelCond->Size() == 0)
{
JITDUMP("No block conditions\n");
bool allTrue = true;
bool anyFalse = false;
- ExpandArrayStack<LC_Condition>& conds = *conditions[loopNum];
+ JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum];
JITDUMP("Evaluating %d loop cloning conditions for loop %d\n", conds.Size(), loopNum);
//
// Sometimes, two conditions will combine together to yield a single condition, then remove a
// duplicate condition.
-void LoopCloneContext::OptimizeConditions(ExpandArrayStack<LC_Condition>& conds)
+void LoopCloneContext::OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds)
{
for (unsigned i = 0; i < conds.Size(); ++i)
{
{
return;
}
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = blockConditions[loopNum];
for (unsigned i = 0; i < levelCond->Size(); ++i)
{
OptimizeConditions(*((*levelCond)[i]));
printf("\n");
}
#endif
- ExpandArrayStack<LC_Condition>& conds = *conditions[loopNum];
+ JitExpandArrayStack<LC_Condition>& conds = *conditions[loopNum];
OptimizeConditions(conds);
#ifdef DEBUG
// Return Values:
// None.
//
-void LoopCloneContext::CondToStmtInBlock(Compiler* comp,
- ExpandArrayStack<LC_Condition>& conds,
- BasicBlock* block,
- bool reverse)
+void LoopCloneContext::CondToStmtInBlock(Compiler* comp,
+ JitExpandArrayStack<LC_Condition>& conds,
+ BasicBlock* block,
+ bool reverse)
{
noway_assert(conds.Size() > 0);
// Return Values:
// None
//
-void LC_Deref::DeriveLevelConditions(ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* conds)
+void LC_Deref::DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* conds)
{
if (level == 0)
{
{
if (children == nullptr)
{
- children = new (alloc) ExpandArrayStack<LC_Deref*>(alloc);
+ children = new (alloc) JitExpandArrayStack<LC_Deref*>(alloc);
}
}
//
// static
-LC_Deref* LC_Deref::Find(ExpandArrayStack<LC_Deref*>* children, unsigned lcl)
+LC_Deref* LC_Deref::Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl)
{
if (children == nullptr)
{
*/
struct ArrIndex
{
- unsigned arrLcl; // The array base local num
- ExpandArrayStack<unsigned> indLcls; // The indices local nums
- ExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension.
- unsigned rank; // Rank of the array
- BasicBlock* useBlock; // Block where the [] occurs
+ unsigned arrLcl; // The array base local num
+ JitExpandArrayStack<unsigned> indLcls; // The indices local nums
+ JitExpandArrayStack<GenTree*> bndsChks; // The bounds checks nodes along each dimension.
+ unsigned rank; // Rank of the array
+ BasicBlock* useBlock; // Block where the [] occurs
ArrIndex(CompAllocator* alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
{
*/
struct LC_Deref
{
- const LC_Array array;
- ExpandArrayStack<LC_Deref*>* children;
+ const LC_Array array;
+ JitExpandArrayStack<LC_Deref*>* children;
unsigned level;
bool HasChildren();
void EnsureChildren(CompAllocator* alloc);
- static LC_Deref* Find(ExpandArrayStack<LC_Deref*>* children, unsigned lcl);
+ static LC_Deref* Find(JitExpandArrayStack<LC_Deref*>* children, unsigned lcl);
- void DeriveLevelConditions(ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* len);
+ void DeriveLevelConditions(JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* len);
#ifdef DEBUG
void Print(unsigned indent = 0)
{
#ifdef _MSC_VER
(*children)[i]->Print(indent + 1);
#else // _MSC_VER
- (*((ExpandArray<LC_Deref*>*)children))[i]->Print(indent + 1);
+ (*((JitExpandArray<LC_Deref*>*)children))[i]->Print(indent + 1);
#endif // _MSC_VER
}
}
*/
struct LoopCloneContext
{
- CompAllocator* alloc; // The allocator
- ExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x
- // optimization-opportunities)
- ExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for each
- // loop. (loop x cloning-conditions)
- ExpandArrayStack<LC_Array>** derefs; // The array of dereference conditions found in each loop. (loop x
- // deref-conditions)
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>** blockConditions; // The array of block levels of conditions for
- // each loop. (loop x level x conditions)
+ CompAllocator* alloc; // The allocator
+ JitExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x
+ // optimization-opportunities)
+ JitExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for
+ // each
+ // loop. (loop x cloning-conditions)
+ JitExpandArrayStack<LC_Array>** derefs; // The array of dereference conditions found in each loop. (loop x
+ // deref-conditions)
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>** blockConditions; // The array of block levels of
+ // conditions for
+ // each loop. (loop x level x conditions)
LoopCloneContext(unsigned loopCount, CompAllocator* alloc) : alloc(alloc)
{
- optInfo = new (alloc) ExpandArrayStack<LcOptInfo*>*[loopCount];
- conditions = new (alloc) ExpandArrayStack<LC_Condition>*[loopCount];
- derefs = new (alloc) ExpandArrayStack<LC_Array>*[loopCount];
- blockConditions = new (alloc) ExpandArrayStack<ExpandArrayStack<LC_Condition>*>*[loopCount];
+ optInfo = new (alloc) JitExpandArrayStack<LcOptInfo*>*[loopCount];
+ conditions = new (alloc) JitExpandArrayStack<LC_Condition>*[loopCount];
+ derefs = new (alloc) JitExpandArrayStack<LC_Array>*[loopCount];
+ blockConditions = new (alloc) JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>*[loopCount];
for (unsigned i = 0; i < loopCount; ++i)
{
optInfo[i] = nullptr;
}
// Evaluate conditions into a JTRUE stmt and put it in the block. Reverse condition if 'reverse' is true.
- void CondToStmtInBlock(Compiler* comp, ExpandArrayStack<LC_Condition>& conds, BasicBlock* block, bool reverse);
+ void CondToStmtInBlock(Compiler* comp, JitExpandArrayStack<LC_Condition>& conds, BasicBlock* block, bool reverse);
// Get all the optimization information for loop "loopNum"; This information is held in "optInfo" array.
// If NULL this allocates the optInfo[loopNum] array for "loopNum"
- ExpandArrayStack<LcOptInfo*>* EnsureLoopOptInfo(unsigned loopNum);
+ JitExpandArrayStack<LcOptInfo*>* EnsureLoopOptInfo(unsigned loopNum);
// Get all the optimization information for loop "loopNum"; This information is held in "optInfo" array.
// If NULL this does not allocate the optInfo[loopNum] array for "loopNum"
- ExpandArrayStack<LcOptInfo*>* GetLoopOptInfo(unsigned loopNum);
+ JitExpandArrayStack<LcOptInfo*>* GetLoopOptInfo(unsigned loopNum);
// Cancel all optimizations for loop "loopNum" by clearing out the "conditions" member if non-null
    // and setting the optInfo to "null". If "null", then the user of this class is not supposed to
void CancelLoopOptInfo(unsigned loopNum);
// Get the conditions that decide which loop to take for "loopNum." If NULL allocate an empty array.
- ExpandArrayStack<LC_Condition>* EnsureConditions(unsigned loopNum);
+ JitExpandArrayStack<LC_Condition>* EnsureConditions(unsigned loopNum);
// Get the conditions for loop. No allocation is performed.
- ExpandArrayStack<LC_Condition>* GetConditions(unsigned loopNum);
+ JitExpandArrayStack<LC_Condition>* GetConditions(unsigned loopNum);
// Ensure that the "deref" conditions array is allocated.
- ExpandArrayStack<LC_Array>* EnsureDerefs(unsigned loopNum);
+ JitExpandArrayStack<LC_Array>* EnsureDerefs(unsigned loopNum);
// Get block conditions for each loop, no allocation is performed.
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* GetBlockConditions(unsigned loopNum);
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* GetBlockConditions(unsigned loopNum);
// Ensure that the block condition is present, if not allocate space.
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* EnsureBlockConditions(unsigned loopNum, unsigned totalBlocks);
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* EnsureBlockConditions(unsigned loopNum,
+ unsigned totalBlocks);
// Print the block conditions for the loop.
void PrintBlockConditions(unsigned loopNum);
void EvaluateConditions(unsigned loopNum, bool* pAllTrue, bool* pAnyFalse DEBUGARG(bool verbose));
private:
- void OptimizeConditions(ExpandArrayStack<LC_Condition>& conds);
+ void OptimizeConditions(JitExpandArrayStack<LC_Condition>& conds);
public:
// Optimize conditions to remove redundant conditions.
unsigned fromBBNum;
unsigned toBBNum;
};
- typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo, JitSimplerHashBehavior>
- SplitBBNumToTargetBBNumMap;
+ typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, SplitEdgeInfo> SplitBBNumToTargetBBNumMap;
SplitBBNumToTargetBBNumMap* splitBBNumToTargetBBNumMap;
SplitBBNumToTargetBBNumMap* getSplitBBNumToTargetBBNumMap()
{
JITDUMP("------------------------------------------------------------\n");
JITDUMP("Deriving cloning conditions for L%02u\n", loopNum);
- LoopDsc* loop = &optLoopTable[loopNum];
- ExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
+ LoopDsc* loop = &optLoopTable[loopNum];
+ JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
if (loop->lpTestOper() == GT_LT)
{
//
bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context)
{
- ExpandArrayStack<LC_Deref*> nodes(getAllocator());
- int maxRank = -1;
+ JitExpandArrayStack<LC_Deref*> nodes(getAllocator());
+ int maxRank = -1;
// Get the dereference-able arrays.
- ExpandArrayStack<LC_Array>* deref = context->EnsureDerefs(loopNum);
+ JitExpandArrayStack<LC_Array>* deref = context->EnsureDerefs(loopNum);
// For each array in the dereference list, construct a tree,
// where the nodes are array and index variables and an edge 'u-v'
}
// Derive conditions into an 'array of level x array of conditions' i.e., levelCond[levels][conds]
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = context->EnsureBlockConditions(loopNum, condBlocks);
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond =
+ context->EnsureBlockConditions(loopNum, condBlocks);
for (unsigned i = 0; i < nodes.Size(); ++i)
{
nodes[i]->DeriveLevelConditions(levelCond);
//
void Compiler::optPerformStaticOptimizations(unsigned loopNum, LoopCloneContext* context DEBUGARG(bool dynamicPath))
{
- ExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
+ JitExpandArrayStack<LcOptInfo*>* optInfos = context->GetLoopOptInfo(loopNum);
for (unsigned i = 0; i < optInfos->Size(); ++i)
{
LcOptInfo* optInfo = optInfos->GetRef(i);
// For each loop, derive cloning conditions for the optimization candidates.
for (unsigned i = 0; i < optLoopCount; ++i)
{
- ExpandArrayStack<LcOptInfo*>* optInfos = context.GetLoopOptInfo(i);
+ JitExpandArrayStack<LcOptInfo*>* optInfos = context.GetLoopOptInfo(i);
if (optInfos == nullptr)
{
continue;
JITDUMP("Inserting loop cloning conditions\n");
assert(context->HasBlockConditions(loopNum));
- BasicBlock* curCond = head;
- ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* levelCond = context->GetBlockConditions(loopNum);
+ BasicBlock* curCond = head;
+ JitExpandArrayStack<JitExpandArrayStack<LC_Condition>*>* levelCond = context->GetBlockConditions(loopNum);
for (unsigned i = 0; i < levelCond->Size(); ++i)
{
bool isHeaderBlock = (curCond == head);
// Find the set of definitely-executed blocks.
// Ideally, the definitely-executed blocks are the ones that post-dominate the entry block.
// Until we have post-dominators, we'll special-case for single-exit blocks.
- ExpandArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist());
+ JitExpandArrayStack<BasicBlock*> defExec(getAllocatorLoopHoist());
if (pLoopDsc->lpFlags & LPFLG_ONE_EXIT)
{
assert(pLoopDsc->lpExit != nullptr);
#pragma once
#include "compiler.h"
-#include "expandarray.h"
static bool IntAddOverflows(int max1, int max2)
{
Location();
};
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, bool, JitSimplerHashBehavior> OverflowMap;
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, Range*, JitSimplerHashBehavior> RangeMap;
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, BasicBlock*, JitSimplerHashBehavior> SearchPath;
- typedef SimplerHashTable<INT64, LargePrimitiveKeyFuncs<INT64>, Location*, JitSimplerHashBehavior> VarToLocMap;
- typedef SimplerHashTable<INT64, LargePrimitiveKeyFuncs<INT64>, ExpandArrayStack<Location*>*, JitSimplerHashBehavior>
- VarToLocArrayMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, bool> OverflowMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, Range*> RangeMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, BasicBlock*> SearchPath;
+ typedef JitHashTable<INT64, JitLargePrimitiveKeyFuncs<INT64>, Location*> VarToLocMap;
+ typedef JitHashTable<INT64, JitLargePrimitiveKeyFuncs<INT64>, JitExpandArrayStack<Location*>*> VarToLocArrayMap;
// Generate a hashcode unique for this ssa var.
UINT64 HashCode(unsigned lclNum, unsigned ssaNum);
}
};
- typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, SSAName, JitSimplerHashBehavior>
- LabelToSSANameMap;
- typedef SimplerHashTable<SSAName, SSAName, ssize_t, JitSimplerHashBehavior> SSANameToLabelMap;
+ typedef JitHashTable<ssize_t, JitSmallPrimitiveKeyFuncs<ssize_t>, SSAName> LabelToSSANameMap;
+ typedef JitHashTable<SSAName, SSAName, ssize_t> SSANameToLabelMap;
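As the SSANameToLabelMap typedef above illustrates, a key type can act as its own key-funcs class when it exposes static GetHashCode and Equals members. A minimal sketch of that shape, assuming the jithashtable.h definitions are in scope (the SsaKey name and its fields are illustrative, not taken from the sources):

struct SsaKey
{
    unsigned m_lclNum;
    unsigned m_ssaNum;

    // JitHashTable requires these two static members of its key-funcs argument.
    static unsigned GetHashCode(SsaKey key)
    {
        return (key.m_lclNum << 16) ^ key.m_ssaNum;
    }
    static bool Equals(SsaKey x, SsaKey y)
    {
        return (x.m_lclNum == y.m_lclNum) && (x.m_ssaNum == y.m_ssaNum);
    }
};

// The key type doubles as the key-funcs argument, so no separate functor is needed.
typedef JitHashTable<SsaKey, SsaKey, ssize_t> SsaKeyToLabelMap;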
// If we have no test data, early out.
if (m_nodeTestData == nullptr)
}
}
-class JitSimplerHashBehavior
-{
-public:
- static const unsigned s_growth_factor_numerator = 3;
- static const unsigned s_growth_factor_denominator = 2;
-
- static const unsigned s_density_factor_numerator = 3;
- static const unsigned s_density_factor_denominator = 4;
-
- static const unsigned s_minimum_allocation = 7;
-
- inline static void DECLSPEC_NORETURN NoMemory()
- {
- NOMEM();
- }
-};
-
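The class removed above carried the growth factor (3/2), the target density (3/4), the minimum allocation (7 buckets), and the NOMEM() out-of-memory policy. With that behavior built into the hash table as the default, every instantiation in this change drops the fourth template argument. A before/after sketch with an illustrative map (the IntToBlockMap name is not from the sources):

// Before: the behavior class had to be named at every instantiation.
typedef SimplerHashTable<unsigned, SmallPrimitiveKeyFuncs<unsigned>, BasicBlock*, JitSimplerHashBehavior> IntToBlockMap;

// After: the JIT behavior is assumed by default, so only key, key-funcs, and value remain.
typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, BasicBlock*> IntToBlockMap;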
#if defined(DEBUG) || defined(INLINE_DATA)
// ConfigMethodRange describes a set of methods, specified via their
}
#endif // DEBUG
-typedef ExpandArrayStack<BasicBlock*> BlockStack;
+typedef JitExpandArrayStack<BasicBlock*> BlockStack;
// This represents the "to do" state of the value number computation.
struct ValueNumberState
// TODO-Cleanup: new JitTestLabels for lib vs cons vs both VN classes?
void Compiler::JitTestCheckVN()
{
- typedef SimplerHashTable<ssize_t, SmallPrimitiveKeyFuncs<ssize_t>, ValueNum, JitSimplerHashBehavior> LabelToVNMap;
- typedef SimplerHashTable<ValueNum, SmallPrimitiveKeyFuncs<ValueNum>, ssize_t, JitSimplerHashBehavior> VNToLabelMap;
+ typedef JitHashTable<ssize_t, JitSmallPrimitiveKeyFuncs<ssize_t>, ValueNum> LabelToVNMap;
+ typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ssize_t> VNToLabelMap;
// If we have no test data, early out.
if (m_nodeTestData == nullptr)
NodeToTestDataMap* testData = GetNodeTestData();
// First we have to know which nodes in the tree are reachable.
- typedef SimplerHashTable<GenTreePtr, PtrKeyFuncs<GenTree>, int, JitSimplerHashBehavior> NodeToIntMap;
+ typedef JitHashTable<GenTreePtr, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
NodeToIntMap* reachable = FindReachableNodesInNodeTestData();
LabelToVNMap* labelToVN = new (getAllocatorDebugOnly()) LabelToVNMap(getAllocatorDebugOnly());
// VNMap - map from something to ValueNum, where "something" is typically a constant value or a VNFunc.
// This class has two purposes: to abstract the implementation and to validate the ValueNums
// being stored or retrieved.
- template <class fromType, class keyfuncs = LargePrimitiveKeyFuncs<fromType>>
- class VNMap : public SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>
+ template <class fromType, class keyfuncs = JitLargePrimitiveKeyFuncs<fromType>>
+ class VNMap : public JitHashTable<fromType, keyfuncs, ValueNum>
{
public:
- VNMap(CompAllocator* alloc) : SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>(alloc)
+ VNMap(CompAllocator* alloc) : JitHashTable<fromType, keyfuncs, ValueNum>(alloc)
{
}
~VNMap()
{
- ~VNMap<fromType, keyfuncs>::SimplerHashTable();
+ ~VNMap<fromType, keyfuncs>::JitHashTable();
}
bool Set(fromType k, ValueNum val)
{
assert(val != RecursiveVN);
- return SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>::Set(k, val);
+ return JitHashTable<fromType, keyfuncs, ValueNum>::Set(k, val);
}
bool Lookup(fromType k, ValueNum* pVal = nullptr) const
{
- bool result = SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>::Lookup(k, pVal);
+ bool result = JitHashTable<fromType, keyfuncs, ValueNum>::Lookup(k, pVal);
assert(!result || *pVal != RecursiveVN);
return result;
}
};
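Given the Set/Lookup wrappers above, a typical use of such a map looks like the following sketch; the allocator and map variable names are illustrative, and the key point is the RecursiveVN validation performed by both wrappers:

// A minimal sketch, assuming 'alloc' is a CompAllocator* and 'vn' is any ValueNum other than RecursiveVN.
VNMap<INT64>* longCnsMap = new (alloc) VNMap<INT64>(alloc);

longCnsMap->Set(42, vn); // asserts that 'vn' != RecursiveVN before storing

ValueNum found;
if (longCnsMap->Lookup(42, &found))
{
    // The Lookup wrapper asserts that the retrieved value is not RecursiveVN.
}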
};
- struct VNHandle : public KeyFuncsDefEquals<VNHandle>
+ struct VNHandle : public JitKeyFuncsDefEquals<VNHandle>
{
ssize_t m_cnsVal;
unsigned m_flags;
// So we have to be careful about breaking infinite recursion. We can ignore "recursive" results -- if all the
// non-recursive results are the same, the recursion indicates that the loop structure didn't alter the result.
// This stack represents the set of outer phis such that select(phi, ind) is being evaluated.
- ExpandArrayStack<VNDefFunc2Arg> m_fixedPointMapSels;
+ JitExpandArrayStack<VNDefFunc2Arg> m_fixedPointMapSels;
#ifdef DEBUG
// Returns "true" iff "m_fixedPointMapSels" is non-empty, and its top element is
CheckedBoundVNSet m_checkedBoundVNs;
// This is a map from "chunk number" to the attributes of the chunk.
- ExpandArrayStack<Chunk*> m_chunks;
+ JitExpandArrayStack<Chunk*> m_chunks;
// These entries indicate the current allocation chunk, if any, for each valid combination of <var_types,
// ChunkExtraAttribute, loopNumber>. Valid combinations require attribs==CEA_None or loopNum==MAX_LOOP_NUM.
return m_handleMap;
}
- struct LargePrimitiveKeyFuncsFloat : public LargePrimitiveKeyFuncs<float>
+ struct LargePrimitiveKeyFuncsFloat : public JitLargePrimitiveKeyFuncs<float>
{
static bool Equals(float x, float y)
{
}
// In the JIT we need to distinguish -0.0 and 0.0 for optimizations.
- struct LargePrimitiveKeyFuncsDouble : public LargePrimitiveKeyFuncs<double>
+ struct LargePrimitiveKeyFuncsDouble : public JitLargePrimitiveKeyFuncs<double>
{
static bool Equals(double x, double y)
{
return m_byrefCnsMap;
}
- struct VNDefFunc0ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc1Arg>
+ struct VNDefFunc0ArgKeyFuncs : public JitKeyFuncsDefEquals<VNDefFunc1Arg>
{
static unsigned GetHashCode(VNDefFunc1Arg val)
{
return m_VNFunc0Map;
}
- struct VNDefFunc1ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc1Arg>
+ struct VNDefFunc1ArgKeyFuncs : public JitKeyFuncsDefEquals<VNDefFunc1Arg>
{
static unsigned GetHashCode(VNDefFunc1Arg val)
{
return m_VNFunc1Map;
}
- struct VNDefFunc2ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc2Arg>
+ struct VNDefFunc2ArgKeyFuncs : public JitKeyFuncsDefEquals<VNDefFunc2Arg>
{
static unsigned GetHashCode(VNDefFunc2Arg val)
{
return m_VNFunc2Map;
}
- struct VNDefFunc3ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc3Arg>
+ struct VNDefFunc3ArgKeyFuncs : public JitKeyFuncsDefEquals<VNDefFunc3Arg>
{
static unsigned GetHashCode(VNDefFunc3Arg val)
{
return m_VNFunc3Map;
}
- struct VNDefFunc4ArgKeyFuncs : public KeyFuncsDefEquals<VNDefFunc4Arg>
+ struct VNDefFunc4ArgKeyFuncs : public JitKeyFuncsDefEquals<VNDefFunc4Arg>
{
static unsigned GetHashCode(VNDefFunc4Arg val)
{