1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
11 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
20 #include "ssaconfig.h"
22 // Windows x86 and Windows ARM/ARM64 may not define _isnanf() but they do define _isnan().
23 // We will redirect the macros to these other functions if the macro is not defined for the
24 // platform. This has the side effect of a possible implicit upcasting for arguments passed.
25 #if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL)
28 #define _isnanf _isnan
31 #endif // (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL)
33 // We need to use target-specific NaN values when statically compute expressions.
34 // Otherwise, cross crossgen (e.g. x86_arm) would have different binary outputs
35 // from native crossgen (i.e. arm_arm) when the NaN got "embedded" into code.
37 // For example, when placing NaN value in r3 register
38 // x86_arm crossgen would emit
41 // while arm_arm crossgen (and JIT) output is
47 //------------------------------------------------------------------------
48 // NaN: Return target-specific float NaN value
51 // "Default" NaN value returned by expression 0.0f / 0.0f on x86/x64 has
52 // different binary representation (0xffc00000) than NaN on
53 // ARM32/ARM64 (0x7fc00000).
57 #if defined(_TARGET_XARCH_)
58 unsigned bits = 0xFFC00000u;
59 #elif defined(_TARGET_ARMARCH_)
60 unsigned bits = 0x7FC00000u;
62 #error Unsupported or unset target architecture
65 static_assert(sizeof(bits) == sizeof(result), "sizeof(unsigned) must equal sizeof(float)");
66 memcpy(&result, &bits, sizeof(result));
73 //------------------------------------------------------------------------
74 // NaN: Return target-specific double NaN value
77 // "Default" NaN value returned by expression 0.0 / 0.0 on x86/x64 has
78 // different binary representation (0xfff8000000000000) than NaN on
79 // ARM32/ARM64 (0x7ff8000000000000).
83 #if defined(_TARGET_XARCH_)
84 unsigned long long bits = 0xFFF8000000000000ull;
85 #elif defined(_TARGET_ARMARCH_)
86 unsigned long long bits = 0x7FF8000000000000ull;
88 #error Unsupported or unset target architecture
91 static_assert(sizeof(bits) == sizeof(result), "sizeof(unsigned long long) must equal sizeof(double)");
92 memcpy(&result, &bits, sizeof(result));
97 //------------------------------------------------------------------------
98 // FpAdd: Computes value1 + value2
101 // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN
102 // value1 + value2 - Otherwise
105 // See FloatTraits::NaN() and DoubleTraits::NaN() notes.
107 template <typename TFp, typename TFpTraits>
108 TFp FpAdd(TFp value1, TFp value2)
110 #ifdef _TARGET_ARMARCH_
111 // If [value1] is negative infinity and [value2] is positive infinity
112 // the result is NaN.
113 // If [value1] is positive infinity and [value2] is negative infinity
114 // the result is NaN.
116 if (!_finite(value1) && !_finite(value2))
118 if (value1 < 0 && value2 > 0)
120 return TFpTraits::NaN();
123 if (value1 > 0 && value2 < 0)
125 return TFpTraits::NaN();
128 #endif // _TARGET_ARMARCH_
130 return value1 + value2;
133 //------------------------------------------------------------------------
134 // FpSub: Computes value1 - value2
137 // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN
138 // value1 - value2 - Otherwise
141 // See FloatTraits::NaN() and DoubleTraits::NaN() notes.
143 template <typename TFp, typename TFpTraits>
144 TFp FpSub(TFp value1, TFp value2)
146 #ifdef _TARGET_ARMARCH_
147 // If [value1] is positive infinity and [value2] is positive infinity
148 // the result is NaN.
149 // If [value1] is negative infinity and [value2] is negative infinity
150 // the result is NaN.
152 if (!_finite(value1) && !_finite(value2))
154 if (value1 > 0 && value2 > 0)
156 return TFpTraits::NaN();
159 if (value1 < 0 && value2 < 0)
161 return TFpTraits::NaN();
164 #endif // _TARGET_ARMARCH_
166 return value1 - value2;
169 //------------------------------------------------------------------------
170 // FpMul: Computes value1 * value2
173 // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN
174 // value1 * value2 - Otherwise
177 // See FloatTraits::NaN() and DoubleTraits::NaN() notes.
179 template <typename TFp, typename TFpTraits>
180 TFp FpMul(TFp value1, TFp value2)
182 #ifdef _TARGET_ARMARCH_
183 // From the ECMA standard:
185 // If [value1] is zero and [value2] is infinity
186 // the result is NaN.
187 // If [value1] is infinity and [value2] is zero
188 // the result is NaN.
190 if (value1 == 0 && !_finite(value2) && !_isnan(value2))
192 return TFpTraits::NaN();
194 if (!_finite(value1) && !_isnan(value1) && value2 == 0)
196 return TFpTraits::NaN();
198 #endif // _TARGET_ARMARCH_
200 return value1 * value2;
203 //------------------------------------------------------------------------
204 // FpDiv: Computes value1 / value2
207 // TFpTraits::NaN() - If target ARM32/ARM64 and result value is NaN
208 // value1 / value2 - Otherwise
211 // See FloatTraits::NaN() and DoubleTraits::NaN() notes.
213 template <typename TFp, typename TFpTraits>
214 TFp FpDiv(TFp dividend, TFp divisor)
216 #ifdef _TARGET_ARMARCH_
217 // From the ECMA standard:
219 // If [dividend] is zero and [divisor] is zero
220 // the result is NaN.
221 // If [dividend] is infinity and [divisor] is infinity
222 // the result is NaN.
224 if (dividend == 0 && divisor == 0)
226 return TFpTraits::NaN();
228 else if (!_finite(dividend) && !_isnan(dividend) && !_finite(divisor) && !_isnan(divisor))
230 return TFpTraits::NaN();
232 #endif // _TARGET_ARMARCH_
234 return dividend / divisor;
237 template <typename TFp, typename TFpTraits>
238 TFp FpRem(TFp dividend, TFp divisor)
240 // From the ECMA standard:
242 // If [divisor] is zero or [dividend] is infinity
243 // the result is NaN.
244 // If [divisor] is infinity,
245 // the result is [dividend]
247 if (divisor == 0 || !_finite(dividend))
249 return TFpTraits::NaN();
251 else if (!_finite(divisor) && !_isnan(divisor))
256 return (TFp)fmod((double)dividend, (double)divisor);
259 VNFunc GetVNFuncForOper(genTreeOps oper, bool isUnsigned)
261 if (!isUnsigned || (oper == GT_EQ) || (oper == GT_NE))
290 ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator alloc)
294 , m_fixedPointMapSels(alloc, 8)
295 , m_checkedBoundVNs(alloc)
297 , m_intCnsMap(nullptr)
298 , m_longCnsMap(nullptr)
299 , m_handleMap(nullptr)
300 , m_floatCnsMap(nullptr)
301 , m_doubleCnsMap(nullptr)
302 , m_byrefCnsMap(nullptr)
303 , m_VNFunc0Map(nullptr)
304 , m_VNFunc1Map(nullptr)
305 , m_VNFunc2Map(nullptr)
306 , m_VNFunc3Map(nullptr)
307 , m_VNFunc4Map(nullptr)
312 // We have no current allocation chunks.
313 for (unsigned i = 0; i < TYP_COUNT; i++)
315 for (unsigned j = CEA_None; j <= CEA_Count + MAX_LOOP_NUM; j++)
317 m_curAllocChunk[i][j] = NoChunk;
321 for (unsigned i = 0; i < SmallIntConstNum; i++)
323 m_VNsForSmallIntConsts[i] = NoVN;
325 // We will reserve chunk 0 to hold some special constants, like the constant NULL, the "exception" value, and the
327 Chunk* specialConstChunk = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, TYP_REF, CEA_Const, MAX_LOOP_NUM);
328 specialConstChunk->m_numUsed +=
329 SRC_NumSpecialRefConsts; // Implicitly allocate 0 ==> NULL, and 1 ==> Exception, 2 ==> ZeroMap.
330 ChunkNum cn = m_chunks.Push(specialConstChunk);
333 m_mapSelectBudget = (int)JitConfig.JitVNMapSelBudget(); // We cast the unsigned DWORD to a signed int.
335 // This value must be non-negative and non-zero, reset the value to DEFAULT_MAP_SELECT_BUDGET if it isn't.
336 if (m_mapSelectBudget <= 0)
338 m_mapSelectBudget = DEFAULT_MAP_SELECT_BUDGET;
342 unsigned ValueNumStore::VNFuncArity(VNFunc vnf)
344 // Read the bit field out of the table...
345 return (s_vnfOpAttribs[vnf] & VNFOA_ArityMask) >> VNFOA_ArityShift;
349 bool ValueNumStore::IsOverflowIntDiv(int v0, int v1)
351 return (v1 == -1) && (v0 == INT32_MIN);
354 bool ValueNumStore::IsOverflowIntDiv(INT64 v0, INT64 v1)
356 return (v1 == -1) && (v0 == INT64_MIN);
358 template <typename T>
359 bool ValueNumStore::IsOverflowIntDiv(T v0, T v1)
365 bool ValueNumStore::IsIntZero(int v)
370 bool ValueNumStore::IsIntZero(unsigned v)
375 bool ValueNumStore::IsIntZero(INT64 v)
380 bool ValueNumStore::IsIntZero(UINT64 v)
384 template <typename T>
385 bool ValueNumStore::IsIntZero(T v)
394 template <typename T>
395 T ValueNumStore::EvalOp(VNFunc vnf, T v0)
397 genTreeOps oper = genTreeOps(vnf);
399 // Here we handle unary ops that are the same for all types.
403 // Note that GT_NEG is the only valid unary floating point operation
410 // Otherwise must be handled by the type specific method
411 return EvalOpSpecialized(vnf, v0);
415 double ValueNumStore::EvalOpSpecialized<double>(VNFunc vnf, double v0)
417 // Here we handle specialized double unary ops.
418 noway_assert(!"EvalOpSpecialized<double> - unary");
423 float ValueNumStore::EvalOpSpecialized<float>(VNFunc vnf, float v0)
425 // Here we handle specialized float unary ops.
426 noway_assert(!"EvalOpSpecialized<float> - unary");
430 template <typename T>
431 T ValueNumStore::EvalOpSpecialized(VNFunc vnf, T v0)
433 if (vnf < VNF_Boundary)
435 genTreeOps oper = genTreeOps(vnf);
450 noway_assert(!"Unhandled operation in EvalOpSpecialized<T> - unary");
458 template <typename T>
459 T ValueNumStore::EvalOp(VNFunc vnf, T v0, T v1, ValueNum* pExcSet)
461 // Here we handle the binary ops that are the same for all types.
463 if (vnf < VNF_Boundary)
465 genTreeOps oper = genTreeOps(vnf);
467 // Temporary will be removed
472 if (IsOverflowIntDiv(v0, v1))
474 *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_ArithmeticExc));
484 *pExcSet = VNExcSetSingleton(VNForFunc(TYP_REF, VNF_DivideByZeroExc));
495 // Otherwise must be handled by the type specific method
496 return EvalOpSpecialized(vnf, v0, v1);
500 double ValueNumStore::EvalOpSpecialized<double>(VNFunc vnf, double v0, double v1)
502 // Here we handle specialized double binary ops.
503 if (vnf < VNF_Boundary)
505 genTreeOps oper = genTreeOps(vnf);
511 return FpAdd<double, DoubleTraits>(v0, v1);
513 return FpSub<double, DoubleTraits>(v0, v1);
515 return FpMul<double, DoubleTraits>(v0, v1);
517 return FpDiv<double, DoubleTraits>(v0, v1);
519 return FpRem<double, DoubleTraits>(v0, v1);
522 // For any other value of 'oper', we will assert below
527 noway_assert(!"EvalOpSpecialized<double> - binary");
532 float ValueNumStore::EvalOpSpecialized<float>(VNFunc vnf, float v0, float v1)
534 // Here we handle specialized float binary ops.
535 if (vnf < VNF_Boundary)
537 genTreeOps oper = genTreeOps(vnf);
543 return FpAdd<float, FloatTraits>(v0, v1);
545 return FpSub<float, FloatTraits>(v0, v1);
547 return FpMul<float, FloatTraits>(v0, v1);
549 return FpDiv<float, FloatTraits>(v0, v1);
551 return FpRem<float, FloatTraits>(v0, v1);
554 // For any other value of 'oper', we will assert below
558 assert(!"EvalOpSpecialized<float> - binary");
562 template <typename T>
563 T ValueNumStore::EvalOpSpecialized(VNFunc vnf, T v0, T v1)
565 typedef typename jitstd::make_unsigned<T>::type UT;
567 assert((sizeof(T) == 4) || (sizeof(T) == 8));
569 // Here we handle binary ops that are the same for all integer types
570 if (vnf < VNF_Boundary)
572 genTreeOps oper = genTreeOps(vnf);
584 assert(IsIntZero(v1) == false);
585 assert(IsOverflowIntDiv(v0, v1) == false);
589 assert(IsIntZero(v1) == false);
590 assert(IsOverflowIntDiv(v0, v1) == false);
594 assert(IsIntZero(v1) == false);
595 return T(UT(v0) / UT(v1));
598 assert(IsIntZero(v1) == false);
599 return T(UT(v0) % UT(v1));
611 return v0 << (v1 & 0x3F);
620 return v0 >> (v1 & 0x3F);
629 return UINT64(v0) >> (v1 & 0x3F);
633 return UINT32(v0) >> v1;
638 return (v0 << v1) | (UINT64(v0) >> (64 - v1));
642 return (v0 << v1) | (UINT32(v0) >> (32 - v1));
648 return (v0 << (64 - v1)) | (UINT64(v0) >> v1);
652 return (v0 << (32 - v1)) | (UINT32(v0) >> v1);
656 // For any other value of 'oper', we will assert below
660 else // must be a VNF_ function
664 // Here we handle those that are the same for all integer types.
667 return T(UT(v0) + UT(v1));
669 return T(UT(v0) - UT(v1));
671 return T(UT(v0) * UT(v1));
674 // For any other value of 'vnf', we will assert below
679 noway_assert(!"Unhandled operation in EvalOpSpecialized<T> - binary");
684 int ValueNumStore::EvalComparison<double>(VNFunc vnf, double v0, double v1)
686 // Here we handle specialized double comparisons.
688 // We must check for a NaN argument as they they need special handling
689 bool hasNanArg = (_isnan(v0) || _isnan(v1));
691 if (vnf < VNF_Boundary)
693 genTreeOps oper = genTreeOps(vnf);
697 // return false in all cases except for GT_NE;
698 return (oper == GT_NE);
716 // For any other value of 'oper', we will assert below
720 noway_assert(!"Unhandled operation in EvalComparison<double>");
725 int ValueNumStore::EvalComparison<float>(VNFunc vnf, float v0, float v1)
727 // Here we handle specialized float comparisons.
729 // We must check for a NaN argument as they they need special handling
730 bool hasNanArg = (_isnanf(v0) || _isnanf(v1));
732 if (vnf < VNF_Boundary)
734 genTreeOps oper = genTreeOps(vnf);
738 // return false in all cases except for GT_NE;
739 return (oper == GT_NE);
757 // For any other value of 'oper', we will assert below
761 else // must be a VNF_ function
765 // always returns true
780 // For any other value of 'vnf', we will assert below
784 noway_assert(!"Unhandled operation in EvalComparison<float>");
788 template <typename T>
789 int ValueNumStore::EvalComparison(VNFunc vnf, T v0, T v1)
791 typedef typename jitstd::make_unsigned<T>::type UT;
793 // Here we handle the compare ops that are the same for all integer types.
794 if (vnf < VNF_Boundary)
796 genTreeOps oper = genTreeOps(vnf);
812 // For any other value of 'oper', we will assert below
816 else // must be a VNF_ function
821 return T(UT(v0) > UT(v1));
823 return T(UT(v0) >= UT(v1));
825 return T(UT(v0) < UT(v1));
827 return T(UT(v0) <= UT(v1));
829 // For any other value of 'vnf', we will assert below
833 noway_assert(!"Unhandled operation in EvalComparison<T>");
837 // Create a ValueNum for an exception set singleton for 'x'
839 ValueNum ValueNumStore::VNExcSetSingleton(ValueNum x)
841 return VNForFunc(TYP_REF, VNF_ExcSetCons, x, VNForEmptyExcSet());
843 // Create a ValueNumPair for an exception set singleton for 'xp'
845 ValueNumPair ValueNumStore::VNPExcSetSingleton(ValueNumPair xp)
847 return ValueNumPair(VNExcSetSingleton(xp.GetLiberal()), VNExcSetSingleton(xp.GetConservative()));
850 ValueNum ValueNumStore::VNExcSetUnion(ValueNum xs0, ValueNum xs1 DEBUGARG(bool topLevel))
852 if (xs0 == VNForEmptyExcSet())
856 else if (xs1 == VNForEmptyExcSet())
863 bool b0 = GetVNFunc(xs0, &funcXs0);
864 assert(b0 && funcXs0.m_func == VNF_ExcSetCons); // Precondition: xs0 is an exception set.
866 bool b1 = GetVNFunc(xs1, &funcXs1);
867 assert(b1 && funcXs1.m_func == VNF_ExcSetCons); // Precondition: xs1 is an exception set.
869 if (funcXs0.m_args[0] < funcXs1.m_args[0])
871 res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs0.m_args[0],
872 VNExcSetUnion(funcXs0.m_args[1], xs1 DEBUGARG(false)));
874 else if (funcXs0.m_args[0] == funcXs1.m_args[0])
876 // Equal elements; only add one to the result.
877 res = VNExcSetUnion(funcXs0.m_args[1], xs1);
881 assert(funcXs0.m_args[0] > funcXs1.m_args[0]);
882 res = VNForFunc(TYP_REF, VNF_ExcSetCons, funcXs1.m_args[0],
883 VNExcSetUnion(xs0, funcXs1.m_args[1] DEBUGARG(false)));
890 ValueNumPair ValueNumStore::VNPExcSetUnion(ValueNumPair xs0vnp, ValueNumPair xs1vnp)
892 return ValueNumPair(VNExcSetUnion(xs0vnp.GetLiberal(), xs1vnp.GetLiberal()),
893 VNExcSetUnion(xs0vnp.GetConservative(), xs1vnp.GetConservative()));
896 void ValueNumStore::VNUnpackExc(ValueNum vnWx, ValueNum* pvn, ValueNum* pvnx)
898 assert(vnWx != NoVN);
900 if (GetVNFunc(vnWx, &funcApp) && funcApp.m_func == VNF_ValWithExc)
902 *pvn = funcApp.m_args[0];
903 *pvnx = funcApp.m_args[1];
911 void ValueNumStore::VNPUnpackExc(ValueNumPair vnWx, ValueNumPair* pvn, ValueNumPair* pvnx)
913 VNUnpackExc(vnWx.GetLiberal(), pvn->GetLiberalAddr(), pvnx->GetLiberalAddr());
914 VNUnpackExc(vnWx.GetConservative(), pvn->GetConservativeAddr(), pvnx->GetConservativeAddr());
917 //--------------------------------------------------------------------------------
918 // VNNormVal: - Returns a Value Number that represents the result for the
919 // normal (non-exceptional) evaluation for the expression.
922 // vn - The Value Number for the expression, including any excSet.
923 // This excSet is an optional item and represents the set of
924 // possible exceptions for the expression.
927 // - The Value Number for the expression without the exception set.
928 // This can be the orginal 'vn', when there are no exceptions.
930 // Notes: - Whenever we have an exception set the Value Number will be
931 // a VN func with VNF_ValWithExc.
932 // This VN func has the normal value as m_args[0]
934 ValueNum ValueNumStore::VNNormVal(ValueNum vn)
937 if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc)
939 return funcApp.m_args[0];
947 //--------------------------------------------------------------------------------
948 // VNPNormVal: - Returns a Value Number Pair that represents the result for the
949 // normal (non-exceptional) evaluation for the expression.
950 // (see VNNormVal for more details)
952 // Notes: = This method is used to form a Value Number Pair when we
953 // want both the Liberal and Conservative Value NUmbers
955 ValueNumPair ValueNumStore::VNPNormVal(ValueNumPair vnp)
957 return ValueNumPair(VNNormVal(vnp.GetLiberal()), VNNormVal(vnp.GetConservative()));
960 //---------------------------------------------------------------------------
961 // VNExcVal: - Returns a Value Number that represents the set of possible
962 // exceptions that could be encountered for the expression.
965 // vn - The Value Number for the expression, including any excSet.
966 // This excSet is an optional item and represents the set of
967 // possible exceptions for the expression.
970 // - The Value Number for the set of exceptions of the expression.
971 // If the 'vn' has no exception set then a special Value Number
972 // representing the empty exception set is returned.
974 // Notes: - Whenever we have an exception set the Value Number will be
975 // a VN func with VNF_ValWithExc.
976 // This VN func has the exception set as m_args[1]
978 ValueNum ValueNumStore::VNExcVal(ValueNum vn)
981 if (GetVNFunc(vn, &funcApp) && funcApp.m_func == VNF_ValWithExc)
983 return funcApp.m_args[1];
987 return VNForEmptyExcSet();
991 //--------------------------------------------------------------------------------
992 // VNPExcVal: - Returns a Value Number Pair that represents the set of possible
993 // exceptions that could be encountered for the expression.
994 // (see VNExcVal for more details)
996 // Notes: = This method is used to form a Value Number Pair when we
997 // want both the Liberal and Conservative Value NUmbers
999 ValueNumPair ValueNumStore::VNPExcVal(ValueNumPair vnp)
1001 return ValueNumPair(VNExcVal(vnp.GetLiberal()), VNExcVal(vnp.GetConservative()));
1004 //---------------------------------------------------------------------------
1005 // VNWithExc: - Returns a Value Number that also can have both a normal value
1006 // as well as am exception set.
1009 // vn - The current Value Number for the expression, it may include
1010 // an exception set.
1011 // excSet - The Value Number representing the new exception set that
1012 // is to be added to any exceptions already present in 'vn'
1015 // - The new Value Number for the combination the two inputs.
1016 // If the 'excSet' is the special Value Number representing
1017 // the empty exception set then 'vn' is returned.
1019 // Notes: - We use a Set Union operation, 'VNExcSetUnion', to add any
1020 // new exception items from 'excSet' to the existing set.
1022 ValueNum ValueNumStore::VNWithExc(ValueNum vn, ValueNum excSet)
1024 if (excSet == VNForEmptyExcSet())
1031 ValueNum vnX = VNForEmptyExcSet();
1032 VNUnpackExc(vn, &vnNorm, &vnX);
1033 return VNForFunc(TypeOfVN(vnNorm), VNF_ValWithExc, vnNorm, VNExcSetUnion(vnX, excSet));
1037 //--------------------------------------------------------------------------------
1038 // VNPWithExc: - Returns a Value Number Pair that also can have both a normal value
1039 // as well as am exception set.
1040 // (see VNWithExc for more details)
1042 // Notes: = This method is used to form a Value Number Pair when we
1043 // want both the Liberal and Conservative Value NUmbers
1045 ValueNumPair ValueNumStore::VNPWithExc(ValueNumPair vnp, ValueNumPair excSetVNP)
1047 return ValueNumPair(VNWithExc(vnp.GetLiberal(), excSetVNP.GetLiberal()),
1048 VNWithExc(vnp.GetConservative(), excSetVNP.GetConservative()));
1051 bool ValueNumStore::IsKnownNonNull(ValueNum vn)
1058 return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_KnownNonNull) != 0;
1061 bool ValueNumStore::IsSharedStatic(ValueNum vn)
1068 return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_SharedStatic) != 0;
1071 ValueNumStore::Chunk::Chunk(CompAllocator alloc,
1072 ValueNum* pNextBaseVN,
1074 ChunkExtraAttribs attribs,
1075 BasicBlock::loopNumber loopNum)
1076 : m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs), m_loopNum(loopNum)
1078 // Allocate "m_defs" here, according to the typ/attribs pair.
1083 break; // Nothing to do.
1088 m_defs = new (alloc) Alloc<TYP_INT>::Type[ChunkSize];
1091 m_defs = new (alloc) Alloc<TYP_FLOAT>::Type[ChunkSize];
1094 m_defs = new (alloc) Alloc<TYP_LONG>::Type[ChunkSize];
1097 m_defs = new (alloc) Alloc<TYP_DOUBLE>::Type[ChunkSize];
1100 m_defs = new (alloc) Alloc<TYP_BYREF>::Type[ChunkSize];
1103 // We allocate space for a single REF constant, NULL, so we can access these values uniformly.
1104 // Since this value is always the same, we represent it as a static.
1105 m_defs = &s_specialRefConsts[0];
1106 break; // Nothing to do.
1108 assert(false); // Should not reach here.
1113 m_defs = new (alloc) VNHandle[ChunkSize];
1117 m_defs = new (alloc) VNFunc[ChunkSize];
1121 m_defs = new (alloc) VNDefFunc1Arg[ChunkSize];
1124 m_defs = new (alloc) VNDefFunc2Arg[ChunkSize];
1127 m_defs = new (alloc) VNDefFunc3Arg[ChunkSize];
1130 m_defs = new (alloc) VNDefFunc4Arg[ChunkSize];
1135 *pNextBaseVN += ChunkSize;
1138 ValueNumStore::Chunk* ValueNumStore::GetAllocChunk(var_types typ,
1139 ChunkExtraAttribs attribs,
1140 BasicBlock::loopNumber loopNum)
1144 if (loopNum == MAX_LOOP_NUM)
1146 // Loop nest is unknown/irrelevant for this VN.
1151 // Loop nest is interesting. Since we know this is only true for unique VNs, we know attribs will
1152 // be CEA_None and can just index based on loop number.
1153 noway_assert(attribs == CEA_None);
1154 // Map NOT_IN_LOOP -> MAX_LOOP_NUM to make the index range contiguous [0..MAX_LOOP_NUM]
1155 index = CEA_Count + (loopNum == BasicBlock::NOT_IN_LOOP ? MAX_LOOP_NUM : loopNum);
1157 ChunkNum cn = m_curAllocChunk[typ][index];
1160 res = m_chunks.Get(cn);
1161 if (res->m_numUsed < ChunkSize)
1166 // Otherwise, must allocate a new one.
1167 res = new (m_alloc) Chunk(m_alloc, &m_nextChunkBase, typ, attribs, loopNum);
1168 cn = m_chunks.Push(res);
1169 m_curAllocChunk[typ][index] = cn;
1173 ValueNum ValueNumStore::VNForIntCon(INT32 cnsVal)
1175 if (IsSmallIntConst(cnsVal))
1177 unsigned ind = cnsVal - SmallIntConstMin;
1178 ValueNum vn = m_VNsForSmallIntConsts[ind];
1183 vn = GetVNForIntCon(cnsVal);
1184 m_VNsForSmallIntConsts[ind] = vn;
1189 return GetVNForIntCon(cnsVal);
1193 ValueNum ValueNumStore::VNForLongCon(INT64 cnsVal)
1196 if (GetLongCnsMap()->Lookup(cnsVal, &res))
1202 Chunk* c = GetAllocChunk(TYP_LONG, CEA_Const);
1203 unsigned offsetWithinChunk = c->AllocVN();
1204 res = c->m_baseVN + offsetWithinChunk;
1205 reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal;
1206 GetLongCnsMap()->Set(cnsVal, res);
1211 ValueNum ValueNumStore::VNForFloatCon(float cnsVal)
1214 if (GetFloatCnsMap()->Lookup(cnsVal, &res))
1220 Chunk* c = GetAllocChunk(TYP_FLOAT, CEA_Const);
1221 unsigned offsetWithinChunk = c->AllocVN();
1222 res = c->m_baseVN + offsetWithinChunk;
1223 reinterpret_cast<float*>(c->m_defs)[offsetWithinChunk] = cnsVal;
1224 GetFloatCnsMap()->Set(cnsVal, res);
1229 ValueNum ValueNumStore::VNForDoubleCon(double cnsVal)
1232 if (GetDoubleCnsMap()->Lookup(cnsVal, &res))
1238 Chunk* c = GetAllocChunk(TYP_DOUBLE, CEA_Const);
1239 unsigned offsetWithinChunk = c->AllocVN();
1240 res = c->m_baseVN + offsetWithinChunk;
1241 reinterpret_cast<double*>(c->m_defs)[offsetWithinChunk] = cnsVal;
1242 GetDoubleCnsMap()->Set(cnsVal, res);
1247 ValueNum ValueNumStore::VNForByrefCon(INT64 cnsVal)
1250 if (GetByrefCnsMap()->Lookup(cnsVal, &res))
1256 Chunk* c = GetAllocChunk(TYP_BYREF, CEA_Const);
1257 unsigned offsetWithinChunk = c->AllocVN();
1258 res = c->m_baseVN + offsetWithinChunk;
1259 reinterpret_cast<INT64*>(c->m_defs)[offsetWithinChunk] = cnsVal;
1260 GetByrefCnsMap()->Set(cnsVal, res);
1265 ValueNum ValueNumStore::VNForCastOper(var_types castToType, bool srcIsUnsigned /*=false*/)
1267 assert(castToType != TYP_STRUCT);
1268 INT32 cnsVal = INT32(castToType) << INT32(VCA_BitCount);
1269 assert((cnsVal & INT32(VCA_ReservedBits)) == 0);
1273 // We record the srcIsUnsigned by or-ing a 0x01
1274 cnsVal |= INT32(VCA_UnsignedSrc);
1276 ValueNum result = VNForIntCon(cnsVal);
1279 if (m_pComp->verbose)
1281 printf(" VNForCastOper(%s%s) is " FMT_VN "\n", varTypeName(castToType), srcIsUnsigned ? ", unsignedSrc" : "",
1289 ValueNum ValueNumStore::VNForHandle(ssize_t cnsVal, unsigned handleFlags)
1291 assert((handleFlags & ~GTF_ICON_HDL_MASK) == 0);
1295 VNHandle::Initialize(&handle, cnsVal, handleFlags);
1296 if (GetHandleMap()->Lookup(handle, &res))
1302 Chunk* c = GetAllocChunk(TYP_I_IMPL, CEA_Handle);
1303 unsigned offsetWithinChunk = c->AllocVN();
1304 res = c->m_baseVN + offsetWithinChunk;
1305 reinterpret_cast<VNHandle*>(c->m_defs)[offsetWithinChunk] = handle;
1306 GetHandleMap()->Set(handle, res);
1311 // Returns the value number for zero of the given "typ".
1312 // It has an unreached() for a "typ" that has no zero value, such as TYP_VOID.
1313 ValueNum ValueNumStore::VNZeroForType(var_types typ)
1324 return VNForIntCon(0);
1327 return VNForLongCon(0);
1329 return VNForFloatCon(0.0f);
1331 return VNForDoubleCon(0.0);
1335 return VNForByrefCon(0);
1338 // TODO-CQ: Improve value numbering for SIMD types.
1343 #endif // FEATURE_SIMD
1344 return VNForZeroMap(); // Recursion!
1346 // These should be unreached.
1348 unreached(); // Should handle all types.
1352 // Returns the value number for one of the given "typ".
1353 // It returns NoVN for a "typ" that has no one value, such as TYP_REF.
1354 ValueNum ValueNumStore::VNOneForType(var_types typ)
1365 return VNForIntCon(1);
1368 return VNForLongCon(1);
1370 return VNForFloatCon(1.0f);
1372 return VNForDoubleCon(1.0);
1379 class Object* ValueNumStore::s_specialRefConsts[] = {nullptr, nullptr, nullptr};
1381 // Nullary operators (i.e., symbolic constants).
1382 ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func)
1384 assert(VNFuncArity(func) == 0);
1385 assert(func != VNF_NotAField);
1389 if (GetVNFunc0Map()->Lookup(func, &res))
1395 Chunk* c = GetAllocChunk(typ, CEA_Func0);
1396 unsigned offsetWithinChunk = c->AllocVN();
1397 res = c->m_baseVN + offsetWithinChunk;
1398 reinterpret_cast<VNFunc*>(c->m_defs)[offsetWithinChunk] = func;
1399 GetVNFunc0Map()->Set(func, res);
1404 ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN)
1406 assert(arg0VN == VNNormVal(arg0VN)); // Arguments don't carry exceptions.
1409 VNDefFunc1Arg fstruct(func, arg0VN);
1411 // Do constant-folding.
1412 if (CanEvalForConstantArgs(func) && IsVNConstant(arg0VN))
1414 return EvalFuncForConstantArgs(typ, func, arg0VN);
1417 if (GetVNFunc1Map()->Lookup(fstruct, &res))
1423 // Otherwise, create a new VN for this application.
1424 Chunk* c = GetAllocChunk(typ, CEA_Func1);
1425 unsigned offsetWithinChunk = c->AllocVN();
1426 res = c->m_baseVN + offsetWithinChunk;
1427 reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
1428 GetVNFunc1Map()->Set(fstruct, res);
1433 // Windows x86 and Windows ARM/ARM64 may not define _isnanf() but they do define _isnan().
1434 // We will redirect the macros to these other functions if the macro is not defined for the
1435 // platform. This has the side effect of a possible implicit upcasting for arguments passed.
1436 #if (defined(_TARGET_X86_) || defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)) && !defined(FEATURE_PAL)
1438 #if !defined(_isnanf)
1439 #define _isnanf _isnan
1444 ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
1446 assert(arg0VN != NoVN && arg1VN != NoVN);
1447 assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
1448 assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
1449 assert(VNFuncArity(func) == 2);
1450 assert(func != VNF_MapSelect); // Precondition: use the special function VNForMapSelect defined for that.
1454 // Do constant-folding.
1455 if (CanEvalForConstantArgs(func) && IsVNConstant(arg0VN) && IsVNConstant(arg1VN))
1457 bool canFold = true; // Normally we will be able to fold this 'func'
1459 // Special case for VNF_Cast of constant handles
1460 // Don't allow eval/fold of a GT_CAST(non-I_IMPL, Handle)
1462 if ((func == VNF_Cast) && (typ != TYP_I_IMPL) && IsVNHandle(arg0VN))
1467 // It is possible for us to have mismatched types (see Bug 750863)
1468 // We don't try to fold a binary operation when one of the constant operands
1469 // is a floating-point constant and the other is not.
1471 var_types arg0VNtyp = TypeOfVN(arg0VN);
1472 bool arg0IsFloating = varTypeIsFloating(arg0VNtyp);
1474 var_types arg1VNtyp = TypeOfVN(arg1VN);
1475 bool arg1IsFloating = varTypeIsFloating(arg1VNtyp);
1477 if (arg0IsFloating != arg1IsFloating)
1482 // NaNs are unordered wrt to other floats. While an ordered
1483 // comparison would return false, an unordered comparison
1484 // will return true if any operands are a NaN. We only perform
1485 // ordered NaN comparison in EvalComparison.
1486 if ((arg0IsFloating && (((arg0VNtyp == TYP_FLOAT) && _isnanf(GetConstantSingle(arg0VN))) ||
1487 ((arg0VNtyp == TYP_DOUBLE) && _isnan(GetConstantDouble(arg0VN))))) ||
1488 (arg1IsFloating && (((arg1VNtyp == TYP_FLOAT) && _isnanf(GetConstantSingle(arg1VN))) ||
1489 ((arg1VNtyp == TYP_DOUBLE) && _isnan(GetConstantDouble(arg1VN))))))
1493 if (typ == TYP_BYREF)
1495 // We don't want to fold expressions that produce TYP_BYREF
1501 return EvalFuncForConstantArgs(typ, func, arg0VN, arg1VN);
1504 // We canonicalize commutative operations.
1505 // (Perhaps should eventually handle associative/commutative [AC] ops -- but that gets complicated...)
1506 if (VNFuncIsCommutative(func))
1508 // Order arg0 arg1 by numerical VN value.
1509 if (arg0VN > arg1VN)
1511 jitstd::swap(arg0VN, arg1VN);
1514 VNDefFunc2Arg fstruct(func, arg0VN, arg1VN);
1515 if (GetVNFunc2Map()->Lookup(fstruct, &res))
1521 // We have ways of evaluating some binary functions.
1522 if (func < VNF_Boundary)
1524 if (typ != TYP_BYREF) // We don't want/need to optimize a zero byref
1526 ValueNum resultVN = NoVN;
1527 ValueNum ZeroVN, OneVN; // We may need to create one of these in the switch below.
1528 switch (genTreeOps(func))
1531 // This identity does not apply for floating point (when x == -0.0)
1532 // (x + 0) == (0 + x) => x
1533 ZeroVN = VNZeroForType(typ);
1534 if (VNIsEqual(arg0VN, ZeroVN))
1538 else if (VNIsEqual(arg1VN, ZeroVN))
1545 // This identity does not apply for floating point (when x == -0.0)
1548 ZeroVN = VNZeroForType(typ);
1549 if (VNIsEqual(arg1VN, ZeroVN))
1553 else if (VNIsEqual(arg0VN, arg1VN))
1560 // (x * 1) == (1 * x) => x
1561 OneVN = VNOneForType(typ);
1564 if (arg0VN == OneVN)
1568 else if (arg1VN == OneVN)
1574 if (!varTypeIsFloating(typ))
1576 // (x * 0) == (0 * x) => 0 (unless x is NaN, which we must assume a fp value may be)
1577 ZeroVN = VNZeroForType(typ);
1578 if (arg0VN == ZeroVN)
1582 else if (arg1VN == ZeroVN)
1592 OneVN = VNOneForType(typ);
1595 if (arg1VN == OneVN)
1604 // (x | 0) == (0 | x) => x
1605 // (x ^ 0) == (0 ^ x) => x
1606 ZeroVN = VNZeroForType(typ);
1607 if (arg0VN == ZeroVN)
1611 else if (arg1VN == ZeroVN)
1618 // (x & 0) == (0 & x) => 0
1619 ZeroVN = VNZeroForType(typ);
1620 if (arg0VN == ZeroVN)
1624 else if (arg1VN == ZeroVN)
1639 ZeroVN = VNZeroForType(typ);
1640 if (arg1VN == ZeroVN)
1649 // (x == x) => true (unless x is NaN)
1650 // (x <= x) => true (unless x is NaN)
1651 // (x >= x) => true (unless x is NaN)
1652 if (VNIsEqual(arg0VN, arg1VN))
1654 resultVN = VNOneForType(typ);
1656 if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) ||
1657 (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
1659 resultVN = VNZeroForType(typ);
1666 // (x != x) => false (unless x is NaN)
1667 // (x > x) => false (unless x is NaN)
1668 // (x < x) => false (unless x is NaN)
1669 if (VNIsEqual(arg0VN, arg1VN))
1671 resultVN = VNZeroForType(typ);
1673 if ((arg0VN == VNForNull() && IsKnownNonNull(arg1VN)) ||
1674 (arg1VN == VNForNull() && IsKnownNonNull(arg0VN)))
1676 resultVN = VNOneForType(typ);
1684 if ((resultVN != NoVN) && (TypeOfVN(resultVN) == typ))
1690 else // must be a VNF_ function
1692 if (VNIsEqual(arg0VN, arg1VN))
1696 if ((func == VNF_LE_UN) || (func == VNF_GE_UN))
1698 return VNOneForType(typ);
1703 else if ((func == VNF_LT_UN) || (func == VNF_GT_UN) || (func == VNF_SUB_UN))
1705 return VNZeroForType(typ);
1709 if (func == VNF_CastClass)
1711 // In terms of values, a castclass always returns its second argument, the object being cast.
1712 // The IL operation may also throw an exception
1713 return VNWithExc(arg1VN, VNExcSetSingleton(VNForFunc(TYP_REF, VNF_InvalidCastExc, arg1VN, arg0VN)));
1717 // Otherwise, assign a new VN for the function application.
1718 Chunk* c = GetAllocChunk(typ, CEA_Func2);
1719 unsigned offsetWithinChunk = c->AllocVN();
1720 res = c->m_baseVN + offsetWithinChunk;
1721 reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
1722 GetVNFunc2Map()->Set(fstruct, res);
1727 //------------------------------------------------------------------------------
1728 // VNForMapStore : Evaluate VNF_MapStore with the given arguments.
1733 // arg0VN - Map value number
1734 // arg1VN - Index value number
1735 // arg2VN - New value for map[index]
1738 // Value number for the result of the evaluation.
1740 ValueNum ValueNumStore::VNForMapStore(var_types typ, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN)
1742 ValueNum result = VNForFunc(typ, VNF_MapStore, arg0VN, arg1VN, arg2VN);
1744 if (m_pComp->verbose)
1746 printf(" VNForMapStore(" FMT_VN ", " FMT_VN ", " FMT_VN "):%s returns ", arg0VN, arg1VN, arg2VN,
1748 m_pComp->vnPrint(result, 1);
1755 //------------------------------------------------------------------------------
1756 // VNForMapSelect : Evaluate VNF_MapSelect with the given arguments.
1760 // vnk - Value number kind
1762 // arg0VN - Map value number
1763 // arg1VN - Index value number
1766 // Value number for the result of the evaluation.
1769 // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate
1770 // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number
1771 // (liberal/conservative) to read from the SSA def referenced in the phi argument.
1773 ValueNum ValueNumStore::VNForMapSelect(ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN)
1775 int budget = m_mapSelectBudget;
1776 bool usedRecursiveVN = false;
1777 ValueNum result = VNForMapSelectWork(vnk, typ, arg0VN, arg1VN, &budget, &usedRecursiveVN);
1779 // The remaining budget should always be between [0..m_mapSelectBudget]
1780 assert((budget >= 0) && (budget <= m_mapSelectBudget));
1783 if (m_pComp->verbose)
1785 printf(" VNForMapSelect(" FMT_VN ", " FMT_VN "):%s returns ", arg0VN, arg1VN, varTypeName(typ));
1786 m_pComp->vnPrint(result, 1);
1793 //------------------------------------------------------------------------------
1794 // VNForMapSelectWork : A method that does the work for VNForMapSelect and may call itself recursively.
1798 // vnk - Value number kind
1800 // arg0VN - Zeroth argument
1801 // arg1VN - First argument
1802 // pBudget - Remaining budget for the outer evaluation
1803 // pUsedRecursiveVN - Out-parameter that is set to true iff RecursiveVN was returned from this method
1804 // or from a method called during one of recursive invocations.
1807 // Value number for the result of the evaluation.
1810 // This requires a "ValueNumKind" because it will attempt, given "select(phi(m1, ..., mk), ind)", to evaluate
1811 // "select(m1, ind)", ..., "select(mk, ind)" to see if they agree. It needs to know which kind of value number
1812 // (liberal/conservative) to read from the SSA def referenced in the phi argument.
1814 ValueNum ValueNumStore::VNForMapSelectWork(
1815 ValueNumKind vnk, var_types typ, ValueNum arg0VN, ValueNum arg1VN, int* pBudget, bool* pUsedRecursiveVN)
1818 // This label allows us to directly implement a tail call by setting up the arguments, and doing a goto to here.
1819 assert(arg0VN != NoVN && arg1VN != NoVN);
1820 assert(arg0VN == VNNormVal(arg0VN)); // Arguments carry no exceptions.
1821 assert(arg1VN == VNNormVal(arg1VN)); // Arguments carry no exceptions.
1823 *pUsedRecursiveVN = false;
1826 // Provide a mechanism for writing tests that ensure we don't call this ridiculously often.
1829 // This printing is sometimes useful in debugging.
1830 // if ((m_numMapSels % 1000) == 0) printf("%d VNF_MapSelect applications.\n", m_numMapSels);
1832 unsigned selLim = JitConfig.JitVNMapSelLimit();
1833 assert(selLim == 0 || m_numMapSels < selLim);
1837 VNDefFunc2Arg fstruct(VNF_MapSelect, arg0VN, arg1VN);
1838 if (GetVNFunc2Map()->Lookup(fstruct, &res))
1845 // Give up if we've run out of budget.
1846 if (--(*pBudget) <= 0)
1848 // We have to use 'nullptr' for the basic block here, because subsequent expressions
1849 // in different blocks may find this result in the VNFunc2Map -- other expressions in
1850 // the IR may "evaluate" to this same VNForExpr, so it is not "unique" in the sense
1851 // that permits the BasicBlock attribution.
1852 res = VNForExpr(nullptr, typ);
1853 GetVNFunc2Map()->Set(fstruct, res);
1857 // If it's recursive, stop the recursion.
1858 if (SelectIsBeingEvaluatedRecursively(arg0VN, arg1VN))
1860 *pUsedRecursiveVN = true;
1864 if (arg0VN == VNForZeroMap())
1866 return VNZeroForType(typ);
1868 else if (IsVNFunc(arg0VN))
1871 GetVNFunc(arg0VN, &funcApp);
1872 if (funcApp.m_func == VNF_MapStore)
1874 // select(store(m, i, v), i) == v
1875 if (funcApp.m_args[1] == arg1VN)
1877 #if FEATURE_VN_TRACE_APPLY_SELECTORS
1878 JITDUMP(" AX1: select([" FMT_VN "]store(" FMT_VN ", " FMT_VN ", " FMT_VN "), " FMT_VN
1879 ") ==> " FMT_VN ".\n",
1880 funcApp.m_args[0], arg0VN, funcApp.m_args[1], funcApp.m_args[2], arg1VN, funcApp.m_args[2]);
1882 return funcApp.m_args[2];
1884 // i # j ==> select(store(m, i, v), j) == select(m, j)
1885 // Currently the only source of distinctions is when both indices are constants.
1886 else if (IsVNConstant(arg1VN) && IsVNConstant(funcApp.m_args[1]))
1888 assert(funcApp.m_args[1] != arg1VN); // we already checked this above.
1889 #if FEATURE_VN_TRACE_APPLY_SELECTORS
1890 JITDUMP(" AX2: " FMT_VN " != " FMT_VN " ==> select([" FMT_VN "]store(" FMT_VN ", " FMT_VN
1891 ", " FMT_VN "), " FMT_VN ") ==> select(" FMT_VN ", " FMT_VN ").\n",
1892 arg1VN, funcApp.m_args[1], arg0VN, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2],
1893 arg1VN, funcApp.m_args[0], arg1VN);
1895 // This is the equivalent of the recursive tail call:
1896 // return VNForMapSelect(vnk, typ, funcApp.m_args[0], arg1VN);
1897 // Make sure we capture any exceptions from the "i" and "v" of the store...
1898 arg0VN = funcApp.m_args[0];
1902 else if (funcApp.m_func == VNF_PhiDef || funcApp.m_func == VNF_PhiMemoryDef)
1904 unsigned lclNum = BAD_VAR_NUM;
1905 bool isMemory = false;
1906 VNFuncApp phiFuncApp;
1907 bool defArgIsFunc = false;
1908 if (funcApp.m_func == VNF_PhiDef)
1910 lclNum = unsigned(funcApp.m_args[0]);
1911 defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp);
1915 assert(funcApp.m_func == VNF_PhiMemoryDef);
1917 defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp);
1919 if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi)
1921 // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh.
1922 // Get the first argument of the phi.
1924 // We need to be careful about breaking infinite recursion. Record the outer select.
1925 m_fixedPointMapSels.Push(VNDefFunc2Arg(VNF_MapSelect, arg0VN, arg1VN));
1927 assert(IsVNConstant(phiFuncApp.m_args[0]));
1928 unsigned phiArgSsaNum = ConstantValue<unsigned>(phiFuncApp.m_args[0]);
1932 phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
1936 phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
1938 if (phiArgVN != ValueNumStore::NoVN)
1940 bool allSame = true;
1941 ValueNum argRest = phiFuncApp.m_args[1];
1942 ValueNum sameSelResult =
1943 VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, pUsedRecursiveVN);
1945 // It is possible that we just now exceeded our budget, if so we need to force an early exit
1946 // and stop calling VNForMapSelectWork
1949 // We don't have any budget remaining to verify that all phiArgs are the same
1950 // so setup the default failure case now.
1954 while (allSame && argRest != ValueNumStore::NoVN)
1956 ValueNum cur = argRest;
1957 VNFuncApp phiArgFuncApp;
1958 if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi)
1960 cur = phiArgFuncApp.m_args[0];
1961 argRest = phiArgFuncApp.m_args[1];
1965 argRest = ValueNumStore::NoVN; // Cause the loop to terminate.
1967 assert(IsVNConstant(cur));
1968 phiArgSsaNum = ConstantValue<unsigned>(cur);
1971 phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
1975 phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
1977 if (phiArgVN == ValueNumStore::NoVN)
1983 bool usedRecursiveVN = false;
1984 ValueNum curResult =
1985 VNForMapSelectWork(vnk, typ, phiArgVN, arg1VN, pBudget, &usedRecursiveVN);
1986 *pUsedRecursiveVN |= usedRecursiveVN;
1987 if (sameSelResult == ValueNumStore::RecursiveVN)
1989 sameSelResult = curResult;
1991 if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult)
1997 if (allSame && sameSelResult != ValueNumStore::RecursiveVN)
1999 // Make sure we're popping what we pushed.
2000 assert(FixedPointMapSelsTopHasValue(arg0VN, arg1VN));
2001 m_fixedPointMapSels.Pop();
2003 // To avoid exponential searches, we make sure that this result is memo-ized.
2004 // The result is always valid for memoization if we didn't rely on RecursiveVN to get it.
2005 // If RecursiveVN was used, we are processing a loop and we can't memo-ize this intermediate
2006 // result if, e.g., this block is in a multi-entry loop.
2007 if (!*pUsedRecursiveVN)
2009 GetVNFunc2Map()->Set(fstruct, sameSelResult);
2012 return sameSelResult;
2014 // Otherwise, fall through to creating the select(phi(m1, m2), x) function application.
2016 // Make sure we're popping what we pushed.
2017 assert(FixedPointMapSelsTopHasValue(arg0VN, arg1VN));
2018 m_fixedPointMapSels.Pop();
2023 // Otherwise, assign a new VN for the function application.
2024 Chunk* c = GetAllocChunk(typ, CEA_Func2);
2025 unsigned offsetWithinChunk = c->AllocVN();
2026 res = c->m_baseVN + offsetWithinChunk;
2027 reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
2028 GetVNFunc2Map()->Set(fstruct, res);
2033 ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN)
2035 assert(CanEvalForConstantArgs(func));
2036 assert(IsVNConstant(arg0VN));
2037 switch (TypeOfVN(arg0VN))
2041 int resVal = EvalOp<int>(func, ConstantValue<int>(arg0VN));
2042 // Unary op on a handle results in a handle.
2043 return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForIntCon(resVal);
2047 INT64 resVal = EvalOp<INT64>(func, ConstantValue<INT64>(arg0VN));
2048 // Unary op on a handle results in a handle.
2049 return IsVNHandle(arg0VN) ? VNForHandle(ssize_t(resVal), GetHandleFlags(arg0VN)) : VNForLongCon(resVal);
2053 float resVal = EvalOp<float>(func, ConstantValue<float>(arg0VN));
2054 return VNForFloatCon(resVal);
2058 double resVal = EvalOp<double>(func, ConstantValue<double>(arg0VN));
2059 return VNForDoubleCon(resVal);
2063 // If arg0 has a possible exception, it wouldn't have been constant.
2064 assert(!VNHasExc(arg0VN));
2066 assert(arg0VN == VNForNull()); // Only other REF constant.
2067 assert(func == VNFunc(GT_ARR_LENGTH)); // Only function we can apply to a REF constant!
2068 return VNWithExc(VNForVoid(), VNExcSetSingleton(VNForFunc(TYP_REF, VNF_NullPtrExc, VNForNull())));
2071 // We will assert below
2074 noway_assert(!"Unhandled operation in EvalFuncForConstantArgs");
2078 bool ValueNumStore::SelectIsBeingEvaluatedRecursively(ValueNum map, ValueNum ind)
2080 for (unsigned i = 0; i < m_fixedPointMapSels.Size(); i++)
2082 VNDefFunc2Arg& elem = m_fixedPointMapSels.GetRef(i);
2083 assert(elem.m_func == VNF_MapSelect);
2084 if (elem.m_arg0 == map && elem.m_arg1 == ind)
2093 bool ValueNumStore::FixedPointMapSelsTopHasValue(ValueNum map, ValueNum index)
2095 if (m_fixedPointMapSels.Size() == 0)
2099 VNDefFunc2Arg& top = m_fixedPointMapSels.TopRef();
2100 return top.m_func == VNF_MapSelect && top.m_arg0 == map && top.m_arg1 == index;
2104 // Given an integer constant value number return its value as an int.
2106 int ValueNumStore::GetConstantInt32(ValueNum argVN)
2108 assert(IsVNConstant(argVN));
2109 var_types argVNtyp = TypeOfVN(argVN);
2116 result = ConstantValue<int>(argVN);
2118 #ifndef _TARGET_64BIT_
2121 result = (int)ConstantValue<size_t>(argVN);
2130 // Given an integer constant value number return its value as an INT64.
2132 INT64 ValueNumStore::GetConstantInt64(ValueNum argVN)
2134 assert(IsVNConstant(argVN));
2135 var_types argVNtyp = TypeOfVN(argVN);
2142 result = (INT64)ConstantValue<int>(argVN);
2145 result = ConstantValue<INT64>(argVN);
2149 result = (INT64)ConstantValue<size_t>(argVN);
2157 // Given a double constant value number return its value as a double.
2159 double ValueNumStore::GetConstantDouble(ValueNum argVN)
2161 assert(IsVNConstant(argVN));
2162 assert(TypeOfVN(argVN) == TYP_DOUBLE);
2164 return ConstantValue<double>(argVN);
2167 // Given a float constant value number return its value as a float.
2169 float ValueNumStore::GetConstantSingle(ValueNum argVN)
2171 assert(IsVNConstant(argVN));
2172 assert(TypeOfVN(argVN) == TYP_FLOAT);
2174 return ConstantValue<float>(argVN);
2177 // Compute the proper value number when the VNFunc has all constant arguments
2178 // This essentially performs constant folding at value numbering time
2180 ValueNum ValueNumStore::EvalFuncForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
2182 assert(CanEvalForConstantArgs(func));
2183 assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN));
2184 assert(!VNHasExc(arg0VN) && !VNHasExc(arg1VN)); // Otherwise, would not be constant.
2186 // if our func is the VNF_Cast operation we handle it first
2187 if (func == VNF_Cast)
2189 return EvalCastForConstantArgs(typ, func, arg0VN, arg1VN);
2192 var_types arg0VNtyp = TypeOfVN(arg0VN);
2193 var_types arg1VNtyp = TypeOfVN(arg1VN);
2195 // When both arguments are floating point types
2196 // We defer to the EvalFuncForConstantFPArgs()
2197 if (varTypeIsFloating(arg0VNtyp) && varTypeIsFloating(arg1VNtyp))
2199 return EvalFuncForConstantFPArgs(typ, func, arg0VN, arg1VN);
2202 // after this we shouldn't have to deal with floating point types for arg0VN or arg1VN
2203 assert(!varTypeIsFloating(arg0VNtyp));
2204 assert(!varTypeIsFloating(arg1VNtyp));
2206 // Stack-normalize the result type.
2207 if (varTypeIsSmall(typ))
2212 ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
2213 ValueNum excSet = VNForEmptyExcSet();
2215 // Are both args of the same type?
2216 if (arg0VNtyp == arg1VNtyp)
2218 if (arg0VNtyp == TYP_INT)
2220 int arg0Val = ConstantValue<int>(arg0VN);
2221 int arg1Val = ConstantValue<int>(arg1VN);
2223 if (VNFuncIsComparison(func))
2225 assert(typ == TYP_INT);
2226 result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
2230 assert(typ == TYP_INT);
2231 int resultVal = EvalOp<int>(func, arg0Val, arg1Val, &excSet);
2232 // Bin op on a handle results in a handle.
2233 ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
2234 if (handleVN != NoVN)
2236 result = VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)); // Use VN for Handle
2240 result = VNWithExc(VNForIntCon(resultVal), excSet);
2244 else if (arg0VNtyp == TYP_LONG)
2246 INT64 arg0Val = ConstantValue<INT64>(arg0VN);
2247 INT64 arg1Val = ConstantValue<INT64>(arg1VN);
2249 if (VNFuncIsComparison(func))
2251 assert(typ == TYP_INT);
2252 result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
2256 assert(typ == TYP_LONG);
2257 INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val, &excSet);
2258 ValueNum handleVN = IsVNHandle(arg0VN) ? arg0VN : IsVNHandle(arg1VN) ? arg1VN : NoVN;
2259 ValueNum resultVN = (handleVN != NoVN)
2260 ? VNForHandle(ssize_t(resultVal), GetHandleFlags(handleVN)) // Use VN for Handle
2261 : VNForLongCon(resultVal);
2262 result = VNWithExc(resultVN, excSet);
2265 else // both args are TYP_REF or both args are TYP_BYREF
2267 INT64 arg0Val = ConstantValue<size_t>(arg0VN); // We represent ref/byref constants as size_t's.
2268 INT64 arg1Val = ConstantValue<size_t>(arg1VN); // Also we consider null to be zero.
2270 if (VNFuncIsComparison(func))
2272 assert(typ == TYP_INT);
2273 result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
2275 else if (typ == TYP_INT) // We could see GT_OR of a constant ByRef and Null
2277 int resultVal = (int)EvalOp<INT64>(func, arg0Val, arg1Val, &excSet);
2278 result = VNWithExc(VNForIntCon(resultVal), excSet);
2280 else // We could see GT_OR of a constant ByRef and Null
2282 assert((typ == TYP_BYREF) || (typ == TYP_LONG));
2283 INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val, &excSet);
2284 result = VNWithExc(VNForByrefCon(resultVal), excSet);
2288 else // We have args of different types
2290 // We represent ref/byref constants as size_t's.
2291 // Also we consider null to be zero.
2293 INT64 arg0Val = GetConstantInt64(arg0VN);
2294 INT64 arg1Val = GetConstantInt64(arg1VN);
2296 if (VNFuncIsComparison(func))
2298 assert(typ == TYP_INT);
2299 result = VNForIntCon(EvalComparison(func, arg0Val, arg1Val));
2301 else if (typ == TYP_INT) // We could see GT_OR of an int and constant ByRef or Null
2303 int resultVal = (int)EvalOp<INT64>(func, arg0Val, arg1Val, &excSet);
2304 result = VNWithExc(VNForIntCon(resultVal), excSet);
2308 assert(typ != TYP_INT);
2309 ValueNum resultValx = VNForEmptyExcSet();
2310 INT64 resultVal = EvalOp<INT64>(func, arg0Val, arg1Val, &resultValx);
2312 // check for the Exception case
2313 if (resultValx != VNForEmptyExcSet())
2315 result = VNWithExc(VNForVoid(), resultValx);
2322 result = VNForByrefCon(resultVal);
2325 result = VNForLongCon(resultVal);
2328 assert(resultVal == 0); // Only valid REF constant
2329 result = VNForNull();
2341 // Compute the proper value number when the VNFunc has all constant floating-point arguments
2342 // This essentially must perform constant folding at value numbering time
2344 ValueNum ValueNumStore::EvalFuncForConstantFPArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
2346 assert(CanEvalForConstantArgs(func));
2347 assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN));
2349 // We expect both argument types to be floating-point types
2350 var_types arg0VNtyp = TypeOfVN(arg0VN);
2351 var_types arg1VNtyp = TypeOfVN(arg1VN);
2353 assert(varTypeIsFloating(arg0VNtyp));
2354 assert(varTypeIsFloating(arg1VNtyp));
2356 // We also expect both arguments to be of the same floating-point type
2357 assert(arg0VNtyp == arg1VNtyp);
2359 ValueNum result; // left uninitialized, we are required to initialize it on all paths below.
2361 if (VNFuncIsComparison(func))
2363 assert(genActualType(typ) == TYP_INT);
2365 if (arg0VNtyp == TYP_FLOAT)
2367 result = VNForIntCon(EvalComparison(func, GetConstantSingle(arg0VN), GetConstantSingle(arg1VN)));
2371 assert(arg0VNtyp == TYP_DOUBLE);
2372 result = VNForIntCon(EvalComparison(func, GetConstantDouble(arg0VN), GetConstantDouble(arg1VN)));
2377 // We expect the return type to be the same as the argument type
2378 assert(varTypeIsFloating(typ));
2379 assert(arg0VNtyp == typ);
2381 ValueNum exception = VNForEmptyExcSet();
2383 if (typ == TYP_FLOAT)
2385 float floatResultVal =
2386 EvalOp<float>(func, GetConstantSingle(arg0VN), GetConstantSingle(arg1VN), &exception);
2387 assert(exception == VNForEmptyExcSet()); // Floating point ops don't throw.
2388 result = VNForFloatCon(floatResultVal);
2392 assert(typ == TYP_DOUBLE);
2394 double doubleResultVal =
2395 EvalOp<double>(func, GetConstantDouble(arg0VN), GetConstantDouble(arg1VN), &exception);
2396 assert(exception == VNForEmptyExcSet()); // Floating point ops don't throw.
2397 result = VNForDoubleCon(doubleResultVal);
2404 // Compute the proper value number for a VNF_Cast with constant arguments
2405 // This essentially must perform constant folding at value numbering time
2407 ValueNum ValueNumStore::EvalCastForConstantArgs(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN)
2409 assert(func == VNF_Cast);
2410 assert(IsVNConstant(arg0VN) && IsVNConstant(arg1VN));
2412 // Stack-normalize the result type.
2413 if (varTypeIsSmall(typ))
2418 var_types arg0VNtyp = TypeOfVN(arg0VN);
2419 var_types arg1VNtyp = TypeOfVN(arg1VN);
2421 // arg1VN is really the gtCastType that we are casting to
2422 assert(arg1VNtyp == TYP_INT);
2423 int arg1Val = ConstantValue<int>(arg1VN);
2424 assert(arg1Val >= 0);
2426 if (IsVNHandle(arg0VN))
2428 // We don't allow handles to be cast to random var_types.
2429 assert(typ == TYP_I_IMPL);
2432 // We previously encoded the castToType operation using vnForCastOper()
2434 bool srcIsUnsigned = ((arg1Val & INT32(VCA_UnsignedSrc)) != 0);
2435 var_types castToType = var_types(arg1Val >> INT32(VCA_BitCount));
2437 var_types castFromType = arg0VNtyp;
2439 switch (castFromType) // GT_CAST source type
2441 #ifndef _TARGET_64BIT_
2447 int arg0Val = GetConstantInt32(arg0VN);
2452 assert(typ == TYP_INT);
2453 return VNForIntCon(INT8(arg0Val));
2456 assert(typ == TYP_INT);
2457 return VNForIntCon(UINT8(arg0Val));
2459 assert(typ == TYP_INT);
2460 return VNForIntCon(INT16(arg0Val));
2462 assert(typ == TYP_INT);
2463 return VNForIntCon(UINT16(arg0Val));
2466 assert(typ == TYP_INT);
2470 assert(!IsVNHandle(arg0VN));
2471 #ifdef _TARGET_64BIT_
2472 if (typ == TYP_LONG)
2476 return VNForLongCon(INT64(unsigned(arg0Val)));
2480 return VNForLongCon(INT64(arg0Val));
2485 assert(typ == TYP_BYREF);
2488 return VNForByrefCon(INT64(unsigned(arg0Val)));
2492 return VNForByrefCon(INT64(arg0Val));
2495 #else // TARGET_32BIT
2497 return VNForLongCon(INT64(unsigned(arg0Val)));
2499 return VNForLongCon(INT64(arg0Val));
2502 assert(typ == TYP_BYREF);
2503 return VNForByrefCon((INT64)arg0Val);
2505 assert(typ == TYP_FLOAT);
2508 return VNForFloatCon(float(unsigned(arg0Val)));
2512 return VNForFloatCon(float(arg0Val));
2515 assert(typ == TYP_DOUBLE);
2518 return VNForDoubleCon(double(unsigned(arg0Val)));
2522 return VNForDoubleCon(double(arg0Val));
2530 #ifdef _TARGET_64BIT_
2535 INT64 arg0Val = GetConstantInt64(arg0VN);
2540 assert(typ == TYP_INT);
2541 return VNForIntCon(INT8(arg0Val));
2544 assert(typ == TYP_INT);
2545 return VNForIntCon(UINT8(arg0Val));
2547 assert(typ == TYP_INT);
2548 return VNForIntCon(INT16(arg0Val));
2550 assert(typ == TYP_INT);
2551 return VNForIntCon(UINT16(arg0Val));
2553 assert(typ == TYP_INT);
2554 return VNForIntCon(INT32(arg0Val));
2556 assert(typ == TYP_INT);
2557 return VNForIntCon(UINT32(arg0Val));
2560 assert(typ == TYP_LONG);
2563 assert(typ == TYP_BYREF);
2564 return VNForByrefCon((INT64)arg0Val);
2566 assert(typ == TYP_FLOAT);
2569 return VNForFloatCon(FloatingPointUtils::convertUInt64ToFloat(UINT64(arg0Val)));
2573 return VNForFloatCon(float(arg0Val));
2576 assert(typ == TYP_DOUBLE);
2579 return VNForDoubleCon(FloatingPointUtils::convertUInt64ToDouble(UINT64(arg0Val)));
2583 return VNForDoubleCon(double(arg0Val));
2591 float arg0Val = GetConstantSingle(arg0VN);
2596 assert(typ == TYP_INT);
2597 return VNForIntCon(INT8(arg0Val));
2600 assert(typ == TYP_INT);
2601 return VNForIntCon(UINT8(arg0Val));
2603 assert(typ == TYP_INT);
2604 return VNForIntCon(INT16(arg0Val));
2606 assert(typ == TYP_INT);
2607 return VNForIntCon(UINT16(arg0Val));
2609 assert(typ == TYP_INT);
2610 return VNForIntCon(INT32(arg0Val));
2612 assert(typ == TYP_INT);
2613 return VNForIntCon(UINT32(arg0Val));
2615 assert(typ == TYP_LONG);
2616 return VNForLongCon(INT64(arg0Val));
2618 assert(typ == TYP_LONG);
2619 return VNForLongCon(UINT64(arg0Val));
2621 assert(typ == TYP_FLOAT);
2622 return VNForFloatCon(arg0Val);
2624 assert(typ == TYP_DOUBLE);
2625 return VNForDoubleCon(double(arg0Val));
2632 double arg0Val = GetConstantDouble(arg0VN);
2637 assert(typ == TYP_INT);
2638 return VNForIntCon(INT8(arg0Val));
2641 assert(typ == TYP_INT);
2642 return VNForIntCon(UINT8(arg0Val));
2644 assert(typ == TYP_INT);
2645 return VNForIntCon(INT16(arg0Val));
2647 assert(typ == TYP_INT);
2648 return VNForIntCon(UINT16(arg0Val));
2650 assert(typ == TYP_INT);
2651 return VNForIntCon(INT32(arg0Val));
2653 assert(typ == TYP_INT);
2654 return VNForIntCon(UINT32(arg0Val));
2656 assert(typ == TYP_LONG);
2657 return VNForLongCon(INT64(arg0Val));
2659 assert(typ == TYP_LONG);
2660 return VNForLongCon(UINT64(arg0Val));
2662 assert(typ == TYP_FLOAT);
2663 return VNForFloatCon(float(arg0Val));
2665 assert(typ == TYP_DOUBLE);
2666 return VNForDoubleCon(arg0Val);
2676 bool ValueNumStore::CanEvalForConstantArgs(VNFunc vnf)
2678 if (vnf < VNF_Boundary)
2680 // We'll refine this as we get counterexamples. But to
2681 // a first approximation, VNFuncs that are genTreeOps should
2682 // be things we can evaluate.
2683 genTreeOps oper = genTreeOps(vnf);
2684 // Some exceptions...
2687 case GT_MKREFANY: // We can't evaluate these.
2694 assert(false && "Unexpected GT_MULHI node encountered before lowering");
2702 // some VNF_ that we can evaluate
2705 case VNF_Cast: // We can evaluate these.
2707 case VNF_ObjGetType:
2715 ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN)
2717 assert(arg0VN != NoVN);
2718 assert(arg1VN != NoVN);
2719 assert(arg2VN != NoVN);
2720 assert(VNFuncArity(func) == 3);
2722 // Function arguments carry no exceptions.
2723 CLANG_FORMAT_COMMENT_ANCHOR;
2726 if (func != VNF_PhiDef)
2728 // For a phi definition first and second argument are "plain" local/ssa numbers.
2729 // (I don't know if having such non-VN arguments to a VN function is a good idea -- if we wanted to declare
2730 // ValueNum to be "short" it would be a problem, for example. But we'll leave it for now, with these explicit
2732 assert(arg0VN == VNNormVal(arg0VN));
2733 assert(arg1VN == VNNormVal(arg1VN));
2735 assert(arg2VN == VNNormVal(arg2VN));
2738 assert(VNFuncArity(func) == 3);
2741 VNDefFunc3Arg fstruct(func, arg0VN, arg1VN, arg2VN);
2742 if (GetVNFunc3Map()->Lookup(fstruct, &res))
2748 Chunk* c = GetAllocChunk(typ, CEA_Func3);
2749 unsigned offsetWithinChunk = c->AllocVN();
2750 res = c->m_baseVN + offsetWithinChunk;
2751 reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
2752 GetVNFunc3Map()->Set(fstruct, res);
2757 ValueNum ValueNumStore::VNForFunc(
2758 var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN, ValueNum arg3VN)
2760 assert(arg0VN != NoVN && arg1VN != NoVN && arg2VN != NoVN && arg3VN != NoVN);
2761 // Function arguments carry no exceptions.
2762 assert(arg0VN == VNNormVal(arg0VN));
2763 assert(arg1VN == VNNormVal(arg1VN));
2764 assert(arg2VN == VNNormVal(arg2VN));
2765 assert(arg3VN == VNNormVal(arg3VN));
2766 assert(VNFuncArity(func) == 4);
2769 VNDefFunc4Arg fstruct(func, arg0VN, arg1VN, arg2VN, arg3VN);
2770 if (GetVNFunc4Map()->Lookup(fstruct, &res))
2776 Chunk* c = GetAllocChunk(typ, CEA_Func4);
2777 unsigned offsetWithinChunk = c->AllocVN();
2778 res = c->m_baseVN + offsetWithinChunk;
2779 reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offsetWithinChunk] = fstruct;
2780 GetVNFunc4Map()->Set(fstruct, res);
2785 //------------------------------------------------------------------------
2786 // VNForExpr: Opaque value number that is equivalent to itself but unique
2787 // from all other value numbers.
2790 // block - BasicBlock where the expression that produces this value occurs.
2791 // May be nullptr to force conservative "could be anywhere" interpretation.
2792 // typ - Type of the expression in the IR
2795 // A new value number distinct from any previously generated, that compares as equal
2796 // to itself, but not any other value number, and is annotated with the given
2799 ValueNum ValueNumStore::VNForExpr(BasicBlock* block, var_types typ)
2801 BasicBlock::loopNumber loopNum;
2802 if (block == nullptr)
2804 loopNum = MAX_LOOP_NUM;
2808 loopNum = block->bbNatLoopNum;
2811 // We always allocate a new, unique VN in this call.
2812 // The 'typ' is used to partition the allocation of VNs into different chunks.
2813 Chunk* c = GetAllocChunk(typ, CEA_None, loopNum);
2814 unsigned offsetWithinChunk = c->AllocVN();
2815 ValueNum result = c->m_baseVN + offsetWithinChunk;
2819 ValueNum ValueNumStore::VNApplySelectors(ValueNumKind vnk,
2821 FieldSeqNode* fieldSeq,
2822 size_t* wbFinalStructSize)
2824 if (fieldSeq == nullptr)
2830 assert(fieldSeq != FieldSeqStore::NotAField());
2832 // Skip any "FirstElem" pseudo-fields or any "ConstantIndex" pseudo-fields
2833 if (fieldSeq->IsPseudoField())
2835 return VNApplySelectors(vnk, map, fieldSeq->m_next, wbFinalStructSize);
2838 // Otherwise, is a real field handle.
2839 CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
2840 CORINFO_CLASS_HANDLE structHnd = NO_CLASS_HANDLE;
2841 ValueNum fldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
2842 noway_assert(fldHnd != nullptr);
2843 CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd, &structHnd);
2844 var_types fieldType = JITtype2varType(fieldCit);
2846 size_t structSize = 0;
2847 if (varTypeIsStruct(fieldType))
2849 structSize = m_pComp->info.compCompHnd->getClassSize(structHnd);
2850 // We do not normalize the type field accesses during importation unless they
2851 // are used in a call, return or assignment.
2852 if ((fieldType == TYP_STRUCT) && (structSize <= m_pComp->largestEnregisterableStructSize()))
2854 fieldType = m_pComp->impNormStructType(structHnd);
2857 if (wbFinalStructSize != nullptr)
2859 *wbFinalStructSize = structSize;
2863 if (m_pComp->verbose)
2865 printf(" VNApplySelectors:\n");
2866 const char* modName;
2867 const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName);
2868 printf(" VNForHandle(Fseq[%s]) is " FMT_VN ", fieldType is %s", fldName, fldHndVN,
2869 varTypeName(fieldType));
2870 if (varTypeIsStruct(fieldType))
2872 printf(", size = %d", structSize);
2878 if (fieldSeq->m_next != nullptr)
2880 ValueNum newMap = VNForMapSelect(vnk, fieldType, map, fldHndVN);
2881 return VNApplySelectors(vnk, newMap, fieldSeq->m_next, wbFinalStructSize);
2883 else // end of fieldSeq
2885 return VNForMapSelect(vnk, fieldType, map, fldHndVN);
2890 ValueNum ValueNumStore::VNApplySelectorsTypeCheck(ValueNum elem, var_types indType, size_t elemStructSize)
2892 var_types elemTyp = TypeOfVN(elem);
2894 // Check if the elemTyp is matching/compatible
2896 if (indType != elemTyp)
2898 // We are trying to read from an 'elem' of type 'elemType' using 'indType' read
2900 size_t elemTypSize = (elemTyp == TYP_STRUCT) ? elemStructSize : genTypeSize(elemTyp);
2901 size_t indTypeSize = genTypeSize(indType);
2903 if ((indType == TYP_REF) && (varTypeIsStruct(elemTyp)))
2905 // indType is TYP_REF and elemTyp is TYP_STRUCT
2907 // We have a pointer to a static that is a Boxed Struct
2911 else if (indTypeSize > elemTypSize)
2913 // Reading beyong the end of 'elem'
2915 // return a new unique value number
2916 elem = VNForExpr(nullptr, indType);
2917 JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (reading beyond the end)\n");
2919 else if (varTypeIsStruct(indType))
2921 // indType is TYP_STRUCT
2923 // return a new unique value number
2924 elem = VNForExpr(nullptr, indType);
2925 JITDUMP(" *** Mismatched types in VNApplySelectorsTypeCheck (indType is TYP_STRUCT)\n");
2929 // We are trying to read an 'elem' of type 'elemType' using 'indType' read
2931 // insert a cast of elem to 'indType'
2932 elem = VNForCast(elem, indType, elemTyp);
2938 ValueNum ValueNumStore::VNApplySelectorsAssignTypeCoerce(ValueNum elem, var_types indType, BasicBlock* block)
2940 var_types elemTyp = TypeOfVN(elem);
2942 // Check if the elemTyp is matching/compatible
2944 if (indType != elemTyp)
2946 bool isConstant = IsVNConstant(elem);
2947 if (isConstant && (elemTyp == genActualType(indType)))
2949 // (i.e. We recorded a constant of TYP_INT for a TYP_BYTE field)
2953 // We are trying to write an 'elem' of type 'elemType' using 'indType' store
2955 if (varTypeIsStruct(indType))
2957 // return a new unique value number
2958 elem = VNForExpr(block, indType);
2959 JITDUMP(" *** Mismatched types in VNApplySelectorsAssignTypeCoerce (indType is TYP_STRUCT)\n");
2963 // We are trying to write an 'elem' of type 'elemType' using 'indType' store
2965 // insert a cast of elem to 'indType'
2966 elem = VNForCast(elem, indType, elemTyp);
2973 //------------------------------------------------------------------------
2974 // VNApplySelectorsAssign: Compute the value number corresponding to "map" but with
2975 // the element at "fieldSeq" updated to have type "elem"; this is the new memory
2976 // value for an assignment of value "elem" into the memory at location "fieldSeq"
2977 // that occurs in block "block" and has type "indType" (so long as the selectors
2978 // into that memory occupy disjoint locations, which is true for GcHeap).
2981 // vnk - Identifies whether to recurse to Conservative or Liberal value numbers
2982 // when recursing through phis
2983 // map - Value number for the field map before the assignment
2984 // elem - Value number for the value being stored (to the given field)
2985 // indType - Type of the indirection storing the value to the field
2986 // block - Block where the assignment occurs
2989 // The value number corresponding to memory after the assignment.
2991 ValueNum ValueNumStore::VNApplySelectorsAssign(
2992 ValueNumKind vnk, ValueNum map, FieldSeqNode* fieldSeq, ValueNum elem, var_types indType, BasicBlock* block)
2994 if (fieldSeq == nullptr)
2996 return VNApplySelectorsAssignTypeCoerce(elem, indType, block);
3000 assert(fieldSeq != FieldSeqStore::NotAField());
3002 // Skip any "FirstElem" pseudo-fields or any "ConstantIndex" pseudo-fields
3003 // These will occur, at least, in struct static expressions, for method table offsets.
3004 if (fieldSeq->IsPseudoField())
3006 return VNApplySelectorsAssign(vnk, map, fieldSeq->m_next, elem, indType, block);
3009 // Otherwise, fldHnd is a real field handle.
3010 CORINFO_FIELD_HANDLE fldHnd = fieldSeq->m_fieldHnd;
3011 CORINFO_CLASS_HANDLE structType = nullptr;
3012 noway_assert(fldHnd != nullptr);
3013 CorInfoType fieldCit = m_pComp->info.compCompHnd->getFieldType(fldHnd, &structType);
3014 var_types fieldType = JITtype2varType(fieldCit);
3016 ValueNum fieldHndVN = VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
3018 if (fieldSeq->m_next)
3020 ValueNum fseqMap = VNForMapSelect(vnk, fieldType, map, fieldHndVN);
3021 elemAfter = VNApplySelectorsAssign(vnk, fseqMap, fieldSeq->m_next, elem, indType, block);
3025 elemAfter = VNApplySelectorsAssignTypeCoerce(elem, indType, block);
3028 ValueNum newMap = VNForMapStore(fieldType, map, fieldHndVN, elemAfter);
3033 ValueNumPair ValueNumStore::VNPairApplySelectors(ValueNumPair map, FieldSeqNode* fieldSeq, var_types indType)
3035 size_t structSize = 0;
3036 ValueNum liberalVN = VNApplySelectors(VNK_Liberal, map.GetLiberal(), fieldSeq, &structSize);
3037 liberalVN = VNApplySelectorsTypeCheck(liberalVN, indType, structSize);
3040 ValueNum conservVN = VNApplySelectors(VNK_Conservative, map.GetConservative(), fieldSeq, &structSize);
3041 conservVN = VNApplySelectorsTypeCheck(conservVN, indType, structSize);
3043 return ValueNumPair(liberalVN, conservVN);
3046 bool ValueNumStore::IsVNNotAField(ValueNum vn)
3048 return m_chunks.GetNoExpand(GetChunkNum(vn))->m_attribs == CEA_NotAField;
3051 ValueNum ValueNumStore::VNForFieldSeq(FieldSeqNode* fieldSeq)
3053 if (fieldSeq == nullptr)
3057 else if (fieldSeq == FieldSeqStore::NotAField())
3059 // We always allocate a new, unique VN in this call.
3060 Chunk* c = GetAllocChunk(TYP_REF, CEA_NotAField);
3061 unsigned offsetWithinChunk = c->AllocVN();
3062 ValueNum result = c->m_baseVN + offsetWithinChunk;
3067 ssize_t fieldHndVal = ssize_t(fieldSeq->m_fieldHnd);
3068 ValueNum fieldHndVN = VNForHandle(fieldHndVal, GTF_ICON_FIELD_HDL);
3069 ValueNum seqNextVN = VNForFieldSeq(fieldSeq->m_next);
3070 ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, fieldHndVN, seqNextVN);
3073 if (m_pComp->verbose)
3075 printf(" fieldHnd " FMT_VN " is ", fieldHndVN);
3076 vnDump(m_pComp, fieldHndVN);
3079 printf(" fieldSeq " FMT_VN " is ", fieldSeqVN);
3080 vnDump(m_pComp, fieldSeqVN);
3089 FieldSeqNode* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
3091 if (vn == VNForNull())
3096 assert(IsVNFunc(vn));
3099 GetVNFunc(vn, &funcApp);
3100 if (funcApp.m_func == VNF_NotAField)
3102 return FieldSeqStore::NotAField();
3105 assert(funcApp.m_func == VNF_FieldSeq);
3106 const ssize_t fieldHndVal = ConstantValue<ssize_t>(funcApp.m_args[0]);
3107 FieldSeqNode* head =
3108 m_pComp->GetFieldSeqStore()->CreateSingleton(reinterpret_cast<CORINFO_FIELD_HANDLE>(fieldHndVal));
3109 FieldSeqNode* tail = FieldSeqVNToFieldSeq(funcApp.m_args[1]);
3110 return m_pComp->GetFieldSeqStore()->Append(head, tail);
3113 ValueNum ValueNumStore::FieldSeqVNAppend(ValueNum fsVN1, ValueNum fsVN2)
3115 if (fsVN1 == VNForNull())
3120 assert(IsVNFunc(fsVN1));
3123 GetVNFunc(fsVN1, &funcApp1);
3125 if ((funcApp1.m_func == VNF_NotAField) || IsVNNotAField(fsVN2))
3127 return VNForFieldSeq(FieldSeqStore::NotAField());
3130 assert(funcApp1.m_func == VNF_FieldSeq);
3131 ValueNum tailRes = FieldSeqVNAppend(funcApp1.m_args[1], fsVN2);
3132 ValueNum fieldSeqVN = VNForFunc(TYP_REF, VNF_FieldSeq, funcApp1.m_args[0], tailRes);
3135 if (m_pComp->verbose)
3137 printf(" fieldSeq " FMT_VN " is ", fieldSeqVN);
3138 vnDump(m_pComp, fieldSeqVN);
3146 ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB)
3148 if (opB->OperGet() == GT_CNS_INT)
3150 FieldSeqNode* fldSeq = opB->gtIntCon.gtFieldSeq;
3151 if (fldSeq != nullptr)
3153 return ExtendPtrVN(opA, opB->gtIntCon.gtFieldSeq);
3159 ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq)
3161 assert(fldSeq != nullptr);
3163 ValueNum res = NoVN;
3165 ValueNum opAvnWx = opA->gtVNPair.GetLiberal();
3166 assert(VNIsValid(opAvnWx));
3168 ValueNum opAvnx = VNForEmptyExcSet();
3169 VNUnpackExc(opAvnWx, &opAvn, &opAvnx);
3170 assert(VNIsValid(opAvn) && VNIsValid(opAvnx));
3173 if (!GetVNFunc(opAvn, &funcApp))
3178 if (funcApp.m_func == VNF_PtrToLoc)
3181 // For PtrToLoc, lib == cons.
3182 VNFuncApp consFuncApp;
3183 assert(GetVNFunc(VNNormVal(opA->GetVN(VNK_Conservative)), &consFuncApp) && consFuncApp.Equals(funcApp));
3185 ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
3186 res = VNForFunc(TYP_BYREF, VNF_PtrToLoc, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeqVN));
3188 else if (funcApp.m_func == VNF_PtrToStatic)
3190 ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
3191 res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, FieldSeqVNAppend(funcApp.m_args[0], fldSeqVN));
3193 else if (funcApp.m_func == VNF_PtrToArrElem)
3195 ValueNum fldSeqVN = VNForFieldSeq(fldSeq);
3196 res = VNForFunc(TYP_BYREF, VNF_PtrToArrElem, funcApp.m_args[0], funcApp.m_args[1], funcApp.m_args[2],
3197 FieldSeqVNAppend(funcApp.m_args[3], fldSeqVN));
3201 res = VNWithExc(res, opAvnx);
3206 ValueNum Compiler::fgValueNumberArrIndexAssign(CORINFO_CLASS_HANDLE elemTypeEq,
3209 FieldSeqNode* fldSeq,
3213 bool invalidateArray = false;
3214 ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
3215 var_types arrElemType = DecodeElemType(elemTypeEq);
3216 ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN);
3217 ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
3218 ValueNum hAtArrTypeAtArrAtInx = vnStore->VNForMapSelect(VNK_Liberal, arrElemType, hAtArrTypeAtArr, inxVN);
3220 ValueNum newValAtInx = ValueNumStore::NoVN;
3221 ValueNum newValAtArr = ValueNumStore::NoVN;
3222 ValueNum newValAtArrType = ValueNumStore::NoVN;
3224 if (fldSeq == FieldSeqStore::NotAField())
3226 // This doesn't represent a proper array access
3227 JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexAssign\n");
3229 // Store a new unique value for newValAtArrType
3230 newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_REF);
3231 invalidateArray = true;
3235 // Note that this does the right thing if "fldSeq" is null -- returns last "rhs" argument.
3236 // This is the value that should be stored at "arr[inx]".
3238 vnStore->VNApplySelectorsAssign(VNK_Liberal, hAtArrTypeAtArrAtInx, fldSeq, rhsVN, indType, compCurBB);
3240 var_types arrElemFldType = arrElemType; // Uses arrElemType unless we has a non-null fldSeq
3241 if (vnStore->IsVNFunc(newValAtInx))
3244 vnStore->GetVNFunc(newValAtInx, &funcApp);
3245 if (funcApp.m_func == VNF_MapStore)
3247 arrElemFldType = vnStore->TypeOfVN(newValAtInx);
3251 if (indType != arrElemFldType)
3253 // Mismatched types: Store between different types (indType into array of arrElemFldType)
3256 JITDUMP(" *** Mismatched types in fgValueNumberArrIndexAssign\n");
3258 // Store a new unique value for newValAtArrType
3259 newValAtArrType = vnStore->VNForExpr(compCurBB, TYP_REF);
3260 invalidateArray = true;
3264 if (!invalidateArray)
3266 newValAtArr = vnStore->VNForMapStore(indType, hAtArrTypeAtArr, inxVN, newValAtInx);
3267 newValAtArrType = vnStore->VNForMapStore(TYP_REF, hAtArrType, arrVN, newValAtArr);
3273 printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, fgCurMemoryVN[GcHeap]);
3275 if (arrElemType == TYP_STRUCT)
3277 printf("%s[]).\n", eeGetClassName(elemTypeEq));
3281 printf("%s[]).\n", varTypeName(arrElemType));
3283 printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ")\n", hAtArrTypeAtArr,
3285 printf(" hAtArrTypeAtArrAtInx " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), inx=" FMT_VN "):%s\n",
3286 hAtArrTypeAtArrAtInx, hAtArrTypeAtArr, inxVN, varTypeName(arrElemType));
3288 if (!invalidateArray)
3290 printf(" newValAtInd " FMT_VN " is ", newValAtInx);
3291 vnStore->vnDump(this, newValAtInx);
3294 printf(" newValAtArr " FMT_VN " is ", newValAtArr);
3295 vnStore->vnDump(this, newValAtArr);
3299 printf(" newValAtArrType " FMT_VN " is ", newValAtArrType);
3300 vnStore->vnDump(this, newValAtArrType);
3305 return vnStore->VNForMapStore(TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN, newValAtArrType);
3308 ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree, VNFuncApp* pFuncApp, ValueNum addrXvn)
3310 assert(vnStore->IsVNHandle(pFuncApp->m_args[0]));
3311 CORINFO_CLASS_HANDLE arrElemTypeEQ = CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(pFuncApp->m_args[0]));
3312 ValueNum arrVN = pFuncApp->m_args[1];
3313 ValueNum inxVN = pFuncApp->m_args[2];
3314 FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(pFuncApp->m_args[3]);
3315 return fgValueNumberArrIndexVal(tree, arrElemTypeEQ, arrVN, inxVN, addrXvn, fldSeq);
3318 ValueNum Compiler::fgValueNumberArrIndexVal(GenTree* tree,
3319 CORINFO_CLASS_HANDLE elemTypeEq,
3323 FieldSeqNode* fldSeq)
3325 assert(tree == nullptr || tree->OperIsIndir());
3327 // The VN inputs are required to be non-exceptional values.
3328 assert(arrVN == vnStore->VNNormVal(arrVN));
3329 assert(inxVN == vnStore->VNNormVal(inxVN));
3331 var_types elemTyp = DecodeElemType(elemTypeEq);
3332 var_types indType = (tree == nullptr) ? elemTyp : tree->TypeGet();
3333 ValueNum selectedElem;
3335 if (fldSeq == FieldSeqStore::NotAField())
3337 // This doesn't represent a proper array access
3338 JITDUMP(" *** NotAField sequence encountered in fgValueNumberArrIndexVal\n");
3340 // a new unique value number
3341 selectedElem = vnStore->VNForExpr(compCurBB, elemTyp);
3346 printf(" IND of PtrToArrElem is unique VN " FMT_VN ".\n", selectedElem);
3350 if (tree != nullptr)
3352 tree->gtVNPair.SetBoth(selectedElem);
3357 ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
3358 ValueNum hAtArrType = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, fgCurMemoryVN[GcHeap], elemTypeEqVN);
3359 ValueNum hAtArrTypeAtArr = vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, hAtArrType, arrVN);
3360 ValueNum wholeElem = vnStore->VNForMapSelect(VNK_Liberal, elemTyp, hAtArrTypeAtArr, inxVN);
3365 printf(" hAtArrType " FMT_VN " is MapSelect(curGcHeap(" FMT_VN "), ", hAtArrType, fgCurMemoryVN[GcHeap]);
3366 if (elemTyp == TYP_STRUCT)
3368 printf("%s[]).\n", eeGetClassName(elemTypeEq));
3372 printf("%s[]).\n", varTypeName(elemTyp));
3375 printf(" hAtArrTypeAtArr " FMT_VN " is MapSelect(hAtArrType(" FMT_VN "), arr=" FMT_VN ").\n",
3376 hAtArrTypeAtArr, hAtArrType, arrVN);
3378 printf(" wholeElem " FMT_VN " is MapSelect(hAtArrTypeAtArr(" FMT_VN "), ind=" FMT_VN ").\n", wholeElem,
3379 hAtArrTypeAtArr, inxVN);
3383 selectedElem = wholeElem;
3384 size_t elemStructSize = 0;
3387 selectedElem = vnStore->VNApplySelectors(VNK_Liberal, wholeElem, fldSeq, &elemStructSize);
3388 elemTyp = vnStore->TypeOfVN(selectedElem);
3390 selectedElem = vnStore->VNApplySelectorsTypeCheck(selectedElem, indType, elemStructSize);
3391 selectedElem = vnStore->VNWithExc(selectedElem, excVN);
3394 if (verbose && (selectedElem != wholeElem))
3396 printf(" selectedElem is " FMT_VN " after applying selectors.\n", selectedElem);
3400 if (tree != nullptr)
3402 tree->gtVNPair.SetLiberal(selectedElem);
3403 // TODO-CQ: what to do here about exceptions? We don't have the array and ind conservative
3404 // values, so we don't have their exceptions. Maybe we should.
3405 tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
3409 return selectedElem;
3412 ValueNum Compiler::fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN)
3414 ValueNum memoryVN = fgCurMemoryVN[ByrefExposed];
3415 // The memoization for VNFunc applications does not factor in the result type, so
3416 // VNF_ByrefExposedLoad takes the loaded type as an explicit parameter.
3417 ValueNum typeVN = vnStore->VNForIntCon(type);
3418 ValueNum loadVN = vnStore->VNForFunc(type, VNF_ByrefExposedLoad, typeVN, vnStore->VNNormVal(pointerVN), memoryVN);
3423 var_types ValueNumStore::TypeOfVN(ValueNum vn)
3430 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
3434 //------------------------------------------------------------------------
3435 // LoopOfVN: If the given value number is an opaque one associated with a particular
3436 // expression in the IR, give the loop number where the expression occurs; otherwise,
3437 // returns MAX_LOOP_NUM.
3440 // vn - Value number to query
3443 // The correspondingblock's bbNatLoopNum, which may be BasicBlock::NOT_IN_LOOP.
3444 // Returns MAX_LOOP_NUM if this VN is not an opaque value number associated with
3445 // a particular expression/location in the IR.
3447 BasicBlock::loopNumber ValueNumStore::LoopOfVN(ValueNum vn)
3451 return MAX_LOOP_NUM;
3454 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
3455 return c->m_loopNum;
3458 bool ValueNumStore::IsVNConstant(ValueNum vn)
3464 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
3465 if (c->m_attribs == CEA_Const)
3467 return vn != VNForVoid(); // Void is not a "real" constant -- in the sense that it represents no value.
3471 return c->m_attribs == CEA_Handle;
3475 bool ValueNumStore::IsVNInt32Constant(ValueNum vn)
3477 if (!IsVNConstant(vn))
3482 return TypeOfVN(vn) == TYP_INT;
3485 unsigned ValueNumStore::GetHandleFlags(ValueNum vn)
3487 assert(IsVNHandle(vn));
3488 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
3489 unsigned offset = ChunkOffset(vn);
3490 VNHandle* handle = &reinterpret_cast<VNHandle*>(c->m_defs)[offset];
3491 return handle->m_flags;
3494 bool ValueNumStore::IsVNHandle(ValueNum vn)
3501 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
3502 return c->m_attribs == CEA_Handle;
3505 bool ValueNumStore::IsVNConstantBound(ValueNum vn)
3507 // Do we have "var < 100"?
3514 if (!GetVNFunc(vn, &funcAttr))
3518 if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
3519 funcAttr.m_func != (VNFunc)GT_GT)
3524 return IsVNInt32Constant(funcAttr.m_args[0]) != IsVNInt32Constant(funcAttr.m_args[1]);
3527 void ValueNumStore::GetConstantBoundInfo(ValueNum vn, ConstantBoundInfo* info)
3529 assert(IsVNConstantBound(vn));
3532 // Do we have var < 100?
3534 GetVNFunc(vn, &funcAttr);
3536 bool isOp1Const = IsVNInt32Constant(funcAttr.m_args[1]);
3540 info->cmpOper = funcAttr.m_func;
3541 info->cmpOpVN = funcAttr.m_args[0];
3542 info->constVal = GetConstantInt32(funcAttr.m_args[1]);
3546 info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
3547 info->cmpOpVN = funcAttr.m_args[1];
3548 info->constVal = GetConstantInt32(funcAttr.m_args[0]);
3552 //------------------------------------------------------------------------
3553 // IsVNArrLenUnsignedBound: Checks if the specified vn represents an expression
3554 // such as "(uint)i < (uint)len" that implies that the index is valid
3555 // (0 <= i && i < a.len).
3558 // vn - Value number to query
3559 // info - Pointer to an UnsignedCompareCheckedBoundInfo object to return information about
3560 // the expression. Not populated if the vn expression isn't suitable (e.g. i <= len).
3561 // This enables optCreateJTrueBoundAssertion to immediatly create an OAK_NO_THROW
3562 // assertion instead of the OAK_EQUAL/NOT_EQUAL assertions created by signed compares
3563 // (IsVNCompareCheckedBound, IsVNCompareCheckedBoundArith) that require further processing.
3565 bool ValueNumStore::IsVNUnsignedCompareCheckedBound(ValueNum vn, UnsignedCompareCheckedBoundInfo* info)
3569 if (GetVNFunc(vn, &funcApp))
3571 if ((funcApp.m_func == VNF_LT_UN) || (funcApp.m_func == VNF_GE_UN))
3573 // We only care about "(uint)i < (uint)len" and its negation "(uint)i >= (uint)len"
3574 if (IsVNCheckedBound(funcApp.m_args[1]))
3576 info->vnIdx = funcApp.m_args[0];
3577 info->cmpOper = funcApp.m_func;
3578 info->vnBound = funcApp.m_args[1];
3582 else if ((funcApp.m_func == VNF_GT_UN) || (funcApp.m_func == VNF_LE_UN))
3584 // We only care about "(uint)a.len > (uint)i" and its negation "(uint)a.len <= (uint)i"
3585 if (IsVNCheckedBound(funcApp.m_args[0]))
3587 info->vnIdx = funcApp.m_args[1];
3588 // Let's keep a consistent operand order - it's always i < len, never len > i
3589 info->cmpOper = (funcApp.m_func == VNF_GT_UN) ? VNF_LT_UN : VNF_GE_UN;
3590 info->vnBound = funcApp.m_args[0];
3599 bool ValueNumStore::IsVNCompareCheckedBound(ValueNum vn)
3601 // Do we have "var < len"?
3608 if (!GetVNFunc(vn, &funcAttr))
3612 if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
3613 funcAttr.m_func != (VNFunc)GT_GT)
3617 if (!IsVNCheckedBound(funcAttr.m_args[0]) && !IsVNCheckedBound(funcAttr.m_args[1]))
3625 void ValueNumStore::GetCompareCheckedBound(ValueNum vn, CompareCheckedBoundArithInfo* info)
3627 assert(IsVNCompareCheckedBound(vn));
3629 // Do we have var < a.len?
3631 GetVNFunc(vn, &funcAttr);
3633 bool isOp1CheckedBound = IsVNCheckedBound(funcAttr.m_args[1]);
3634 if (isOp1CheckedBound)
3636 info->cmpOper = funcAttr.m_func;
3637 info->cmpOp = funcAttr.m_args[0];
3638 info->vnBound = funcAttr.m_args[1];
3642 info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
3643 info->cmpOp = funcAttr.m_args[1];
3644 info->vnBound = funcAttr.m_args[0];
3648 bool ValueNumStore::IsVNCheckedBoundArith(ValueNum vn)
3650 // Do we have "a.len +or- var"
3658 return GetVNFunc(vn, &funcAttr) && // vn is a func.
3659 (funcAttr.m_func == (VNFunc)GT_ADD || funcAttr.m_func == (VNFunc)GT_SUB) && // the func is +/-
3660 (IsVNCheckedBound(funcAttr.m_args[0]) || IsVNCheckedBound(funcAttr.m_args[1])); // either op1 or op2 is a.len
3663 void ValueNumStore::GetCheckedBoundArithInfo(ValueNum vn, CompareCheckedBoundArithInfo* info)
3665 // Do we have a.len +/- var?
3666 assert(IsVNCheckedBoundArith(vn));
3667 VNFuncApp funcArith;
3668 GetVNFunc(vn, &funcArith);
3670 bool isOp1CheckedBound = IsVNCheckedBound(funcArith.m_args[1]);
3671 if (isOp1CheckedBound)
3673 info->arrOper = funcArith.m_func;
3674 info->arrOp = funcArith.m_args[0];
3675 info->vnBound = funcArith.m_args[1];
3679 info->arrOper = funcArith.m_func;
3680 info->arrOp = funcArith.m_args[1];
3681 info->vnBound = funcArith.m_args[0];
3685 bool ValueNumStore::IsVNCompareCheckedBoundArith(ValueNum vn)
3687 // Do we have: "var < a.len - var"
3694 if (!GetVNFunc(vn, &funcAttr))
3699 // Suitable comparator.
3700 if (funcAttr.m_func != (VNFunc)GT_LE && funcAttr.m_func != (VNFunc)GT_GE && funcAttr.m_func != (VNFunc)GT_LT &&
3701 funcAttr.m_func != (VNFunc)GT_GT)
3706 // Either the op0 or op1 is arr len arithmetic.
3707 if (!IsVNCheckedBoundArith(funcAttr.m_args[0]) && !IsVNCheckedBoundArith(funcAttr.m_args[1]))
3715 void ValueNumStore::GetCompareCheckedBoundArithInfo(ValueNum vn, CompareCheckedBoundArithInfo* info)
3717 assert(IsVNCompareCheckedBoundArith(vn));
3720 GetVNFunc(vn, &funcAttr);
3722 // Check whether op0 or op1 is checked bound arithmetic.
3723 bool isOp1CheckedBoundArith = IsVNCheckedBoundArith(funcAttr.m_args[1]);
3724 if (isOp1CheckedBoundArith)
3726 info->cmpOper = funcAttr.m_func;
3727 info->cmpOp = funcAttr.m_args[0];
3728 GetCheckedBoundArithInfo(funcAttr.m_args[1], info);
3732 info->cmpOper = GenTree::SwapRelop((genTreeOps)funcAttr.m_func);
3733 info->cmpOp = funcAttr.m_args[1];
3734 GetCheckedBoundArithInfo(funcAttr.m_args[0], info);
3738 ValueNum ValueNumStore::GetArrForLenVn(ValueNum vn)
3746 if (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH)
3748 return funcAttr.m_args[0];
3753 bool ValueNumStore::IsVNNewArr(ValueNum vn, VNFuncApp* funcApp)
3759 bool result = false;
3760 if (GetVNFunc(vn, funcApp))
3762 result = (funcApp->m_func == VNF_JitNewArr) || (funcApp->m_func == VNF_JitReadyToRunNewArr);
3767 int ValueNumStore::GetNewArrSize(ValueNum vn)
3770 if (IsVNNewArr(vn, &funcApp))
3772 ValueNum arg1VN = funcApp.m_args[1];
3773 if (IsVNConstant(arg1VN) && TypeOfVN(arg1VN) == TYP_INT)
3775 return ConstantValue<int>(arg1VN);
3781 bool ValueNumStore::IsVNArrLen(ValueNum vn)
3788 return (GetVNFunc(vn, &funcAttr) && funcAttr.m_func == (VNFunc)GT_ARR_LENGTH);
3791 bool ValueNumStore::IsVNCheckedBound(ValueNum vn)
3794 if (m_checkedBoundVNs.TryGetValue(vn, &dummy))
3796 // This VN appeared as the conservative VN of the length argument of some
3797 // GT_ARR_BOUND node.
3802 // Even if we haven't seen this VN in a bounds check, if it is an array length
3803 // VN then consider it a checked bound VN. This facilitates better bounds check
3804 // removal by ensuring that compares against array lengths get put in the
3805 // optCseCheckedBoundMap; such an array length might get CSEd with one that was
3806 // directly used in a bounds check, and having the map entry will let us update
3807 // the compare's VN so that OptimizeRangeChecks can recognize such compares.
3814 void ValueNumStore::SetVNIsCheckedBound(ValueNum vn)
3816 // This is meant to flag VNs for lengths that aren't known at compile time, so we can
3817 // form and propagate assertions about them. Ensure that callers filter out constant
3818 // VNs since they're not what we're looking to flag, and assertion prop can reason
3819 // directly about constants.
3820 assert(!IsVNConstant(vn));
3821 m_checkedBoundVNs.AddOrUpdate(vn, true);
3824 ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, CorInfoIntrinsics gtMathFN, ValueNum arg0VN)
3826 assert(arg0VN == VNNormVal(arg0VN));
3828 // If the math intrinsic is not implemented by target-specific instructions, such as implemented
3829 // by user calls, then don't do constant folding on it. This minimizes precision loss.
3831 if (IsVNConstant(arg0VN) && m_pComp->IsTargetIntrinsic(gtMathFN))
3833 assert(varTypeIsFloating(TypeOfVN(arg0VN)));
3835 if (typ == TYP_DOUBLE)
3837 // Both operand and its result must be of the same floating point type.
3838 assert(typ == TypeOfVN(arg0VN));
3839 double arg0Val = GetConstantDouble(arg0VN);
3844 case CORINFO_INTRINSIC_Sin:
3847 case CORINFO_INTRINSIC_Cos:
3850 case CORINFO_INTRINSIC_Sqrt:
3851 res = sqrt(arg0Val);
3853 case CORINFO_INTRINSIC_Abs:
3854 res = fabs(arg0Val);
3856 case CORINFO_INTRINSIC_Ceiling:
3857 res = ceil(arg0Val);
3859 case CORINFO_INTRINSIC_Floor:
3860 res = floor(arg0Val);
3862 case CORINFO_INTRINSIC_Round:
3863 res = FloatingPointUtils::round(arg0Val);
3866 unreached(); // the above are the only math intrinsics at the time of this writing.
3869 return VNForDoubleCon(res);
3871 else if (typ == TYP_FLOAT)
3873 // Both operand and its result must be of the same floating point type.
3874 assert(typ == TypeOfVN(arg0VN));
3875 float arg0Val = GetConstantSingle(arg0VN);
3880 case CORINFO_INTRINSIC_Sin:
3881 res = sinf(arg0Val);
3883 case CORINFO_INTRINSIC_Cos:
3884 res = cosf(arg0Val);
3886 case CORINFO_INTRINSIC_Sqrt:
3887 res = sqrtf(arg0Val);
3889 case CORINFO_INTRINSIC_Abs:
3890 res = fabsf(arg0Val);
3892 case CORINFO_INTRINSIC_Ceiling:
3893 res = ceilf(arg0Val);
3895 case CORINFO_INTRINSIC_Floor:
3896 res = floorf(arg0Val);
3898 case CORINFO_INTRINSIC_Round:
3899 res = FloatingPointUtils::round(arg0Val);
3902 unreached(); // the above are the only math intrinsics at the time of this writing.
3905 return VNForFloatCon(res);
3909 // CORINFO_INTRINSIC_Round is currently the only intrinsic that takes floating-point arguments
3910 // and that returns a non floating-point result.
3912 assert(typ == TYP_INT);
3913 assert(gtMathFN == CORINFO_INTRINSIC_Round);
3917 switch (TypeOfVN(arg0VN))
3921 double arg0Val = GetConstantDouble(arg0VN);
3922 res = int(FloatingPointUtils::round(arg0Val));
3927 float arg0Val = GetConstantSingle(arg0VN);
3928 res = int(FloatingPointUtils::round(arg0Val));
3935 return VNForIntCon(res);
3940 assert(typ == TYP_DOUBLE || typ == TYP_FLOAT || (typ == TYP_INT && gtMathFN == CORINFO_INTRINSIC_Round));
3942 VNFunc vnf = VNF_Boundary;
3945 case CORINFO_INTRINSIC_Sin:
3948 case CORINFO_INTRINSIC_Cos:
3951 case CORINFO_INTRINSIC_Cbrt:
3954 case CORINFO_INTRINSIC_Sqrt:
3957 case CORINFO_INTRINSIC_Abs:
3960 case CORINFO_INTRINSIC_Round:
3961 if (typ == TYP_DOUBLE)
3963 vnf = VNF_RoundDouble;
3965 else if (typ == TYP_FLOAT)
3967 vnf = VNF_RoundFloat;
3969 else if (typ == TYP_INT)
3975 noway_assert(!"Invalid INTRINSIC_Round");
3978 case CORINFO_INTRINSIC_Cosh:
3981 case CORINFO_INTRINSIC_Sinh:
3984 case CORINFO_INTRINSIC_Tan:
3987 case CORINFO_INTRINSIC_Tanh:
3990 case CORINFO_INTRINSIC_Asin:
3993 case CORINFO_INTRINSIC_Asinh:
3996 case CORINFO_INTRINSIC_Acos:
3999 case CORINFO_INTRINSIC_Acosh:
4002 case CORINFO_INTRINSIC_Atan:
4005 case CORINFO_INTRINSIC_Atanh:
4008 case CORINFO_INTRINSIC_Log10:
4011 case CORINFO_INTRINSIC_Exp:
4014 case CORINFO_INTRINSIC_Ceiling:
4017 case CORINFO_INTRINSIC_Floor:
4021 unreached(); // the above are the only math intrinsics at the time of this writing.
4024 return VNForFunc(typ, vnf, arg0VN);
4028 ValueNum ValueNumStore::EvalMathFuncBinary(var_types typ, CorInfoIntrinsics gtMathFN, ValueNum arg0VN, ValueNum arg1VN)
4030 assert(varTypeIsFloating(typ));
4031 assert(arg0VN == VNNormVal(arg0VN));
4032 assert(arg1VN == VNNormVal(arg1VN));
4034 VNFunc vnf = VNF_Boundary;
4036 // Currently, none of the binary math intrinsic are implemented by target-specific instructions.
4037 // To minimize precision loss, do not do constant folding on them.
4041 case CORINFO_INTRINSIC_Atan2:
4045 case CORINFO_INTRINSIC_Pow:
4050 unreached(); // the above are the only binary math intrinsics at the time of this writing.
4053 return VNForFunc(typ, vnf, arg0VN, arg1VN);
4056 bool ValueNumStore::IsVNFunc(ValueNum vn)
4062 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
4063 switch (c->m_attribs)
4077 bool ValueNumStore::GetVNFunc(ValueNum vn, VNFuncApp* funcApp)
4084 Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
4085 unsigned offset = ChunkOffset(vn);
4086 assert(offset < c->m_numUsed);
4087 switch (c->m_attribs)
4091 VNDefFunc4Arg* farg4 = &reinterpret_cast<VNDefFunc4Arg*>(c->m_defs)[offset];
4092 funcApp->m_func = farg4->m_func;
4093 funcApp->m_arity = 4;
4094 funcApp->m_args[0] = farg4->m_arg0;
4095 funcApp->m_args[1] = farg4->m_arg1;
4096 funcApp->m_args[2] = farg4->m_arg2;
4097 funcApp->m_args[3] = farg4->m_arg3;
4102 VNDefFunc3Arg* farg3 = &reinterpret_cast<VNDefFunc3Arg*>(c->m_defs)[offset];
4103 funcApp->m_func = farg3->m_func;
4104 funcApp->m_arity = 3;
4105 funcApp->m_args[0] = farg3->m_arg0;
4106 funcApp->m_args[1] = farg3->m_arg1;
4107 funcApp->m_args[2] = farg3->m_arg2;
4112 VNDefFunc2Arg* farg2 = &reinterpret_cast<VNDefFunc2Arg*>(c->m_defs)[offset];
4113 funcApp->m_func = farg2->m_func;
4114 funcApp->m_arity = 2;
4115 funcApp->m_args[0] = farg2->m_arg0;
4116 funcApp->m_args[1] = farg2->m_arg1;
4121 VNDefFunc1Arg* farg1 = &reinterpret_cast<VNDefFunc1Arg*>(c->m_defs)[offset];
4122 funcApp->m_func = farg1->m_func;
4123 funcApp->m_arity = 1;
4124 funcApp->m_args[0] = farg1->m_arg0;
4129 VNDefFunc0Arg* farg0 = &reinterpret_cast<VNDefFunc0Arg*>(c->m_defs)[offset];
4130 funcApp->m_func = farg0->m_func;
4131 funcApp->m_arity = 0;
4136 funcApp->m_func = VNF_NotAField;
4137 funcApp->m_arity = 0;
4145 ValueNum ValueNumStore::VNForRefInAddr(ValueNum vn)
4147 var_types vnType = TypeOfVN(vn);
4148 if (vnType == TYP_REF)
4153 assert(vnType == TYP_BYREF);
4155 if (GetVNFunc(vn, &funcApp))
4157 assert(funcApp.m_arity == 2 && (funcApp.m_func == VNFunc(GT_ADD) || funcApp.m_func == VNFunc(GT_SUB)));
4158 var_types vnArg0Type = TypeOfVN(funcApp.m_args[0]);
4159 if (vnArg0Type == TYP_REF || vnArg0Type == TYP_BYREF)
4161 return VNForRefInAddr(funcApp.m_args[0]);
4165 assert(funcApp.m_func == VNFunc(GT_ADD) &&
4166 (TypeOfVN(funcApp.m_args[1]) == TYP_REF || TypeOfVN(funcApp.m_args[1]) == TYP_BYREF));
4167 return VNForRefInAddr(funcApp.m_args[1]);
4172 assert(IsVNConstant(vn));
4177 bool ValueNumStore::VNIsValid(ValueNum vn)
4179 ChunkNum cn = GetChunkNum(vn);
4180 if (cn >= m_chunks.Size())
4185 Chunk* c = m_chunks.GetNoExpand(cn);
4186 return ChunkOffset(vn) < c->m_numUsed;
4191 void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
4198 else if (IsVNHandle(vn))
4200 ssize_t val = ConstantValue<ssize_t>(vn);
4201 printf("Hnd const: 0x%p", dspPtr(val));
4203 else if (IsVNConstant(vn))
4205 var_types vnt = TypeOfVN(vn);
4216 int val = ConstantValue<int>(vn);
4219 printf("PtrCns[%p]", dspPtr(val));
4224 if ((val > -1000) && (val < 1000))
4226 printf(" %ld", val);
4230 printf(" 0x%X", val);
4238 INT64 val = ConstantValue<INT64>(vn);
4241 printf("LngPtrCns: 0x%p", dspPtr(val));
4246 if ((val > -1000) && (val < 1000))
4248 printf(" %ld", val);
4250 else if ((val & 0xFFFFFFFF00000000LL) == 0)
4252 printf(" 0x%X", val);
4256 printf(" 0x%llx", val);
4262 printf("FltCns[%f]", ConstantValue<float>(vn));
4265 printf("DblCns[%f]", ConstantValue<double>(vn));
4268 if (vn == VNForNull())
4272 else if (vn == VNForVoid())
4278 assert(vn == VNForZeroMap());
4291 #endif // FEATURE_SIMD
4292 printf("structVal");
4295 // These should be unreached.
4300 else if (IsVNCompareCheckedBound(vn))
4302 CompareCheckedBoundArithInfo info;
4303 GetCompareCheckedBound(vn, &info);
4306 else if (IsVNCompareCheckedBoundArith(vn))
4308 CompareCheckedBoundArithInfo info;
4309 GetCompareCheckedBoundArithInfo(vn, &info);
4312 else if (IsVNFunc(vn))
4315 GetVNFunc(vn, &funcApp);
4316 // A few special cases...
4317 switch (funcApp.m_func)
4320 vnDumpFieldSeq(comp, &funcApp, true);
4323 vnDumpMapSelect(comp, &funcApp);
4326 vnDumpMapStore(comp, &funcApp);
4328 case VNF_ValWithExc:
4329 vnDumpValWithExc(comp, &funcApp);
4332 printf("%s(", VNFuncName(funcApp.m_func));
4333 for (unsigned i = 0; i < funcApp.m_arity; i++)
4340 printf(FMT_VN, funcApp.m_args[i]);
4342 #if FEATURE_VN_DUMP_FUNC_ARGS
4344 vnDump(comp, funcApp.m_args[i]);
4352 // Otherwise, just a VN with no structure; print just the VN.
4358 // Requires "valWithExc" to be a value with an exeception set VNFuncApp.
4359 // Prints a representation of the exeception set on standard out.
4360 void ValueNumStore::vnDumpValWithExc(Compiler* comp, VNFuncApp* valWithExc)
4362 assert(valWithExc->m_func == VNF_ValWithExc); // Precondition.
4364 ValueNum normVN = valWithExc->m_args[0]; // First arg is the VN from normal execution
4365 ValueNum excVN = valWithExc->m_args[1]; // Second arg is the set of possible exceptions
4367 assert(IsVNFunc(excVN));
4369 GetVNFunc(excVN, &excSeq);
4372 printf(FMT_VN, normVN);
4373 vnDump(comp, normVN);
4375 printf(FMT_VN, excVN);
4376 vnDumpExcSeq(comp, &excSeq, true);
4379 // Requires "excSeq" to be a ExcSetCons sequence.
4380 // Prints a representation of the set of exceptions on standard out.
4381 void ValueNumStore::vnDumpExcSeq(Compiler* comp, VNFuncApp* excSeq, bool isHead)
4383 assert(excSeq->m_func == VNF_ExcSetCons); // Precondition.
4385 ValueNum curExc = excSeq->m_args[0];
4386 bool hasTail = (excSeq->m_args[1] != VNForEmptyExcSet());
4388 if (isHead && hasTail)
4393 vnDump(comp, curExc);
4398 assert(IsVNFunc(excSeq->m_args[1]));
4400 GetVNFunc(excSeq->m_args[1], &tail);
4401 vnDumpExcSeq(comp, &tail, false);
4404 if (isHead && hasTail)
4410 void ValueNumStore::vnDumpFieldSeq(Compiler* comp, VNFuncApp* fieldSeq, bool isHead)
4412 assert(fieldSeq->m_func == VNF_FieldSeq); // Precondition.
4413 // First arg is the field handle VN.
4414 assert(IsVNConstant(fieldSeq->m_args[0]) && TypeOfVN(fieldSeq->m_args[0]) == TYP_I_IMPL);
4415 ssize_t fieldHndVal = ConstantValue<ssize_t>(fieldSeq->m_args[0]);
4416 bool hasTail = (fieldSeq->m_args[1] != VNForNull());
4418 if (isHead && hasTail)
4423 CORINFO_FIELD_HANDLE fldHnd = CORINFO_FIELD_HANDLE(fieldHndVal);
4424 if (fldHnd == FieldSeqStore::FirstElemPseudoField)
4426 printf("#FirstElem");
4428 else if (fldHnd == FieldSeqStore::ConstantIndexPseudoField)
4430 printf("#ConstantIndex");
4434 const char* modName;
4435 const char* fldName = m_pComp->eeGetFieldName(fldHnd, &modName);
4436 printf("%s", fldName);
4442 assert(IsVNFunc(fieldSeq->m_args[1]));
4444 GetVNFunc(fieldSeq->m_args[1], &tail);
4445 vnDumpFieldSeq(comp, &tail, false);
4448 if (isHead && hasTail)
4454 void ValueNumStore::vnDumpMapSelect(Compiler* comp, VNFuncApp* mapSelect)
4456 assert(mapSelect->m_func == VNF_MapSelect); // Precondition.
4458 ValueNum mapVN = mapSelect->m_args[0]; // First arg is the map id
4459 ValueNum indexVN = mapSelect->m_args[1]; // Second arg is the index
4461 comp->vnPrint(mapVN, 0);
4463 comp->vnPrint(indexVN, 0);
4467 void ValueNumStore::vnDumpMapStore(Compiler* comp, VNFuncApp* mapStore)
4469 assert(mapStore->m_func == VNF_MapStore); // Precondition.
4471 ValueNum mapVN = mapStore->m_args[0]; // First arg is the map id
4472 ValueNum indexVN = mapStore->m_args[1]; // Second arg is the index
4473 ValueNum newValVN = mapStore->m_args[2]; // Third arg is the new value
4475 comp->vnPrint(mapVN, 0);
4477 comp->vnPrint(indexVN, 0);
4479 comp->vnPrint(newValVN, 0);
4484 // Static fields, methods.
4485 static UINT8 vnfOpAttribs[VNF_COUNT];
4486 static genTreeOps genTreeOpsIllegalAsVNFunc[] = {GT_IND, // When we do heap memory.
4487 GT_NULLCHECK, GT_QMARK, GT_COLON, GT_LOCKADD, GT_XADD, GT_XCHG,
4488 GT_CMPXCHG, GT_LCLHEAP, GT_BOX,
4490 // These need special semantics:
4491 GT_COMMA, // == second argument (but with exception(s) from first).
4492 GT_ADDR, GT_ARR_BOUNDS_CHECK,
4493 GT_OBJ, // May reference heap memory.
4494 GT_BLK, // May reference heap memory.
4495 GT_INIT_VAL, // Not strictly a pass-through.
4497 // These control-flow operations need no values.
4498 GT_JTRUE, GT_RETURN, GT_SWITCH, GT_RETFILT, GT_CKFINITE};
4500 UINT8* ValueNumStore::s_vnfOpAttribs = nullptr;
4502 void ValueNumStore::InitValueNumStoreStatics()
4504 // Make sure we've gotten constants right...
4505 assert(unsigned(VNFOA_Arity) == (1 << VNFOA_ArityShift));
4506 assert(unsigned(VNFOA_AfterArity) == (unsigned(VNFOA_Arity) << VNFOA_ArityBits));
4508 s_vnfOpAttribs = &vnfOpAttribs[0];
4509 for (unsigned i = 0; i < GT_COUNT; i++)
4511 genTreeOps gtOper = static_cast<genTreeOps>(i);
4513 if (GenTree::OperIsUnary(gtOper))
4517 else if (GenTree::OperIsBinary(gtOper))
4521 // Since GT_ARR_BOUNDS_CHECK is not currently GTK_BINOP
4522 else if (gtOper == GT_ARR_BOUNDS_CHECK)
4526 vnfOpAttribs[i] |= (arity << VNFOA_ArityShift);
4528 if (GenTree::OperIsCommutative(gtOper))
4530 vnfOpAttribs[i] |= VNFOA_Commutative;
4534 // I so wish this wasn't the best way to do this...
4536 int vnfNum = VNF_Boundary + 1; // The macro definition below will update this after using it.
4538 #define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) \
4540 vnfOpAttribs[vnfNum] |= VNFOA_Commutative; \
4542 vnfOpAttribs[vnfNum] |= VNFOA_KnownNonNull; \
4544 vnfOpAttribs[vnfNum] |= VNFOA_SharedStatic; \
4545 vnfOpAttribs[vnfNum] |= (arity << VNFOA_ArityShift); \
4548 #include "valuenumfuncs.h"
4549 #undef ValueNumFuncDef
4551 for (unsigned i = 0; i < _countof(genTreeOpsIllegalAsVNFunc); i++)
4553 vnfOpAttribs[genTreeOpsIllegalAsVNFunc[i]] |= VNFOA_IllegalGenTreeOp;
4558 // Define the name array.
4559 #define ValueNumFuncDef(vnf, arity, commute, knownNonNull, sharedStatic) #vnf,
4561 const char* ValueNumStore::VNFuncNameArr[] = {
4562 #include "valuenumfuncs.h"
4563 #undef ValueNumFuncDef
4567 const char* ValueNumStore::VNFuncName(VNFunc vnf)
4569 if (vnf < VNF_Boundary)
4571 return GenTree::OpName(genTreeOps(vnf));
4575 return VNFuncNameArr[vnf - (VNF_Boundary + 1)];
4579 static const char* s_reservedNameArr[] = {
4580 "$VN.Recursive", // -2 RecursiveVN
4581 "$VN.No", // -1 NoVN
4582 "$VN.Null", // 0 VNForNull()
4583 "$VN.ZeroMap", // 1 VNForZeroMap()
4584 "$VN.ReadOnlyHeap", // 2 VNForROH()
4585 "$VN.Void", // 3 VNForVoid()
4586 "$VN.EmptyExcSet" // 4 VNForEmptyExcSet()
4589 // Returns the string name of "vn" when it is a reserved value number, nullptr otherwise
4591 const char* ValueNumStore::reservedName(ValueNum vn)
4593 int val = vn - ValueNumStore::RecursiveVN; // Add two, making 'RecursiveVN' equal to zero
4594 int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN;
4596 if ((val >= 0) && (val < max))
4598 return s_reservedNameArr[val];
4605 // Returns true if "vn" is a reserved value number
4608 bool ValueNumStore::isReservedVN(ValueNum vn)
4610 int val = vn - ValueNumStore::RecursiveVN; // Adding two, making 'RecursiveVN' equal to zero
4611 int max = ValueNumStore::SRC_NumSpecialRefConsts - ValueNumStore::RecursiveVN;
4613 if ((val >= 0) && (val < max))
4621 void ValueNumStore::RunTests(Compiler* comp)
4623 VNFunc VNF_Add = GenTreeOpToVNFunc(GT_ADD);
4625 ValueNumStore* vns = new (comp->getAllocatorDebugOnly()) ValueNumStore(comp, comp->getAllocatorDebugOnly());
4626 ValueNum vnNull = VNForNull();
4627 assert(vnNull == VNForNull());
4629 ValueNum vnFor1 = vns->VNForIntCon(1);
4630 assert(vnFor1 == vns->VNForIntCon(1));
4631 assert(vns->TypeOfVN(vnFor1) == TYP_INT);
4632 assert(vns->IsVNConstant(vnFor1));
4633 assert(vns->ConstantValue<int>(vnFor1) == 1);
4635 ValueNum vnFor100 = vns->VNForIntCon(100);
4636 assert(vnFor100 == vns->VNForIntCon(100));
4637 assert(vnFor100 != vnFor1);
4638 assert(vns->TypeOfVN(vnFor100) == TYP_INT);
4639 assert(vns->IsVNConstant(vnFor100));
4640 assert(vns->ConstantValue<int>(vnFor100) == 100);
4642 ValueNum vnFor1F = vns->VNForFloatCon(1.0f);
4643 assert(vnFor1F == vns->VNForFloatCon(1.0f));
4644 assert(vnFor1F != vnFor1 && vnFor1F != vnFor100);
4645 assert(vns->TypeOfVN(vnFor1F) == TYP_FLOAT);
4646 assert(vns->IsVNConstant(vnFor1F));
4647 assert(vns->ConstantValue<float>(vnFor1F) == 1.0f);
4649 ValueNum vnFor1D = vns->VNForDoubleCon(1.0);
4650 assert(vnFor1D == vns->VNForDoubleCon(1.0));
4651 assert(vnFor1D != vnFor1F && vnFor1D != vnFor1 && vnFor1D != vnFor100);
4652 assert(vns->TypeOfVN(vnFor1D) == TYP_DOUBLE);
4653 assert(vns->IsVNConstant(vnFor1D));
4654 assert(vns->ConstantValue<double>(vnFor1D) == 1.0);
4656 ValueNum vnRandom1 = vns->VNForExpr(nullptr, TYP_INT);
4657 ValueNum vnForFunc2a = vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1);
4658 assert(vnForFunc2a == vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnRandom1));
4659 assert(vnForFunc2a != vnFor1D && vnForFunc2a != vnFor1F && vnForFunc2a != vnFor1 && vnForFunc2a != vnRandom1);
4660 assert(vns->TypeOfVN(vnForFunc2a) == TYP_INT);
4661 assert(!vns->IsVNConstant(vnForFunc2a));
4662 assert(vns->IsVNFunc(vnForFunc2a));
4664 bool b = vns->GetVNFunc(vnForFunc2a, &fa2a);
4666 assert(fa2a.m_func == VNF_Add && fa2a.m_arity == 2 && fa2a.m_args[0] == vnFor1 && fa2a.m_args[1] == vnRandom1);
4668 ValueNum vnForFunc2b = vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnFor100);
4669 assert(vnForFunc2b == vns->VNForFunc(TYP_INT, VNF_Add, vnFor1, vnFor100));
4670 assert(vnForFunc2b != vnFor1D && vnForFunc2b != vnFor1F && vnForFunc2b != vnFor1 && vnForFunc2b != vnFor100);
4671 assert(vns->TypeOfVN(vnForFunc2b) == TYP_INT);
4672 assert(vns->IsVNConstant(vnForFunc2b));
4673 assert(vns->ConstantValue<int>(vnForFunc2b) == 101);
4675 // printf("Did ValueNumStore::RunTests.\n");
4679 typedef JitExpandArrayStack<BasicBlock*> BlockStack;
4681 // This represents the "to do" state of the value number computation.
4682 struct ValueNumberState
4684 // These two stacks collectively represent the set of blocks that are candidates for
4685 // processing, because at least one predecessor has been processed. Blocks on "m_toDoAllPredsDone"
4686 // have had *all* predecessors processed, and thus are candidates for some extra optimizations.
4687 // Blocks on "m_toDoNotAllPredsDone" have at least one predecessor that has not been processed.
4688 // Blocks are initially on "m_toDoNotAllPredsDone" may be moved to "m_toDoAllPredsDone" when their last
4689 // unprocessed predecessor is processed, thus maintaining the invariants.
4690 BlockStack m_toDoAllPredsDone;
4691 BlockStack m_toDoNotAllPredsDone;
4695 // TBD: This should really be a bitset...
4697 // first bit indicates completed,
4698 // second bit indicates that it's been pushed on all-done stack,
4699 // third bit indicates that it's been pushed on not-all-done stack.
4705 BVB_onAllDone = 0x2,
4706 BVB_onNotAllDone = 0x4,
4709 bool GetVisitBit(unsigned bbNum, BlockVisitBits bvb)
4711 return (m_visited[bbNum] & bvb) != 0;
4713 void SetVisitBit(unsigned bbNum, BlockVisitBits bvb)
4715 m_visited[bbNum] |= bvb;
4718 ValueNumberState(Compiler* comp)
4719 : m_toDoAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
4720 , m_toDoNotAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
4722 , m_visited(new (comp, CMK_ValueNumber) BYTE[comp->fgBBNumMax + 1]())
4726 BasicBlock* ChooseFromNotAllPredsDone()
4728 assert(m_toDoAllPredsDone.Size() == 0);
4729 // If we have no blocks with all preds done, then (ideally, if all cycles have been captured by loops)
4730 // we must have at least one block within a loop. We want to do the loops first. Doing a loop entry block
4731 // should break the cycle, making the rest of the body of the loop (unless there's a nested loop) doable by the
4732 // all-preds-done rule. If several loop entry blocks are available, at least one should have all non-loop preds
4733 // done -- we choose that.
4734 for (unsigned i = 0; i < m_toDoNotAllPredsDone.Size(); i++)
4736 BasicBlock* cand = m_toDoNotAllPredsDone.Get(i);
4738 // Skip any already-completed blocks (a block may have all its preds finished, get added to the
4739 // all-preds-done todo set, and get processed there). Do this by moving the last one down, to
4740 // keep the array compact.
4741 while (GetVisitBit(cand->bbNum, BVB_complete))
4743 if (i + 1 < m_toDoNotAllPredsDone.Size())
4745 cand = m_toDoNotAllPredsDone.Pop();
4746 m_toDoNotAllPredsDone.Set(i, cand);
4750 // "cand" is the last element; delete it.
4751 (void)m_toDoNotAllPredsDone.Pop();
4755 // We may have run out of non-complete candidates above. If so, we're done.
4756 if (i == m_toDoNotAllPredsDone.Size())
4761 // See if "cand" is a loop entry.
4763 if (m_comp->optBlockIsLoopEntry(cand, &lnum))
4765 // "lnum" is the innermost loop of which "cand" is the entry; find the outermost.
4766 unsigned lnumPar = m_comp->optLoopTable[lnum].lpParent;
4767 while (lnumPar != BasicBlock::NOT_IN_LOOP)
4769 if (m_comp->optLoopTable[lnumPar].lpEntry == cand)
4777 lnumPar = m_comp->optLoopTable[lnumPar].lpParent;
4780 bool allNonLoopPredsDone = true;
4781 for (flowList* pred = m_comp->BlockPredsWithEH(cand); pred != nullptr; pred = pred->flNext)
4783 BasicBlock* predBlock = pred->flBlock;
4784 if (!m_comp->optLoopTable[lnum].lpContains(predBlock))
4786 if (!GetVisitBit(predBlock->bbNum, BVB_complete))
4788 allNonLoopPredsDone = false;
4792 if (allNonLoopPredsDone)
4799 // If we didn't find a loop entry block with all non-loop preds done above, then return a random member (if
4801 if (m_toDoNotAllPredsDone.Size() == 0)
4807 return m_toDoNotAllPredsDone.Pop();
4811 // Debugging output that is too detailed for a normal JIT dump...
4812 #define DEBUG_VN_VISIT 0
4814 // Record that "blk" has been visited, and add any unvisited successors of "blk" to the appropriate todo set.
4815 void FinishVisit(BasicBlock* blk)
4817 #ifdef DEBUG_VN_VISIT
4818 JITDUMP("finish(" FMT_BB ").\n", blk->bbNum);
4819 #endif // DEBUG_VN_VISIT
4821 SetVisitBit(blk->bbNum, BVB_complete);
4823 for (BasicBlock* succ : blk->GetAllSuccs(m_comp))
4825 #ifdef DEBUG_VN_VISIT
4826 JITDUMP(" Succ(" FMT_BB ").\n", succ->bbNum);
4827 #endif // DEBUG_VN_VISIT
4829 if (GetVisitBit(succ->bbNum, BVB_complete))
4833 #ifdef DEBUG_VN_VISIT
4834 JITDUMP(" Not yet completed.\n");
4835 #endif // DEBUG_VN_VISIT
4837 bool allPredsVisited = true;
4838 for (flowList* pred = m_comp->BlockPredsWithEH(succ); pred != nullptr; pred = pred->flNext)
4840 BasicBlock* predBlock = pred->flBlock;
4841 if (!GetVisitBit(predBlock->bbNum, BVB_complete))
4843 allPredsVisited = false;
4848 if (allPredsVisited)
4850 #ifdef DEBUG_VN_VISIT
4851 JITDUMP(" All preds complete, adding to allDone.\n");
4852 #endif // DEBUG_VN_VISIT
4854 assert(!GetVisitBit(succ->bbNum, BVB_onAllDone)); // Only last completion of last succ should add to
4856 m_toDoAllPredsDone.Push(succ);
4857 SetVisitBit(succ->bbNum, BVB_onAllDone);
4861 #ifdef DEBUG_VN_VISIT
4862 JITDUMP(" Not all preds complete Adding to notallDone, if necessary...\n");
4863 #endif // DEBUG_VN_VISIT
4865 if (!GetVisitBit(succ->bbNum, BVB_onNotAllDone))
4867 #ifdef DEBUG_VN_VISIT
4868 JITDUMP(" Was necessary.\n");
4869 #endif // DEBUG_VN_VISIT
4870 m_toDoNotAllPredsDone.Push(succ);
4871 SetVisitBit(succ->bbNum, BVB_onNotAllDone);
4879 return m_toDoAllPredsDone.Size() > 0 || m_toDoNotAllPredsDone.Size() > 0;
4883 void Compiler::fgValueNumber()
4886 // This could be a JITDUMP, but some people find it convenient to set a breakpoint on the printf.
4889 printf("\n*************** In fgValueNumber()\n");
4893 // If we skipped SSA, skip VN as well.
4894 if (fgSsaPassesCompleted == 0)
4899 // Allocate the value number store.
4900 assert(fgVNPassesCompleted > 0 || vnStore == nullptr);
4901 if (fgVNPassesCompleted == 0)
4903 CompAllocator allocator(getAllocator(CMK_ValueNumber));
4904 vnStore = new (allocator) ValueNumStore(this, allocator);
4909 // Make sure the memory SSA names have no value numbers.
4910 for (unsigned i = 0; i < lvMemoryPerSsaData.GetCount(); i++)
4912 lvMemoryPerSsaData.GetSsaDefByIndex(i)->m_vnPair = noVnp;
4914 for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
4916 // Now iterate over the block's statements, and their trees.
4917 for (GenTree* stmts = blk->FirstNonPhiDef(); stmts != nullptr; stmts = stmts->gtNext)
4919 assert(stmts->IsStatement());
4920 for (GenTree* tree = stmts->gtStmt.gtStmtList; tree; tree = tree->gtNext)
4922 tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
4928 // Compute the side effects of loops.
4929 optComputeLoopSideEffects();
4931 // At the block level, we will use a modified worklist algorithm. We will have two
4932 // "todo" sets of unvisited blocks. Blocks (other than the entry block) are put in a
4933 // todo set only when some predecessor has been visited, so all blocks have at least one
4934 // predecessor visited. The distinction between the two sets is whether *all* predecessors have
4935 // already been visited. We visit such blocks preferentially if they exist, since phi definitions
4936 // in such blocks will have all arguments defined, enabling a simplification in the case that all
4937 // arguments to the phi have the same VN. If no such blocks exist, we pick a block with at least
4938 // one unvisited predecessor. In this case, we assign a new VN for phi definitions.
4940 // Start by giving incoming arguments value numbers.
4941 // Also give must-init vars a zero of their type.
4942 for (unsigned lclNum = 0; lclNum < lvaCount; lclNum++)
4944 if (!lvaInSsa(lclNum))
4949 LclVarDsc* varDsc = &lvaTable[lclNum];
4950 assert(varDsc->lvTracked);
4952 if (varDsc->lvIsParam)
4954 // We assume that code equivalent to this variable initialization loop
4955 // has been performed when doing SSA naming, so that all the variables we give
4956 // initial VNs to here have been given initial SSA definitions there.
4957 // SSA numbers always start from FIRST_SSA_NUM, and we give the value number to SSA name FIRST_SSA_NUM.
4958 // We use the VNF_InitVal(i) from here so we know that this value is loop-invariant
4960 ValueNum initVal = vnStore->VNForFunc(varDsc->TypeGet(), VNF_InitVal, vnStore->VNForIntCon(lclNum));
4961 LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
4962 ssaDef->m_vnPair.SetBoth(initVal);
4963 ssaDef->m_defLoc.m_blk = fgFirstBB;
4965 else if (info.compInitMem || varDsc->lvMustInit ||
4966 VarSetOps::IsMember(this, fgFirstBB->bbLiveIn, varDsc->lvVarIndex))
4968 // The last clause covers the use-before-def variables (the ones that are live-in to the the first block),
4969 // these are variables that are read before being initialized (at least on some control flow paths)
4970 // if they are not must-init, then they get VNF_InitVal(i), as with the param case.)
4972 bool isZeroed = (info.compInitMem || varDsc->lvMustInit);
4973 ValueNum initVal = ValueNumStore::NoVN; // We must assign a new value to initVal
4974 var_types typ = varDsc->TypeGet();
4978 case TYP_LCLBLK: // The outgoing args area for arm and x64
4979 case TYP_BLK: // A blob of memory
4980 // TYP_BLK is used for the EHSlots LclVar on x86 (aka shadowSPslotsVar)
4981 // and for the lvaInlinedPInvokeFrameVar on x64, arm and x86
4982 // The stack associated with these LclVars are not zero initialized
4983 // thus we set 'initVN' to a new, unique VN.
4985 initVal = vnStore->VNForExpr(fgFirstBB);
4991 // LclVars of TYP_BYREF can be zero-inited.
4992 initVal = vnStore->VNForByrefCon(0);
4996 // Here we have uninitialized TYP_BYREF
4997 initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(lclNum));
5004 // By default we will zero init these LclVars
5005 initVal = vnStore->VNZeroForType(typ);
5009 initVal = vnStore->VNForFunc(typ, VNF_InitVal, vnStore->VNForIntCon(lclNum));
5014 bool isVarargParam = (lclNum == lvaVarargsBaseOfStkArgs || lclNum == lvaVarargsHandleArg);
5016 initVal = vnStore->VNForExpr(fgFirstBB); // a new, unique VN.
5018 assert(initVal != ValueNumStore::NoVN);
5020 LclSsaVarDsc* ssaDef = varDsc->GetPerSsaData(SsaConfig::FIRST_SSA_NUM);
5021 ssaDef->m_vnPair.SetBoth(initVal);
5022 ssaDef->m_defLoc.m_blk = fgFirstBB;
5025 // Give memory an initial value number (about which we know nothing).
5026 ValueNum memoryInitVal = vnStore->VNForFunc(TYP_REF, VNF_InitVal, vnStore->VNForIntCon(-1)); // Use -1 for memory.
5027 GetMemoryPerSsaData(SsaConfig::FIRST_SSA_NUM)->m_vnPair.SetBoth(memoryInitVal);
5031 printf("Memory Initial Value in BB01 is: " FMT_VN "\n", memoryInitVal);
5035 ValueNumberState vs(this);
5037 // Push the first block. This has no preds.
5038 vs.m_toDoAllPredsDone.Push(fgFirstBB);
5040 while (vs.ToDoExists())
5042 while (vs.m_toDoAllPredsDone.Size() > 0)
5044 BasicBlock* toDo = vs.m_toDoAllPredsDone.Pop();
5045 fgValueNumberBlock(toDo);
5046 // Record that we've visited "toDo", and add successors to the right sets.
5047 vs.FinishVisit(toDo);
5049 // OK, we've run out of blocks whose predecessors are done. Pick one whose predecessors are not all done,
5050 // process that. This may make more "all-done" blocks, so we'll go around the outer loop again --
5051 // note that this is an "if", not a "while" loop.
5052 if (vs.m_toDoNotAllPredsDone.Size() > 0)
5054 BasicBlock* toDo = vs.ChooseFromNotAllPredsDone();
5055 if (toDo == nullptr)
5057 continue; // We may have run out, because of completed blocks on the not-all-preds done list.
5060 fgValueNumberBlock(toDo);
5061 // Record that we've visited "toDo", and add successors to the right sest.
5062 vs.FinishVisit(toDo);
5070 fgVNPassesCompleted++;
5073 void Compiler::fgValueNumberBlock(BasicBlock* blk)
5078 compCurStmtNum = blk->bbStmtNum - 1; // Set compCurStmtNum
5081 unsigned outerLoopNum = BasicBlock::NOT_IN_LOOP;
5083 // First: visit phi's. If "newVNForPhis", give them new VN's. If not,
5084 // first check to see if all phi args have the same value.
5085 GenTree* firstNonPhi = blk->FirstNonPhiDef();
5086 for (GenTree* phiDefs = blk->bbTreeList; phiDefs != firstNonPhi; phiDefs = phiDefs->gtNext)
5088 // TODO-Cleanup: It has been proposed that we should have an IsPhiDef predicate. We would use it
5089 // in Block::FirstNonPhiDef as well.
5090 GenTree* phiDef = phiDefs->gtStmt.gtStmtExpr;
5091 assert(phiDef->OperGet() == GT_ASG);
5092 GenTreeLclVarCommon* newSsaVar = phiDef->gtOp.gtOp1->AsLclVarCommon();
5094 ValueNumPair phiAppVNP;
5095 ValueNumPair sameVNPair;
5097 GenTree* phiFunc = phiDef->gtOp.gtOp2;
5099 // At this point a GT_PHI node should never have a nullptr for gtOp1
5100 // and the gtOp1 should always be a GT_LIST node.
5101 GenTree* phiOp1 = phiFunc->gtOp.gtOp1;
5102 noway_assert(phiOp1 != nullptr);
5103 noway_assert(phiOp1->OperGet() == GT_LIST);
5105 GenTreeArgList* phiArgs = phiFunc->gtOp.gtOp1->AsArgList();
5107 // A GT_PHI node should have more than one argument.
5108 noway_assert(phiArgs->Rest() != nullptr);
5110 GenTreeLclVarCommon* phiArg = phiArgs->Current()->AsLclVarCommon();
5111 phiArgs = phiArgs->Rest();
5113 phiAppVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum));
5114 bool allSameLib = true;
5115 bool allSameCons = true;
5116 sameVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair;
5117 if (!sameVNPair.BothDefined())
5120 allSameCons = false;
5122 while (phiArgs != nullptr)
5124 phiArg = phiArgs->Current()->AsLclVarCommon();
5125 // Set the VN of the phi arg.
5126 phiArg->gtVNPair = lvaTable[phiArg->gtLclNum].GetPerSsaData(phiArg->gtSsaNum)->m_vnPair;
5127 if (phiArg->gtVNPair.BothDefined())
5129 if (phiArg->gtVNPair.GetLiberal() != sameVNPair.GetLiberal())
5133 if (phiArg->gtVNPair.GetConservative() != sameVNPair.GetConservative())
5135 allSameCons = false;
5141 allSameCons = false;
5143 ValueNumPair phiArgSsaVNP;
5144 phiArgSsaVNP.SetBoth(vnStore->VNForIntCon(phiArg->gtSsaNum));
5145 phiAppVNP = vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_Phi, phiArgSsaVNP, phiAppVNP);
5146 phiArgs = phiArgs->Rest();
5149 ValueNumPair newVNPair;
5152 newVNPair.SetLiberal(sameVNPair.GetLiberal());
5156 newVNPair.SetLiberal(phiAppVNP.GetLiberal());
5160 newVNPair.SetConservative(sameVNPair.GetConservative());
5164 newVNPair.SetConservative(phiAppVNP.GetConservative());
5167 LclSsaVarDsc* newSsaVarDsc = lvaTable[newSsaVar->gtLclNum].GetPerSsaData(newSsaVar->GetSsaNum());
5168 // If all the args of the phi had the same value(s, liberal and conservative), then there wasn't really
5169 // a reason to have the phi -- just pass on that value.
5170 if (allSameLib && allSameCons)
5172 newSsaVarDsc->m_vnPair = newVNPair;
5176 printf("In SSA definition, incoming phi args all same, set VN of local %d/%d to ",
5177 newSsaVar->GetLclNum(), newSsaVar->GetSsaNum());
5178 vnpPrint(newVNPair, 1);
5185 // They were not the same; we need to create a phi definition.
5186 ValueNumPair lclNumVNP;
5187 lclNumVNP.SetBoth(ValueNum(newSsaVar->GetLclNum()));
5188 ValueNumPair ssaNumVNP;
5189 ssaNumVNP.SetBoth(ValueNum(newSsaVar->GetSsaNum()));
5190 ValueNumPair vnPhiDef =
5191 vnStore->VNPairForFunc(newSsaVar->TypeGet(), VNF_PhiDef, lclNumVNP, ssaNumVNP, phiAppVNP);
5192 newSsaVarDsc->m_vnPair = vnPhiDef;
5196 printf("SSA definition: set VN of local %d/%d to ", newSsaVar->GetLclNum(), newSsaVar->GetSsaNum());
5197 vnpPrint(vnPhiDef, 1);
5204 // Now do the same for each MemoryKind.
5205 for (MemoryKind memoryKind : allMemoryKinds())
5207 // Is there a phi for this block?
5208 if (blk->bbMemorySsaPhiFunc[memoryKind] == nullptr)
5210 fgCurMemoryVN[memoryKind] = GetMemoryPerSsaData(blk->bbMemorySsaNumIn[memoryKind])->m_vnPair.GetLiberal();
5211 assert(fgCurMemoryVN[memoryKind] != ValueNumStore::NoVN);
5215 if ((memoryKind == ByrefExposed) && byrefStatesMatchGcHeapStates)
5217 // The update for GcHeap will copy its result to ByrefExposed.
5218 assert(memoryKind < GcHeap);
5219 assert(blk->bbMemorySsaPhiFunc[memoryKind] == blk->bbMemorySsaPhiFunc[GcHeap]);
5224 ValueNum newMemoryVN;
5225 if (optBlockIsLoopEntry(blk, &loopNum))
5227 newMemoryVN = fgMemoryVNForLoopSideEffects(memoryKind, blk, loopNum);
5231 // Are all the VN's the same?
5232 BasicBlock::MemoryPhiArg* phiArgs = blk->bbMemorySsaPhiFunc[memoryKind];
5233 assert(phiArgs != BasicBlock::EmptyMemoryPhiDef);
5234 // There should be > 1 args to a phi.
5235 assert(phiArgs->m_nextArg != nullptr);
5236 ValueNum phiAppVN = vnStore->VNForIntCon(phiArgs->GetSsaNum());
5237 JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiAppVN, phiArgs->GetSsaNum());
5238 bool allSame = true;
5239 ValueNum sameVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
5240 if (sameVN == ValueNumStore::NoVN)
5244 phiArgs = phiArgs->m_nextArg;
5245 while (phiArgs != nullptr)
5247 ValueNum phiArgVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
5248 if (phiArgVN == ValueNumStore::NoVN || phiArgVN != sameVN)
5253 ValueNum oldPhiAppVN = phiAppVN;
5255 unsigned phiArgSSANum = phiArgs->GetSsaNum();
5256 ValueNum phiArgSSANumVN = vnStore->VNForIntCon(phiArgSSANum);
5257 JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiArgSSANumVN, phiArgSSANum);
5258 phiAppVN = vnStore->VNForFunc(TYP_REF, VNF_Phi, phiArgSSANumVN, phiAppVN);
5259 JITDUMP(" Building phi application: $%x = phi($%x, $%x).\n", phiAppVN, phiArgSSANumVN,
5261 phiArgs = phiArgs->m_nextArg;
5265 newMemoryVN = sameVN;
5270 vnStore->VNForFunc(TYP_REF, VNF_PhiMemoryDef, vnStore->VNForHandle(ssize_t(blk), 0), phiAppVN);
5273 GetMemoryPerSsaData(blk->bbMemorySsaNumIn[memoryKind])->m_vnPair.SetLiberal(newMemoryVN);
5274 fgCurMemoryVN[memoryKind] = newMemoryVN;
5275 if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates)
5277 // Keep the CurMemoryVNs in sync
5278 fgCurMemoryVN[ByrefExposed] = newMemoryVN;
5284 printf("The SSA definition for %s (#%d) at start of " FMT_BB " is ", memoryKindNames[memoryKind],
5285 blk->bbMemorySsaNumIn[memoryKind], blk->bbNum);
5286 vnPrint(fgCurMemoryVN[memoryKind], 1);
5292 // Now iterate over the remaining statements, and their trees.
5293 for (GenTree* stmt = firstNonPhi; stmt != nullptr; stmt = stmt->gtNext)
5295 assert(stmt->IsStatement());
5301 printf("\n***** " FMT_BB ", stmt %d (before)\n", blk->bbNum, compCurStmtNum);
5302 gtDispTree(stmt->gtStmt.gtStmtExpr);
5307 for (GenTree* tree = stmt->gtStmt.gtStmtList; tree; tree = tree->gtNext)
5309 fgValueNumberTree(tree);
5315 printf("\n***** " FMT_BB ", stmt %d (after)\n", blk->bbNum, compCurStmtNum);
5316 gtDispTree(stmt->gtStmt.gtStmtExpr);
5320 printf("---------\n");
5326 for (MemoryKind memoryKind : allMemoryKinds())
5328 if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates)
5330 // The update to the shared SSA data will have already happened for ByrefExposed.
5331 assert(memoryKind > ByrefExposed);
5332 assert(blk->bbMemorySsaNumOut[memoryKind] == blk->bbMemorySsaNumOut[ByrefExposed]);
5333 assert(GetMemoryPerSsaData(blk->bbMemorySsaNumOut[memoryKind])->m_vnPair.GetLiberal() ==
5334 fgCurMemoryVN[memoryKind]);
5338 if (blk->bbMemorySsaNumOut[memoryKind] != blk->bbMemorySsaNumIn[memoryKind])
5340 GetMemoryPerSsaData(blk->bbMemorySsaNumOut[memoryKind])->m_vnPair.SetLiberal(fgCurMemoryVN[memoryKind]);
5344 compCurBB = nullptr;
5347 ValueNum Compiler::fgMemoryVNForLoopSideEffects(MemoryKind memoryKind,
5348 BasicBlock* entryBlock,
5349 unsigned innermostLoopNum)
5351 // "loopNum" is the innermost loop for which "blk" is the entry; find the outermost one.
5352 assert(innermostLoopNum != BasicBlock::NOT_IN_LOOP);
5353 unsigned loopsInNest = innermostLoopNum;
5354 unsigned loopNum = innermostLoopNum;
5355 while (loopsInNest != BasicBlock::NOT_IN_LOOP)
5357 if (optLoopTable[loopsInNest].lpEntry != entryBlock)
5361 loopNum = loopsInNest;
5362 loopsInNest = optLoopTable[loopsInNest].lpParent;
5368 printf("Computing %s state for block " FMT_BB ", entry block for loops %d to %d:\n",
5369 memoryKindNames[memoryKind], entryBlock->bbNum, innermostLoopNum, loopNum);
5373 // If this loop has memory havoc effects, just use a new, unique VN.
5374 if (optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind])
5376 ValueNum res = vnStore->VNForExpr(entryBlock, TYP_REF);
5380 printf(" Loop %d has memory havoc effect; heap state is new fresh $%x.\n", loopNum, res);
5386 // Otherwise, find the predecessors of the entry block that are not in the loop.
5387 // If there is only one such, use its memory value as the "base." If more than one,
5388 // use a new unique VN.
5389 BasicBlock* nonLoopPred = nullptr;
5390 bool multipleNonLoopPreds = false;
5391 for (flowList* pred = BlockPredsWithEH(entryBlock); pred != nullptr; pred = pred->flNext)
5393 BasicBlock* predBlock = pred->flBlock;
5394 if (!optLoopTable[loopNum].lpContains(predBlock))
5396 if (nonLoopPred == nullptr)
5398 nonLoopPred = predBlock;
5405 printf(" Entry block has >1 non-loop preds: (at least) " FMT_BB " and " FMT_BB ".\n",
5406 nonLoopPred->bbNum, predBlock->bbNum);
5409 multipleNonLoopPreds = true;
5414 if (multipleNonLoopPreds)
5416 ValueNum res = vnStore->VNForExpr(entryBlock, TYP_REF);
5420 printf(" Therefore, memory state is new, fresh $%x.\n", res);
5425 // Otherwise, there is a single non-loop pred.
5426 assert(nonLoopPred != nullptr);
5427 // What is its memory post-state?
5428 ValueNum newMemoryVN = GetMemoryPerSsaData(nonLoopPred->bbMemorySsaNumOut[memoryKind])->m_vnPair.GetLiberal();
5429 assert(newMemoryVN !=
5430 ValueNumStore::NoVN); // We must have processed the single non-loop pred before reaching the loop entry.
5435 printf(" Init %s state is $%x, with new, fresh VN at:\n", memoryKindNames[memoryKind], newMemoryVN);
5438 // Modify "base" by setting all the modified fields/field maps/array maps to unknown values.
5439 // These annotations apply specifically to the GcHeap, where we disambiguate across such stores.
5440 if (memoryKind == GcHeap)
5442 // First the fields/field maps.
5443 Compiler::LoopDsc::FieldHandleSet* fieldsMod = optLoopTable[loopNum].lpFieldsModified;
5444 if (fieldsMod != nullptr)
5446 for (Compiler::LoopDsc::FieldHandleSet::KeyIterator ki = fieldsMod->Begin(); !ki.Equal(fieldsMod->End());
5449 CORINFO_FIELD_HANDLE fldHnd = ki.Get();
5450 ValueNum fldHndVN = vnStore->VNForHandle(ssize_t(fldHnd), GTF_ICON_FIELD_HDL);
5455 const char* modName;
5456 const char* fldName = eeGetFieldName(fldHnd, &modName);
5457 printf(" VNForHandle(Fseq[%s]) is " FMT_VN "\n", fldName, fldHndVN);
5462 vnStore->VNForMapStore(TYP_REF, newMemoryVN, fldHndVN, vnStore->VNForExpr(entryBlock, TYP_REF));
5465 // Now do the array maps.
5466 Compiler::LoopDsc::ClassHandleSet* elemTypesMod = optLoopTable[loopNum].lpArrayElemTypesModified;
5467 if (elemTypesMod != nullptr)
5469 for (Compiler::LoopDsc::ClassHandleSet::KeyIterator ki = elemTypesMod->Begin();
5470 !ki.Equal(elemTypesMod->End()); ++ki)
5472 CORINFO_CLASS_HANDLE elemClsHnd = ki.Get();
5477 var_types elemTyp = DecodeElemType(elemClsHnd);
5478 if (varTypeIsStruct(elemTyp))
5480 printf(" Array map %s[]\n", eeGetClassName(elemClsHnd));
5484 printf(" Array map %s[]\n", varTypeName(elemTyp));
5489 ValueNum elemTypeVN = vnStore->VNForHandle(ssize_t(elemClsHnd), GTF_ICON_CLASS_HDL);
5490 ValueNum uniqueVN = vnStore->VNForExpr(entryBlock, TYP_REF);
5491 newMemoryVN = vnStore->VNForMapStore(TYP_REF, newMemoryVN, elemTypeVN, uniqueVN);
5497 // If there were any fields/elements modified, this should have been recorded as havoc
5498 // for ByrefExposed.
5499 assert(memoryKind == ByrefExposed);
5500 assert((optLoopTable[loopNum].lpFieldsModified == nullptr) ||
5501 optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
5502 assert((optLoopTable[loopNum].lpArrayElemTypesModified == nullptr) ||
5503 optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
5509 printf(" Final %s state is $%x.\n", memoryKindNames[memoryKind], newMemoryVN);
5515 void Compiler::fgMutateGcHeap(GenTree* tree DEBUGARG(const char* msg))
5517 // Update the current memory VN, and if we're tracking the heap SSA # caused by this node, record it.
5518 recordGcHeapStore(tree, vnStore->VNForExpr(compCurBB, TYP_REF) DEBUGARG(msg));
5521 void Compiler::fgMutateAddressExposedLocal(GenTree* tree DEBUGARG(const char* msg))
5523 // Update the current ByrefExposed VN, and if we're tracking the heap SSA # caused by this node, record it.
5524 recordAddressExposedLocalStore(tree, vnStore->VNForExpr(compCurBB) DEBUGARG(msg));
5527 void Compiler::recordGcHeapStore(GenTree* curTree, ValueNum gcHeapVN DEBUGARG(const char* msg))
5529 // bbMemoryDef must include GcHeap for any block that mutates the GC Heap
5530 // and GC Heap mutations are also ByrefExposed mutations
5531 assert((compCurBB->bbMemoryDef & memoryKindSet(GcHeap, ByrefExposed)) == memoryKindSet(GcHeap, ByrefExposed));
5532 fgCurMemoryVN[GcHeap] = gcHeapVN;
5534 if (byrefStatesMatchGcHeapStates)
5536 // Since GcHeap and ByrefExposed share SSA nodes, they need to share
5537 // value numbers too.
5538 fgCurMemoryVN[ByrefExposed] = gcHeapVN;
5542 // GcHeap and ByrefExposed have different defnums and VNs. We conservatively
5543 // assume that this GcHeap store may alias any byref load/store, so don't
5544 // bother trying to record the map/select stuff, and instead just an opaque VN
5546 fgCurMemoryVN[ByrefExposed] = vnStore->VNForExpr(compCurBB);
5552 printf(" fgCurMemoryVN[GcHeap] assigned for %s at ", msg);
5553 Compiler::printTreeID(curTree);
5554 printf(" to VN: " FMT_VN ".\n", gcHeapVN);
5558 // If byrefStatesMatchGcHeapStates is true, then since GcHeap and ByrefExposed share
5559 // their SSA map entries, the below will effectively update both.
5560 fgValueNumberRecordMemorySsa(GcHeap, curTree);
5563 void Compiler::recordAddressExposedLocalStore(GenTree* curTree, ValueNum memoryVN DEBUGARG(const char* msg))
5565 // This should only happen if GcHeap and ByrefExposed are being tracked separately;
5566 // otherwise we'd go through recordGcHeapStore.
5567 assert(!byrefStatesMatchGcHeapStates);
5569 // bbMemoryDef must include ByrefExposed for any block that mutates an address-exposed local
5570 assert((compCurBB->bbMemoryDef & memoryKindSet(ByrefExposed)) != 0);
5571 fgCurMemoryVN[ByrefExposed] = memoryVN;
5576 printf(" fgCurMemoryVN[ByrefExposed] assigned for %s at ", msg);
5577 Compiler::printTreeID(curTree);
5578 printf(" to VN: " FMT_VN ".\n", memoryVN);
5582 fgValueNumberRecordMemorySsa(ByrefExposed, curTree);
5585 void Compiler::fgValueNumberRecordMemorySsa(MemoryKind memoryKind, GenTree* tree)
5588 if (GetMemorySsaMap(memoryKind)->Lookup(tree, &ssaNum))
5590 GetMemoryPerSsaData(ssaNum)->m_vnPair.SetLiberal(fgCurMemoryVN[memoryKind]);
5595 Compiler::printTreeID(tree);
5596 printf(" sets %s SSA # %d to VN $%x: ", memoryKindNames[memoryKind], ssaNum, fgCurMemoryVN[memoryKind]);
5597 vnStore->vnDump(this, fgCurMemoryVN[memoryKind]);
5604 // The input 'tree' is a leaf node that is a constant
5605 // Assign the proper value number to the tree
5606 void Compiler::fgValueNumberTreeConst(GenTree* tree)
5608 genTreeOps oper = tree->OperGet();
5609 var_types typ = tree->TypeGet();
5610 assert(GenTree::OperIsConst(oper));
5623 if (tree->IsCnsIntOrI() && tree->IsIconHandle())
5625 tree->gtVNPair.SetBoth(
5626 vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
5628 else if ((typ == TYP_LONG) || (typ == TYP_ULONG))
5630 tree->gtVNPair.SetBoth(vnStore->VNForLongCon(INT64(tree->gtIntConCommon.LngValue())));
5634 tree->gtVNPair.SetBoth(vnStore->VNForIntCon(int(tree->gtIntConCommon.IconValue())));
5639 tree->gtVNPair.SetBoth(vnStore->VNForFloatCon((float)tree->gtDblCon.gtDconVal));
5642 tree->gtVNPair.SetBoth(vnStore->VNForDoubleCon(tree->gtDblCon.gtDconVal));
5645 if (tree->gtIntConCommon.IconValue() == 0)
5647 tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
5651 assert(tree->gtFlags == GTF_ICON_STR_HDL); // Constant object can be only frozen string.
5652 tree->gtVNPair.SetBoth(
5653 vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
5658 if (tree->gtIntConCommon.IconValue() == 0)
5660 tree->gtVNPair.SetBoth(ValueNumStore::VNForNull());
5664 assert(tree->IsCnsIntOrI());
5666 if (tree->IsIconHandle())
5668 tree->gtVNPair.SetBoth(
5669 vnStore->VNForHandle(ssize_t(tree->gtIntConCommon.IconValue()), tree->GetIconHandleFlag()));
5673 tree->gtVNPair.SetBoth(vnStore->VNForByrefCon(tree->gtIntConCommon.IconValue()));
5683 //------------------------------------------------------------------------
5684 // fgValueNumberBlockAssignment: Perform value numbering for block assignments.
5687 // tree - the block assignment to be value numbered.
5693 // 'tree' must be a block assignment (GT_INITBLK, GT_COPYBLK, GT_COPYOBJ).
5695 void Compiler::fgValueNumberBlockAssignment(GenTree* tree)
5697 GenTree* lhs = tree->gtGetOp1();
5698 GenTree* rhs = tree->gtGetOp2();
5700 // Sometimes we query the memory ssa map in an assertion, and need a dummy location for the ignored result.
5701 unsigned memorySsaNum;
5704 if (tree->OperIsInitBlkOp())
5706 GenTreeLclVarCommon* lclVarTree;
5709 if (tree->DefinesLocal(this, &lclVarTree, &isEntire))
5711 assert(lclVarTree->gtFlags & GTF_VAR_DEF);
5712 // Should not have been recorded as updating the GC heap.
5713 assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum));
5715 unsigned lclNum = lclVarTree->GetLclNum();
5717 // Ignore vars that we excluded from SSA (for example, because they're address-exposed). They don't have
5718 // SSA names in which to store VN's on defs. We'll yield unique VN's when we read from them.
5719 if (lvaInSsa(lclNum))
5721 // Should not have been recorded as updating ByrefExposed.
5722 assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree, &memorySsaNum));
5724 unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
5726 ValueNum initBlkVN = ValueNumStore::NoVN;
5727 GenTree* initConst = rhs;
5728 if (isEntire && initConst->OperGet() == GT_CNS_INT)
5730 unsigned initVal = 0xFF & (unsigned)initConst->AsIntConCommon()->IconValue();
5733 initBlkVN = vnStore->VNZeroForType(lclVarTree->TypeGet());
5736 ValueNum lclVarVN = (initBlkVN != ValueNumStore::NoVN)
5738 : vnStore->VNForExpr(compCurBB, var_types(lvaTable[lclNum].lvType));
5740 lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair.SetBoth(lclVarVN);
5744 printf("N%03u ", tree->gtSeqNum);
5745 Compiler::printTreeID(tree);
5747 gtDispNodeName(tree);
5748 printf(" V%02u/%d => ", lclNum, lclDefSsaNum);
5749 vnPrint(lclVarVN, 1);
5754 else if (lvaVarAddrExposed(lclVarTree->gtLclNum))
5756 fgMutateAddressExposedLocal(tree DEBUGARG("INITBLK - address-exposed local"));
5761 // For now, arbitrary side effect on GcHeap/ByrefExposed.
5762 // TODO-CQ: Why not be complete, and get this case right?
5763 fgMutateGcHeap(tree DEBUGARG("INITBLK - non local"));
5765 // Initblock's are of type void. Give them the void "value" -- they may occur in argument lists, which we
5766 // want to be able to give VN's to.
5767 tree->gtVNPair.SetBoth(ValueNumStore::VNForVoid());
5771 assert(tree->OperIsCopyBlkOp());
5772 // TODO-Cleanup: We should factor things so that we uniformly rely on "PtrTo" VN's, and
5773 // the memory cases can be shared with assignments.
5774 GenTreeLclVarCommon* lclVarTree = nullptr;
5775 bool isEntire = false;
5776 // Note that we don't care about exceptions here, since we're only using the values
5777 // to perform an assignment (which happens after any exceptions are raised...)
5779 if (tree->DefinesLocal(this, &lclVarTree, &isEntire))
5781 // Should not have been recorded as updating the GC heap.
5782 assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum));
5784 unsigned lhsLclNum = lclVarTree->GetLclNum();
5785 FieldSeqNode* lhsFldSeq = nullptr;
5786 // If it's excluded from SSA, don't need to do anything.
5787 if (lvaInSsa(lhsLclNum))
5789 // Should not have been recorded as updating ByrefExposed.
5790 assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree, &memorySsaNum));
5792 unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
5794 if (lhs->IsLocalExpr(this, &lclVarTree, &lhsFldSeq) ||
5795 (lhs->OperIsBlk() && (lhs->AsBlk()->gtBlkSize == lvaLclSize(lhsLclNum))))
5797 noway_assert(lclVarTree->gtLclNum == lhsLclNum);
5802 if (lhs->OperIsBlk())
5804 lhsAddr = lhs->AsBlk()->Addr();
5808 assert(lhs->OperGet() == GT_IND);
5809 lhsAddr = lhs->gtOp.gtOp1;
5812 // For addr-of-local expressions, lib/cons shouldn't matter.
5813 assert(lhsAddr->gtVNPair.BothEqual());
5814 ValueNum lhsAddrVN = lhsAddr->GetVN(VNK_Liberal);
5816 // Unpack the PtrToLoc value number of the address.
5817 assert(vnStore->IsVNFunc(lhsAddrVN));
5819 VNFuncApp lhsAddrFuncApp;
5820 vnStore->GetVNFunc(lhsAddrVN, &lhsAddrFuncApp);
5822 assert(lhsAddrFuncApp.m_func == VNF_PtrToLoc);
5823 assert(vnStore->IsVNConstant(lhsAddrFuncApp.m_args[0]) &&
5824 vnStore->ConstantValue<unsigned>(lhsAddrFuncApp.m_args[0]) == lhsLclNum);
5826 lhsFldSeq = vnStore->FieldSeqVNToFieldSeq(lhsAddrFuncApp.m_args[1]);
5829 // Now we need to get the proper RHS.
5830 GenTreeLclVarCommon* rhsLclVarTree = nullptr;
5831 LclVarDsc* rhsVarDsc = nullptr;
5832 FieldSeqNode* rhsFldSeq = nullptr;
5833 ValueNumPair rhsVNPair;
5834 bool isNewUniq = false;
5835 if (!rhs->OperIsIndir())
5837 if (rhs->IsLocalExpr(this, &rhsLclVarTree, &rhsFldSeq))
5839 unsigned rhsLclNum = rhsLclVarTree->GetLclNum();
5840 rhsVarDsc = &lvaTable[rhsLclNum];
5841 if (!lvaInSsa(rhsLclNum) || rhsFldSeq == FieldSeqStore::NotAField())
5843 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, rhsLclVarTree->TypeGet()));
5848 rhsVNPair = lvaTable[rhsLclVarTree->GetLclNum()]
5849 .GetPerSsaData(rhsLclVarTree->GetSsaNum())
5851 var_types indType = rhsLclVarTree->TypeGet();
5853 rhsVNPair = vnStore->VNPairApplySelectors(rhsVNPair, rhsFldSeq, indType);
5858 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, rhs->TypeGet()));
5864 GenTree* srcAddr = rhs->AsIndir()->Addr();
5865 VNFuncApp srcAddrFuncApp;
5866 if (srcAddr->IsLocalAddrExpr(this, &rhsLclVarTree, &rhsFldSeq))
5868 unsigned rhsLclNum = rhsLclVarTree->GetLclNum();
5869 rhsVarDsc = &lvaTable[rhsLclNum];
5870 if (!lvaInSsa(rhsLclNum) || rhsFldSeq == FieldSeqStore::NotAField())
5876 rhsVNPair = lvaTable[rhsLclVarTree->GetLclNum()]
5877 .GetPerSsaData(rhsLclVarTree->GetSsaNum())
5879 var_types indType = rhsLclVarTree->TypeGet();
5881 rhsVNPair = vnStore->VNPairApplySelectors(rhsVNPair, rhsFldSeq, indType);
5884 else if (vnStore->GetVNFunc(vnStore->VNNormVal(srcAddr->gtVNPair.GetLiberal()), &srcAddrFuncApp))
5886 if (srcAddrFuncApp.m_func == VNF_PtrToStatic)
5888 var_types indType = lclVarTree->TypeGet();
5889 ValueNum fieldSeqVN = srcAddrFuncApp.m_args[0];
5891 FieldSeqNode* zeroOffsetFldSeq = nullptr;
5892 if (GetZeroOffsetFieldMap()->Lookup(srcAddr, &zeroOffsetFldSeq))
5895 vnStore->FieldSeqVNAppend(fieldSeqVN, vnStore->VNForFieldSeq(zeroOffsetFldSeq));
5898 FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN);
5900 if (fldSeqForStaticVar != FieldSeqStore::NotAField())
5902 // We model statics as indices into GcHeap (which is a subset of ByrefExposed).
5903 ValueNum selectedStaticVar;
5904 size_t structSize = 0;
5905 selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap],
5906 fldSeqForStaticVar, &structSize);
5908 vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize);
5910 rhsVNPair.SetLiberal(selectedStaticVar);
5911 rhsVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType));
5915 JITDUMP(" *** Missing field sequence info for Src/RHS of COPYBLK\n");
5916 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); // a new unique value number
5919 else if (srcAddrFuncApp.m_func == VNF_PtrToArrElem)
5922 fgValueNumberArrIndexVal(nullptr, &srcAddrFuncApp, vnStore->VNForEmptyExcSet());
5923 rhsVNPair.SetLiberal(elemLib);
5924 rhsVNPair.SetConservative(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet()));
5937 if (lhsFldSeq == FieldSeqStore::NotAField())
5939 // We don't have proper field sequence information for the lhs
5941 JITDUMP(" *** Missing field sequence info for Dst/LHS of COPYBLK\n");
5944 else if (lhsFldSeq != nullptr && isEntire)
5946 // This can occur in for structs with one field, itself of a struct type.
5947 // We won't promote these.
5948 // TODO-Cleanup: decide what exactly to do about this.
5949 // Always treat them as maps, making them use/def, or reconstitute the
5953 else if (!isNewUniq)
5955 ValueNumPair oldLhsVNPair = lvaTable[lhsLclNum].GetPerSsaData(lclVarTree->GetSsaNum())->m_vnPair;
5956 rhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lhsFldSeq, rhsVNPair,
5957 lclVarTree->TypeGet(), compCurBB);
5962 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet()));
5965 lvaTable[lhsLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = vnStore->VNPNormVal(rhsVNPair);
5971 Compiler::printTreeID(tree);
5972 printf(" assigned VN to local var V%02u/%d: ", lhsLclNum, lclDefSsaNum);
5975 printf("new uniq ");
5977 vnpPrint(rhsVNPair, 1);
5982 else if (lvaVarAddrExposed(lhsLclNum))
5984 fgMutateAddressExposedLocal(tree DEBUGARG("COPYBLK - address-exposed local"));
5989 // For now, arbitrary side effect on GcHeap/ByrefExposed.
5990 // TODO-CQ: Why not be complete, and get this case right?
5991 fgMutateGcHeap(tree DEBUGARG("COPYBLK - non local"));
5993 // Copyblock's are of type void. Give them the void "value" -- they may occur in argument lists, which we want
5994 // to be able to give VN's to.
5995 tree->gtVNPair.SetBoth(ValueNumStore::VNForVoid());
5999 void Compiler::fgValueNumberTree(GenTree* tree)
6001 genTreeOps oper = tree->OperGet();
6004 // TODO-CQ: For now TYP_SIMD values are not handled by value numbering to be amenable for CSE'ing.
6005 if (oper == GT_SIMD)
6007 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_UNKNOWN));
6012 #ifdef FEATURE_HW_INTRINSICS
6013 if (oper == GT_HWIntrinsic)
6015 // TODO-CQ: For now hardware intrinsics are not handled by value numbering to be amenable for CSE'ing.
6016 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_UNKNOWN));
6018 GenTreeHWIntrinsic* hwIntrinsicNode = tree->AsHWIntrinsic();
6019 assert(hwIntrinsicNode != nullptr);
6021 // For safety/correctness we must mutate the global heap valuenumber
6022 // for any HW intrinsic that performs a memory store operation
6023 if (hwIntrinsicNode->OperIsMemoryStore())
6025 fgMutateGcHeap(tree DEBUGARG("HWIntrinsic - MemoryStore"));
6030 #endif // FEATURE_HW_INTRINSICS
6032 var_types typ = tree->TypeGet();
6033 if (GenTree::OperIsConst(oper))
6035 // If this is a struct assignment, with a constant rhs, it is an initBlk, and it is not
6036 // really useful to value number the constant.
6037 if (!varTypeIsStruct(tree))
6039 fgValueNumberTreeConst(tree);
6042 else if (GenTree::OperIsLeaf(oper))
6049 GenTreeLclVarCommon* lcl = tree->AsLclVarCommon();
6050 unsigned lclNum = lcl->gtLclNum;
6052 if ((lcl->gtFlags & GTF_VAR_DEF) == 0 ||
6053 (lcl->gtFlags & GTF_VAR_USEASG)) // If it is a "pure" def, will handled as part of the assignment.
6055 LclVarDsc* varDsc = &lvaTable[lcl->gtLclNum];
6056 if (varDsc->lvPromoted && varDsc->lvFieldCnt == 1)
6058 // If the promoted var has only one field var, treat like a use of the field var.
6059 lclNum = varDsc->lvFieldLclStart;
6062 // Initialize to the undefined value, so we know whether we hit any of the cases here.
6063 lcl->gtVNPair = ValueNumPair();
6065 if (lcl->gtSsaNum == SsaConfig::RESERVED_SSA_NUM)
6067 // Not an SSA variable.
6069 if (lvaVarAddrExposed(lclNum))
6071 // Address-exposed locals are part of ByrefExposed.
6072 ValueNum addrVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToLoc, vnStore->VNForIntCon(lclNum),
6073 vnStore->VNForFieldSeq(nullptr));
6074 ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN);
6076 lcl->gtVNPair.SetBoth(loadVN);
6080 // Assign odd cases a new, unique, VN.
6081 lcl->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lcl->TypeGet()));
6086 var_types varType = varDsc->TypeGet();
6087 ValueNumPair wholeLclVarVNP = varDsc->GetPerSsaData(lcl->gtSsaNum)->m_vnPair;
6089 // Check for mismatched LclVar size
6091 unsigned typSize = genTypeSize(genActualType(typ));
6092 unsigned varSize = genTypeSize(genActualType(varType));
6094 if (typSize == varSize)
6096 lcl->gtVNPair = wholeLclVarVNP;
6098 else // mismatched LclVar definition and LclVar use size
6100 if (typSize < varSize)
6102 // the indirection is reading less that the whole LclVar
6103 // create a new VN that represent the partial value
6105 ValueNumPair partialLclVarVNP = vnStore->VNPairForCast(wholeLclVarVNP, typ, varType);
6106 lcl->gtVNPair = partialLclVarVNP;
6110 assert(typSize > varSize);
6111 // the indirection is reading beyond the end of the field
6113 lcl->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, typ)); // return a new unique value
6118 // Temporary, to make progress.
6119 // TODO-CQ: This should become an assert again...
6120 if (lcl->gtVNPair.GetLiberal() == ValueNumStore::NoVN)
6122 assert(lcl->gtVNPair.GetConservative() == ValueNumStore::NoVN);
6124 // We don't want to fabricate arbitrary value numbers to things we can't reason about.
6125 // So far, we know about two of these cases:
6126 // Case 1) We have a local var who has never been defined but it's seen as a use.
6127 // This is the case of storeIndir(addr(lclvar)) = expr. In this case since we only
6128 // take the address of the variable, this doesn't mean it's a use nor we have to
6129 // initialize it, so in this very rare case, we fabricate a value number.
6130 // Case 2) Local variables that represent structs which are assigned using CpBlk.
6131 GenTree* nextNode = lcl->gtNext;
6132 assert((nextNode->gtOper == GT_ADDR && nextNode->gtOp.gtOp1 == lcl) ||
6133 varTypeIsStruct(lcl->TypeGet()));
6134 lcl->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lcl->TypeGet()));
6136 assert(lcl->gtVNPair.BothDefined());
6139 // TODO-Review: For the short term, we have a workaround for copyblk/initblk. Those that use
6140 // addrSpillTemp will have a statement like "addrSpillTemp = addr(local)." If we previously decided
6141 // that this block operation defines the local, we will have labeled the "local" node as a DEF
6142 // This flag propagates to the "local" on the RHS. So we'll assume that this is correct,
6143 // and treat it as a def (to a new, unique VN).
6144 else if ((lcl->gtFlags & GTF_VAR_DEF) != 0)
6146 LclVarDsc* varDsc = &lvaTable[lcl->gtLclNum];
6147 if (lcl->gtSsaNum != SsaConfig::RESERVED_SSA_NUM)
6150 .GetPerSsaData(lcl->gtSsaNum)
6151 ->m_vnPair.SetBoth(vnStore->VNForExpr(compCurBB, lcl->TypeGet()));
6153 lcl->gtVNPair = ValueNumPair(); // Avoid confusion -- we don't set the VN of a lcl being defined.
6159 // Use the value of the function pointer (actually, a method handle.)
6160 tree->gtVNPair.SetBoth(
6161 vnStore->VNForHandle(ssize_t(tree->gtFptrVal.gtFptrMethod), GTF_ICON_METHOD_HDL));
6164 // This group passes through a value from a child node.
6166 tree->SetVNsFromNode(tree->gtRetExpr.gtInlineCandidate);
6171 GenTreeLclFld* lclFld = tree->AsLclFld();
6172 assert(!lvaInSsa(lclFld->GetLclNum()) || lclFld->gtFieldSeq != nullptr);
6173 // If this is a (full) def, then the variable will be labeled with the new SSA number,
6174 // which will not have a value. We skip; it will be handled by one of the assignment-like
6175 // forms (assignment, or initBlk or copyBlk).
6176 if (((lclFld->gtFlags & GTF_VAR_DEF) == 0) || (lclFld->gtFlags & GTF_VAR_USEASG))
6178 unsigned lclNum = lclFld->GetLclNum();
6179 unsigned ssaNum = lclFld->GetSsaNum();
6180 LclVarDsc* varDsc = &lvaTable[lclNum];
6182 var_types indType = tree->TypeGet();
6183 if (lclFld->gtFieldSeq == FieldSeqStore::NotAField() || !lvaInSsa(lclFld->GetLclNum()))
6185 // This doesn't represent a proper field access or it's a struct
6186 // with overlapping fields that is hard to reason about; return a new unique VN.
6187 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType));
6191 ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair;
6192 tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, lclFld->gtFieldSeq, indType);
6198 // The ones below here all get a new unique VN -- but for various reasons, explained after each.
6200 // We know nothing about the value of a caught expression.
6201 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
6205 // Skip GT_CLS_VAR nodes that are the LHS of an assignment. (We labeled these earlier.)
6206 // We will "evaluate" this as part of the assignment.
6208 if ((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
6210 bool isVolatile = (tree->gtFlags & GTF_FLD_VOLATILE) != 0;
6214 // For Volatile indirection, first mutate GcHeap/ByrefExposed
6215 fgMutateGcHeap(tree DEBUGARG("GTF_FLD_VOLATILE - read"));
6218 // We just mutate GcHeap/ByrefExposed if isVolatile is true, and then do the read as normal.
6222 // 2: volatile read s;
6225 // We should never assume that the values read by 1 and 2 are the same (because the heap was mutated
6226 // in between them)... but we *should* be able to prove that the values read in 2 and 3 are the
6230 ValueNumPair clsVarVNPair;
6232 // If the static field handle is for a struct type field, then the value of the static
6233 // is a "ref" to the boxed struct -- treat it as the address of the static (we assume that a
6234 // first element offset will be added to get to the actual struct...)
6235 GenTreeClsVar* clsVar = tree->AsClsVar();
6236 FieldSeqNode* fldSeq = clsVar->gtFieldSeq;
6237 assert(fldSeq != nullptr); // We need to have one.
6238 ValueNum selectedStaticVar = ValueNumStore::NoVN;
6239 if (gtIsStaticFieldPtrToBoxedStruct(clsVar->TypeGet(), fldSeq->m_fieldHnd))
6241 clsVarVNPair.SetBoth(
6242 vnStore->VNForFunc(TYP_BYREF, VNF_PtrToStatic, vnStore->VNForFieldSeq(fldSeq)));
6246 // This is a reference to heap memory.
6247 // We model statics as indices into GcHeap (which is a subset of ByrefExposed).
6249 FieldSeqNode* fldSeqForStaticVar =
6250 GetFieldSeqStore()->CreateSingleton(tree->gtClsVar.gtClsVarHnd);
6251 size_t structSize = 0;
6252 selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap],
6253 fldSeqForStaticVar, &structSize);
6255 vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, tree->TypeGet(), structSize);
6257 clsVarVNPair.SetLiberal(selectedStaticVar);
6258 // The conservative interpretation always gets a new, unique VN.
6259 clsVarVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
6262 // The ValueNum returned must represent the full-sized IL-Stack value
6263 // If we need to widen this value then we need to introduce a VNF_Cast here to represent
6264 // the widened value. This is necessary since the CSE package can replace all occurances
6265 // of a given ValueNum with a LclVar that is a full-sized IL-Stack value
6267 if (varTypeIsSmall(tree->TypeGet()))
6269 var_types castToType = tree->TypeGet();
6270 clsVarVNPair = vnStore->VNPairForCast(clsVarVNPair, castToType, castToType);
6272 tree->gtVNPair = clsVarVNPair;
6276 case GT_MEMORYBARRIER: // Leaf
6277 // For MEMORYBARRIER add an arbitrary side effect on GcHeap/ByrefExposed.
6278 fgMutateGcHeap(tree DEBUGARG("MEMORYBARRIER"));
6281 // These do not represent values.
6283 case GT_JMP: // Control flow
6284 case GT_LABEL: // Control flow
6285 #if !FEATURE_EH_FUNCLETS
6286 case GT_END_LFIN: // Control flow
6289 // This node is a standin for an argument whose value will be computed later. (Perhaps it's
6290 // a register argument, and we don't want to preclude use of the register in arg evaluation yet.)
6291 // We give this a "fake" value number now; if the call in which it occurs cares about the
6292 // value (e.g., it's a helper call whose result is a function of argument values) we'll reset
6293 // this later, when the later args have been assigned VNs.
6294 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
6298 // This one is special because we should never process it in this method: it should
6299 // always be taken care of, when needed, during pre-processing of a blocks phi definitions.
6307 else if (GenTree::OperIsSimple(oper))
6310 // Sometimes we query the memory ssa map in an assertion, and need a dummy location for the ignored result.
6311 unsigned memorySsaNum;
6314 if ((oper == GT_ASG) && !varTypeIsStruct(tree))
6316 GenTree* lhs = tree->gtOp.gtOp1;
6317 GenTree* rhs = tree->gtOp.gtOp2;
6319 ValueNumPair rhsVNPair = rhs->gtVNPair;
6321 // Is the type being stored different from the type computed by the rhs?
6322 if (rhs->TypeGet() != lhs->TypeGet())
6324 // This means that there is an implicit cast on the rhs value
6326 // We will add a cast function to reflect the possible narrowing of the rhs value
6328 var_types castToType = lhs->TypeGet();
6329 var_types castFromType = rhs->TypeGet();
6330 bool isUnsigned = varTypeIsUnsigned(castFromType);
6332 rhsVNPair = vnStore->VNPairForCast(rhsVNPair, castToType, castFromType, isUnsigned);
6335 if (tree->TypeGet() != TYP_VOID)
6337 // Assignment operators, as expressions, return the value of the RHS.
6338 tree->gtVNPair = rhsVNPair;
6341 // Now that we've labeled the assignment as a whole, we don't care about exceptions.
6342 rhsVNPair = vnStore->VNPNormVal(rhsVNPair);
6344 // If the types of the rhs and lhs are different then we
6345 // may want to change the ValueNumber assigned to the lhs.
6347 if (rhs->TypeGet() != lhs->TypeGet())
6349 if (rhs->TypeGet() == TYP_REF)
6351 // If we have an unsafe IL assignment of a TYP_REF to a non-ref (typically a TYP_BYREF)
6352 // then don't propagate this ValueNumber to the lhs, instead create a new unique VN
6354 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet()));
6358 // We have to handle the case where the LHS is a comma. In that case, we don't evaluate the comma,
6359 // so we give it VNForVoid, and we're really interested in the effective value.
6360 GenTree* lhsCommaIter = lhs;
6361 while (lhsCommaIter->OperGet() == GT_COMMA)
6363 lhsCommaIter->gtVNPair.SetBoth(vnStore->VNForVoid());
6364 lhsCommaIter = lhsCommaIter->gtOp.gtOp2;
6366 lhs = lhs->gtEffectiveVal();
6368 // Now, record the new VN for an assignment (performing the indicated "state update").
6369 // It's safe to use gtEffectiveVal here, because the non-last elements of a comma list on the
6370 // LHS will come before the assignment in evaluation order.
6371 switch (lhs->OperGet())
6376 GenTreeLclVarCommon* lcl = lhs->AsLclVarCommon();
6377 unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lcl);
6379 // Should not have been recorded as updating the GC heap.
6380 assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum));
6382 if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM)
6384 // Should not have been recorded as updating ByrefExposed mem.
6385 assert(!GetMemorySsaMap(ByrefExposed)->Lookup(tree, &memorySsaNum));
6387 assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN);
6389 lhs->gtVNPair = rhsVNPair;
6390 lvaTable[lcl->gtLclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = rhsVNPair;
6395 printf("N%03u ", lhs->gtSeqNum);
6396 Compiler::printTreeID(lhs);
6398 gtDispNodeName(lhs);
6399 gtDispLeaf(lhs, nullptr);
6401 vnpPrint(lhs->gtVNPair, 1);
6406 else if (lvaVarAddrExposed(lcl->gtLclNum))
6408 // We could use MapStore here and MapSelect on reads of address-exposed locals
6409 // (using the local nums as selectors) to get e.g. propagation of values
6410 // through address-taken locals in regions of code with no calls or byref
6412 // For now, just use a new opaque VN.
6413 ValueNum heapVN = vnStore->VNForExpr(compCurBB);
6414 recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local assign"));
6422 Compiler::printTreeID(tree);
6423 printf(" assigns to non-address-taken local var V%02u; excluded from SSA, so value not "
6433 GenTreeLclFld* lclFld = lhs->AsLclFld();
6434 unsigned lclDefSsaNum = GetSsaNumForLocalVarDef(lclFld);
6436 // Should not have been recorded as updating the GC heap.
6437 assert(!GetMemorySsaMap(GcHeap)->Lookup(tree, &memorySsaNum));
6439 if (lclDefSsaNum != SsaConfig::RESERVED_SSA_NUM)
6441 ValueNumPair newLhsVNPair;
6442 // Is this a full definition?
6443 if ((lclFld->gtFlags & GTF_VAR_USEASG) == 0)
6445 assert(!lclFld->IsPartialLclFld(this));
6446 assert(rhsVNPair.GetLiberal() != ValueNumStore::NoVN);
6447 newLhsVNPair = rhsVNPair;
6451 // We should never have a null field sequence here.
6452 assert(lclFld->gtFieldSeq != nullptr);
6453 if (lclFld->gtFieldSeq == FieldSeqStore::NotAField())
6455 // We don't know what field this represents. Assign a new VN to the whole variable
6456 // (since we may be writing to an unknown portion of it.)
6457 newLhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lvaGetActualType(lclFld->gtLclNum)));
6461 // We do know the field sequence.
6462 // The "lclFld" node will be labeled with the SSA number of its "use" identity
6463 // (we looked in a side table above for its "def" identity). Look up that value.
6464 ValueNumPair oldLhsVNPair =
6465 lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclFld->GetSsaNum())->m_vnPair;
6466 newLhsVNPair = vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, lclFld->gtFieldSeq,
6467 rhsVNPair, // Pre-value.
6468 lclFld->TypeGet(), compCurBB);
6471 lvaTable[lclFld->GetLclNum()].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair;
6472 lhs->gtVNPair = newLhsVNPair;
6476 if (lhs->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
6478 printf("N%03u ", lhs->gtSeqNum);
6479 Compiler::printTreeID(lhs);
6481 gtDispNodeName(lhs);
6482 gtDispLeaf(lhs, nullptr);
6484 vnpPrint(lhs->gtVNPair, 1);
6490 else if (lvaVarAddrExposed(lclFld->gtLclNum))
6492 // This side-effects ByrefExposed. Just use a new opaque VN.
6493 // As with GT_LCL_VAR, we could probably use MapStore here and MapSelect at corresponding
6494 // loads, but to do so would have to identify the subset of address-exposed locals
6495 // whose fields can be disambiguated.
6496 ValueNum heapVN = vnStore->VNForExpr(compCurBB);
6497 recordAddressExposedLocalStore(tree, heapVN DEBUGARG("local field assign"));
6503 noway_assert(!"Phi arg cannot be LHS.");
6508 noway_assert(!"GT_BLK/GT_OBJ can not be LHS when !varTypeIsStruct(tree) is true!");
6513 bool isVolatile = (lhs->gtFlags & GTF_IND_VOLATILE) != 0;
6517 // For Volatile store indirection, first mutate GcHeap/ByrefExposed
6518 fgMutateGcHeap(lhs DEBUGARG("GTF_IND_VOLATILE - store"));
6519 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lhs->TypeGet()));
6522 GenTree* arg = lhs->gtOp.gtOp1;
6524 // Indicates whether the argument of the IND is the address of a local.
6525 bool wasLocal = false;
6527 lhs->gtVNPair = rhsVNPair;
6530 ValueNum argVN = arg->gtVNPair.GetLiberal();
6532 bool argIsVNFunc = vnStore->GetVNFunc(vnStore->VNNormVal(argVN), &funcApp);
6534 // Is this an assignment to a (field of, perhaps) a local?
6535 // If it is a PtrToLoc, lib and cons VNs will be the same.
6538 if (funcApp.m_func == VNF_PtrToLoc)
6540 assert(arg->gtVNPair.BothEqual()); // If it's a PtrToLoc, lib/cons shouldn't differ.
6541 assert(vnStore->IsVNConstant(funcApp.m_args[0]));
6542 unsigned lclNum = vnStore->ConstantValue<unsigned>(funcApp.m_args[0]);
6546 if (lvaInSsa(lclNum))
6548 FieldSeqNode* fieldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[1]);
6550 // Either "arg" is the address of (part of) a local itself, or else we have
6551 // a "rogue" PtrToLoc, one that should have made the local in question
6552 // address-exposed. Assert on that.
6553 GenTreeLclVarCommon* lclVarTree = nullptr;
6554 bool isEntire = false;
6555 unsigned lclDefSsaNum = SsaConfig::RESERVED_SSA_NUM;
6556 ValueNumPair newLhsVNPair;
6558 if (arg->DefinesLocalAddr(this, genTypeSize(lhs->TypeGet()), &lclVarTree, &isEntire))
6560 // The local #'s should agree.
6561 assert(lclNum == lclVarTree->GetLclNum());
6563 if (fieldSeq == FieldSeqStore::NotAField())
6565 // We don't know where we're storing, so give the local a new, unique VN.
6566 // Do this by considering it an "entire" assignment, with an unknown RHS.
6568 rhsVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lclVarTree->TypeGet()));
6573 newLhsVNPair = rhsVNPair;
6574 lclDefSsaNum = lclVarTree->GetSsaNum();
6578 // Don't use the lclVarTree's VN: if it's a local field, it will
6579 // already be dereferenced by it's field sequence.
6580 ValueNumPair oldLhsVNPair = lvaTable[lclVarTree->GetLclNum()]
6581 .GetPerSsaData(lclVarTree->GetSsaNum())
6583 lclDefSsaNum = GetSsaNumForLocalVarDef(lclVarTree);
6585 vnStore->VNPairApplySelectorsAssign(oldLhsVNPair, fieldSeq, rhsVNPair,
6586 lhs->TypeGet(), compCurBB);
6588 lvaTable[lclNum].GetPerSsaData(lclDefSsaNum)->m_vnPair = newLhsVNPair;
6592 unreached(); // "Rogue" PtrToLoc, as discussed above.
6598 Compiler::printTreeID(tree);
6599 printf(" assigned VN to local var V%02u/%d: VN ", lclNum, lclDefSsaNum);
6600 vnpPrint(newLhsVNPair, 1);
6605 else if (lvaVarAddrExposed(lclNum))
6607 // Need to record the effect on ByrefExposed.
6608 // We could use MapStore here and MapSelect on reads of address-exposed locals
6609 // (using the local nums as selectors) to get e.g. propagation of values
6610 // through address-taken locals in regions of code with no calls or byref
6612 // For now, just use a new opaque VN.
6613 ValueNum heapVN = vnStore->VNForExpr(compCurBB);
6614 recordAddressExposedLocalStore(tree, heapVN DEBUGARG("PtrToLoc indir"));
6619 // Was the argument of the GT_IND the address of a local, handled above?
6622 GenTree* obj = nullptr;
6623 GenTree* staticOffset = nullptr;
6624 FieldSeqNode* fldSeq = nullptr;
6626 // Is the LHS an array index expression?
6627 if (argIsVNFunc && funcApp.m_func == VNF_PtrToArrElem)
6629 CORINFO_CLASS_HANDLE elemTypeEq =
6630 CORINFO_CLASS_HANDLE(vnStore->ConstantValue<ssize_t>(funcApp.m_args[0]));
6631 ValueNum arrVN = funcApp.m_args[1];
6632 ValueNum inxVN = funcApp.m_args[2];
6633 FieldSeqNode* fldSeq = vnStore->FieldSeqVNToFieldSeq(funcApp.m_args[3]);
6635 // Does the child of the GT_IND 'arg' have an associated zero-offset field sequence?
6636 FieldSeqNode* addrFieldSeq = nullptr;
6637 if (GetZeroOffsetFieldMap()->Lookup(arg, &addrFieldSeq))
6639 fldSeq = GetFieldSeqStore()->Append(addrFieldSeq, fldSeq);
6646 Compiler::printTreeID(tree);
6647 printf(" assigns to an array element:\n");
6651 ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq,
6652 rhsVNPair.GetLiberal(), lhs->TypeGet());
6653 recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign (case 1)"));
6655 // It may be that we haven't parsed it yet. Try.
6656 else if (lhs->gtFlags & GTF_IND_ARR_INDEX)
6659 bool b = GetArrayInfoMap()->Lookup(lhs, &arrInfo);
6661 ValueNum arrVN = ValueNumStore::NoVN;
6662 ValueNum inxVN = ValueNumStore::NoVN;
6663 FieldSeqNode* fldSeq = nullptr;
6666 GenTree* arr = nullptr;
6667 arg->ParseArrayAddress(this, &arrInfo, &arr, &inxVN, &fldSeq);
6670 fgMutateGcHeap(tree DEBUGARG("assignment to unparseable array expression"));
6673 // Otherwise, parsing succeeded.
6675 // Need to form H[arrType][arr][ind][fldSeq] = rhsVNPair.GetLiberal()
6677 // Get the element type equivalence class representative.
6678 CORINFO_CLASS_HANDLE elemTypeEq =
6679 EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
6680 arrVN = arr->gtVNPair.GetLiberal();
6682 FieldSeqNode* zeroOffsetFldSeq = nullptr;
6683 if (GetZeroOffsetFieldMap()->Lookup(arg, &zeroOffsetFldSeq))
6685 fldSeq = GetFieldSeqStore()->Append(fldSeq, zeroOffsetFldSeq);
6688 ValueNum heapVN = fgValueNumberArrIndexAssign(elemTypeEq, arrVN, inxVN, fldSeq,
6689 rhsVNPair.GetLiberal(), lhs->TypeGet());
6690 recordGcHeapStore(tree, heapVN DEBUGARG("ArrIndexAssign (case 2)"));
6692 else if (arg->IsFieldAddr(this, &obj, &staticOffset, &fldSeq))
6694 if (fldSeq == FieldSeqStore::NotAField())
6696 fgMutateGcHeap(tree DEBUGARG("NotAField"));
6700 assert(fldSeq != nullptr);
6702 CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq->m_fieldHnd);
6705 // Make sure that the class containing it is not a value class (as we are expecting
6706 // an instance field)
6707 assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0);
6708 assert(staticOffset == nullptr);
6711 // Get the first (instance or static) field from field seq. GcHeap[field] will yield
6713 if (fldSeq->IsFirstElemFieldSeq())
6715 fldSeq = fldSeq->m_next;
6716 assert(fldSeq != nullptr);
6719 // Get a field sequence for just the first field in the sequence
6721 FieldSeqNode* firstFieldOnly = GetFieldSeqStore()->CreateSingleton(fldSeq->m_fieldHnd);
6723 // The final field in the sequence will need to match the 'indType'
6724 var_types indType = lhs->TypeGet();
6726 vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly);
6728 // The type of the field is "struct" if there are more fields in the sequence,
6729 // otherwise it is the type returned from VNApplySelectors above.
6730 var_types firstFieldType = vnStore->TypeOfVN(fldMapVN);
6733 rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
6734 ValueNum newFldMapVN = ValueNumStore::NoVN;
6736 // when (obj != nullptr) we have an instance field, otherwise a static field
6737 // when (staticOffset != nullptr) it represents a offset into a static or the call to
6738 // Shared Static Base
6739 if ((obj != nullptr) || (staticOffset != nullptr))
6741 ValueNum valAtAddr = fldMapVN;
6742 ValueNum normVal = ValueNumStore::NoVN;
6746 // construct the ValueNumber for 'fldMap at obj'
6747 normVal = vnStore->VNNormVal(obj->GetVN(VNK_Liberal));
6749 vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
6751 else // (staticOffset != nullptr)
6753 // construct the ValueNumber for 'fldMap at staticOffset'
6754 normVal = vnStore->VNNormVal(staticOffset->GetVN(VNK_Liberal));
6756 vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, normVal);
6758 // Now get rid of any remaining struct field dereferences. (if they exist)
6762 vnStore->VNApplySelectorsAssign(VNK_Liberal, valAtAddr, fldSeq->m_next,
6763 storeVal, indType, compCurBB);
6766 // From which we can construct the new ValueNumber for 'fldMap at normVal'
6767 newFldMapVN = vnStore->VNForMapStore(vnStore->TypeOfVN(fldMapVN), fldMapVN, normVal,
6772 // plain static field
6774 // Now get rid of any remaining struct field dereferences. (if they exist)
6778 vnStore->VNApplySelectorsAssign(VNK_Liberal, fldMapVN, fldSeq->m_next,
6779 storeVal, indType, compCurBB);
6782 newFldMapVN = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap],
6783 fldSeq, storeVal, indType, compCurBB);
6786 // It is not strictly necessary to set the lhs value number,
6787 // but the dumps read better with it set to the 'storeVal' that we just computed
6788 lhs->gtVNPair.SetBoth(storeVal);
6790 // Update the field map for firstField in GcHeap to this new value.
6792 vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly,
6793 newFldMapVN, indType, compCurBB);
6795 recordGcHeapStore(tree, heapVN DEBUGARG("StoreField"));
6800 GenTreeLclVarCommon* lclVarTree = nullptr;
6801 bool isLocal = tree->DefinesLocal(this, &lclVarTree);
6803 if (isLocal && lvaVarAddrExposed(lclVarTree->gtLclNum))
6805 // Store to address-exposed local; need to record the effect on ByrefExposed.
6806 // We could use MapStore here and MapSelect on reads of address-exposed locals
6807 // (using the local nums as selectors) to get e.g. propagation of values
6808 // through address-taken locals in regions of code with no calls or byref
6810 // For now, just use a new opaque VN.
6811 ValueNum memoryVN = vnStore->VNForExpr(compCurBB);
6812 recordAddressExposedLocalStore(tree, memoryVN DEBUGARG("PtrToLoc indir"));
6816 // If it doesn't define a local, then it might update GcHeap/ByrefExposed.
6817 // For the new ByrefExposed VN, we could use an operator here like
6818 // VNF_ByrefExposedStore that carries the VNs of the pointer and RHS, then
6819 // at byref loads if the current ByrefExposed VN happens to be
6820 // VNF_ByrefExposedStore with the same pointer VN, we could propagate the
6821 // VN from the RHS to the VN for the load. This would e.g. allow tracking
6822 // values through assignments to out params. For now, just model this
6823 // as an opaque GcHeap/ByrefExposed mutation.
6824 fgMutateGcHeap(tree DEBUGARG("assign-of-IND"));
6829 // We don't actually evaluate an IND on the LHS, so give it the Void value.
6830 tree->gtVNPair.SetBoth(vnStore->VNForVoid());
6836 bool isVolatile = (lhs->gtFlags & GTF_FLD_VOLATILE) != 0;
6840 // For Volatile store indirection, first mutate GcHeap/ByrefExposed
6841 fgMutateGcHeap(lhs DEBUGARG("GTF_CLS_VAR - store")); // always change fgCurMemoryVN
6844 // We model statics as indices into GcHeap (which is a subset of ByrefExposed).
6845 FieldSeqNode* fldSeqForStaticVar = GetFieldSeqStore()->CreateSingleton(lhs->gtClsVar.gtClsVarHnd);
6846 assert(fldSeqForStaticVar != FieldSeqStore::NotAField());
6848 ValueNum storeVal = rhsVNPair.GetLiberal(); // The value number from the rhs of the assignment
6849 storeVal = vnStore->VNApplySelectorsAssign(VNK_Liberal, fgCurMemoryVN[GcHeap], fldSeqForStaticVar,
6850 storeVal, lhs->TypeGet(), compCurBB);
6852 // It is not strictly necessary to set the lhs value number,
6853 // but the dumps read better with it set to the 'storeVal' that we just computed
6854 lhs->gtVNPair.SetBoth(storeVal);
6856 // bbMemoryDef must include GcHeap for any block that mutates the GC heap
6857 assert((compCurBB->bbMemoryDef & memoryKindSet(GcHeap)) != 0);
6859 // Update the field map for the fgCurMemoryVN and SSA for the tree
6860 recordGcHeapStore(tree, storeVal DEBUGARG("Static Field store"));
6865 assert(!"Unknown node for lhs of assignment!");
6867 // For Unknown stores, mutate GcHeap/ByrefExposed
6868 fgMutateGcHeap(lhs DEBUGARG("Unkwown Assignment - store")); // always change fgCurMemoryVN
6872 // Other kinds of assignment: initblk and copyblk.
6873 else if (oper == GT_ASG && varTypeIsStruct(tree))
6875 fgValueNumberBlockAssignment(tree);
6877 else if (oper == GT_ADDR)
6879 // We have special representations for byrefs to lvalues.
6880 GenTree* arg = tree->gtOp.gtOp1;
6881 if (arg->OperIsLocal())
6883 FieldSeqNode* fieldSeq = nullptr;
6884 ValueNum newVN = ValueNumStore::NoVN;
6885 if (!lvaInSsa(arg->gtLclVarCommon.GetLclNum()))
6887 newVN = vnStore->VNForExpr(compCurBB, TYP_BYREF);
6889 else if (arg->OperGet() == GT_LCL_FLD)
6891 fieldSeq = arg->AsLclFld()->gtFieldSeq;
6892 if (fieldSeq == nullptr)
6894 // Local field with unknown field seq -- not a precise pointer.
6895 newVN = vnStore->VNForExpr(compCurBB, TYP_BYREF);
6898 if (newVN == ValueNumStore::NoVN)
6900 assert(arg->gtLclVarCommon.GetSsaNum() != ValueNumStore::NoVN);
6901 newVN = vnStore->VNForFunc(TYP_BYREF, VNF_PtrToLoc,
6902 vnStore->VNForIntCon(arg->gtLclVarCommon.GetLclNum()),
6903 vnStore->VNForFieldSeq(fieldSeq));
6905 tree->gtVNPair.SetBoth(newVN);
6907 else if ((arg->gtOper == GT_IND) || arg->OperIsBlk())
6909 // Usually the ADDR and IND just cancel out...
6910 // except when this GT_ADDR has a valid zero-offset field sequence
6912 FieldSeqNode* zeroOffsetFieldSeq = nullptr;
6913 if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq) &&
6914 (zeroOffsetFieldSeq != FieldSeqStore::NotAField()))
6916 ValueNum addrExtended = vnStore->ExtendPtrVN(arg->gtOp.gtOp1, zeroOffsetFieldSeq);
6917 if (addrExtended != ValueNumStore::NoVN)
6919 tree->gtVNPair.SetBoth(addrExtended); // We don't care about lib/cons differences for addresses.
6923 // ExtendPtrVN returned a failure result
6924 // So give this address a new unique value
6925 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_BYREF));
6930 // They just cancel, so fetch the ValueNumber from the op1 of the GT_IND node.
6932 GenTree* addr = arg->AsIndir()->Addr();
6933 tree->gtVNPair = addr->gtVNPair;
6935 // For the CSE phase mark the address as GTF_DONT_CSE
6936 // because it will end up with the same value number as tree (the GT_ADDR).
6937 addr->gtFlags |= GTF_DONT_CSE;
6942 // May be more cases to do here! But we'll punt for now.
6943 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, TYP_BYREF));
6946 else if ((oper == GT_IND) || GenTree::OperIsBlk(oper))
6948 // So far, we handle cases in which the address is a ptr-to-local, or if it's
6949 // a pointer to an object field or array element. Other cases become uses of
6950 // the current ByrefExposed value and the pointer value, so that at least we
6951 // can recognize redundant loads with no stores between them.
6952 GenTree* addr = tree->AsIndir()->Addr();
6953 GenTreeLclVarCommon* lclVarTree = nullptr;
6954 FieldSeqNode* fldSeq1 = nullptr;
6955 FieldSeqNode* fldSeq2 = nullptr;
6956 GenTree* obj = nullptr;
6957 GenTree* staticOffset = nullptr;
6958 bool isVolatile = (tree->gtFlags & GTF_IND_VOLATILE) != 0;
6960 // See if the addr has any exceptional part.
6961 ValueNumPair addrNvnp;
6962 ValueNumPair addrXvnp = ValueNumPair(ValueNumStore::VNForEmptyExcSet(), ValueNumStore::VNForEmptyExcSet());
6963 vnStore->VNPUnpackExc(addr->gtVNPair, &addrNvnp, &addrXvnp);
6965 // Is the dereference immutable? If so, model it as referencing the read-only heap.
6966 if (tree->gtFlags & GTF_IND_INVARIANT)
6968 assert(!isVolatile); // We don't expect both volatile and invariant
6970 ValueNumPair(vnStore->VNForMapSelect(VNK_Liberal, TYP_REF, ValueNumStore::VNForROH(),
6971 addrNvnp.GetLiberal()),
6972 vnStore->VNForMapSelect(VNK_Conservative, TYP_REF, ValueNumStore::VNForROH(),
6973 addrNvnp.GetConservative()));
6974 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
6976 else if (isVolatile)
6978 // For Volatile indirection, mutate GcHeap/ByrefExposed
6979 fgMutateGcHeap(tree DEBUGARG("GTF_IND_VOLATILE - read"));
6981 // The value read by the GT_IND can immediately change
6982 ValueNum newUniq = vnStore->VNForExpr(compCurBB, tree->TypeGet());
6983 tree->gtVNPair = vnStore->VNPWithExc(ValueNumPair(newUniq, newUniq), addrXvnp);
6985 // We always want to evaluate the LHS when the GT_IND node is marked with GTF_IND_ARR_INDEX
6986 // as this will relabel the GT_IND child correctly using the VNF_PtrToArrElem
6987 else if ((tree->gtFlags & GTF_IND_ARR_INDEX) != 0)
6990 bool b = GetArrayInfoMap()->Lookup(tree, &arrInfo);
6993 ValueNum inxVN = ValueNumStore::NoVN;
6994 FieldSeqNode* fldSeq = nullptr;
6996 // GenTree* addr = tree->gtOp.gtOp1;
6997 ValueNum addrVN = addrNvnp.GetLiberal();
7000 GenTree* arr = nullptr;
7001 addr->ParseArrayAddress(this, &arrInfo, &arr, &inxVN, &fldSeq);
7004 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7007 assert(fldSeq != FieldSeqStore::NotAField());
7010 // Need to form H[arrType][arr][ind][fldSeq]
7011 // Get the array element type equivalence class rep.
7012 CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
7013 ValueNum elemTypeEqVN = vnStore->VNForHandle(ssize_t(elemTypeEq), GTF_ICON_CLASS_HDL);
7015 // We take the "VNNormVal"s here, because if either has exceptional outcomes, they will be captured
7016 // as part of the value of the composite "addr" operation...
7017 ValueNum arrVN = vnStore->VNNormVal(arr->gtVNPair.GetLiberal());
7018 inxVN = vnStore->VNNormVal(inxVN);
7020 // Additionally, relabel the address with a PtrToArrElem value number.
7021 ValueNum fldSeqVN = vnStore->VNForFieldSeq(fldSeq);
7023 vnStore->VNForFunc(TYP_BYREF, VNF_PtrToArrElem, elemTypeEqVN, arrVN, inxVN, fldSeqVN);
7025 // The aggregate "addr" VN should have had all the exceptions bubble up...
7026 elemAddr = vnStore->VNWithExc(elemAddr, addrXvnp.GetLiberal());
7027 addr->gtVNPair.SetBoth(elemAddr);
7031 printf(" Relabeled IND_ARR_INDEX address node ");
7032 Compiler::printTreeID(addr);
7033 printf(" with l:" FMT_VN ": ", elemAddr);
7034 vnStore->vnDump(this, elemAddr);
7036 if (vnStore->VNNormVal(elemAddr) != elemAddr)
7038 printf(" [" FMT_VN " is: ", vnStore->VNNormVal(elemAddr));
7039 vnStore->vnDump(this, vnStore->VNNormVal(elemAddr));
7044 // We now need to retrieve the value number for the array element value
7045 // and give this value number to the GT_IND node 'tree'
7046 // We do this whenever we have an rvalue, but we don't do it for a
7047 // normal LHS assignment into an array element.
7049 if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
7051 fgValueNumberArrIndexVal(tree, elemTypeEq, arrVN, inxVN, addrXvnp.GetLiberal(), fldSeq);
7054 // In general we skip GT_IND nodes on that are the LHS of an assignment. (We labeled these earlier.)
7055 // We will "evaluate" this as part of the assignment.
7056 else if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
7058 FieldSeqNode* localFldSeq = nullptr;
7061 // Is it a local or a heap address?
7062 if (addr->IsLocalAddrExpr(this, &lclVarTree, &localFldSeq) && lvaInSsa(lclVarTree->GetLclNum()))
7064 unsigned lclNum = lclVarTree->GetLclNum();
7065 unsigned ssaNum = lclVarTree->GetSsaNum();
7066 LclVarDsc* varDsc = &lvaTable[lclNum];
7068 if ((localFldSeq == FieldSeqStore::NotAField()) || (localFldSeq == nullptr))
7070 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7074 var_types indType = tree->TypeGet();
7075 ValueNumPair lclVNPair = varDsc->GetPerSsaData(ssaNum)->m_vnPair;
7076 tree->gtVNPair = vnStore->VNPairApplySelectors(lclVNPair, localFldSeq, indType);
7079 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
7081 else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && funcApp.m_func == VNF_PtrToStatic)
7083 var_types indType = tree->TypeGet();
7084 ValueNum fieldSeqVN = funcApp.m_args[0];
7086 FieldSeqNode* fldSeqForStaticVar = vnStore->FieldSeqVNToFieldSeq(fieldSeqVN);
7088 if (fldSeqForStaticVar != FieldSeqStore::NotAField())
7090 ValueNum selectedStaticVar;
7091 // We model statics as indices into the GcHeap (which is a subset of ByrefExposed).
7092 size_t structSize = 0;
7093 selectedStaticVar = vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap],
7094 fldSeqForStaticVar, &structSize);
7095 selectedStaticVar = vnStore->VNApplySelectorsTypeCheck(selectedStaticVar, indType, structSize);
7097 tree->gtVNPair.SetLiberal(selectedStaticVar);
7098 tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, indType));
7102 JITDUMP(" *** Missing field sequence info for VNF_PtrToStatic value GT_IND\n");
7103 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, indType)); // a new unique value number
7105 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
7107 else if (vnStore->GetVNFunc(addrNvnp.GetLiberal(), &funcApp) && (funcApp.m_func == VNF_PtrToArrElem))
7109 fgValueNumberArrIndexVal(tree, &funcApp, addrXvnp.GetLiberal());
7111 else if (addr->IsFieldAddr(this, &obj, &staticOffset, &fldSeq2))
7113 if (fldSeq2 == FieldSeqStore::NotAField())
7115 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7117 else if (fldSeq2 != nullptr)
7119 // Get the first (instance or static) field from field seq. GcHeap[field] will yield the "field
7121 CLANG_FORMAT_COMMENT_ANCHOR;
7124 CORINFO_CLASS_HANDLE fldCls = info.compCompHnd->getFieldClass(fldSeq2->m_fieldHnd);
7127 // Make sure that the class containing it is not a value class (as we are expecting an
7129 assert((info.compCompHnd->getClassAttribs(fldCls) & CORINFO_FLG_VALUECLASS) == 0);
7130 assert(staticOffset == nullptr);
7133 // Get a field sequence for just the first field in the sequence
7135 FieldSeqNode* firstFieldOnly = GetFieldSeqStore()->CreateSingleton(fldSeq2->m_fieldHnd);
7136 size_t structSize = 0;
7138 vnStore->VNApplySelectors(VNK_Liberal, fgCurMemoryVN[GcHeap], firstFieldOnly, &structSize);
7140 // The final field in the sequence will need to match the 'indType'
7141 var_types indType = tree->TypeGet();
7143 // The type of the field is "struct" if there are more fields in the sequence,
7144 // otherwise it is the type returned from VNApplySelectors above.
7145 var_types firstFieldType = vnStore->TypeOfVN(fldMapVN);
7147 ValueNum valAtAddr = fldMapVN;
7150 // construct the ValueNumber for 'fldMap at obj'
7151 ValueNum objNormVal = vnStore->VNNormVal(obj->GetVN(VNK_Liberal));
7152 valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, objNormVal);
7154 else if (staticOffset != nullptr)
7156 // construct the ValueNumber for 'fldMap at staticOffset'
7157 ValueNum offsetNormVal = vnStore->VNNormVal(staticOffset->GetVN(VNK_Liberal));
7158 valAtAddr = vnStore->VNForMapSelect(VNK_Liberal, firstFieldType, fldMapVN, offsetNormVal);
7161 // Now get rid of any remaining struct field dereferences.
7162 if (fldSeq2->m_next)
7164 valAtAddr = vnStore->VNApplySelectors(VNK_Liberal, valAtAddr, fldSeq2->m_next, &structSize);
7166 valAtAddr = vnStore->VNApplySelectorsTypeCheck(valAtAddr, indType, structSize);
7168 tree->gtVNPair.SetLiberal(valAtAddr);
7170 // The conservative value is a new, unique VN.
7171 tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7172 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
7176 // Occasionally we do an explicit null test on a REF, so we just dereference it with no
7177 // field sequence. The result is probably unused.
7178 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7179 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
7182 else // We don't know where the address points, so it is an ByrefExposed load.
7184 ValueNum addrVN = addr->gtVNPair.GetLiberal();
7185 ValueNum loadVN = fgValueNumberByrefExposedLoad(typ, addrVN);
7186 tree->gtVNPair.SetLiberal(loadVN);
7187 tree->gtVNPair.SetConservative(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7188 tree->gtVNPair = vnStore->VNPWithExc(tree->gtVNPair, addrXvnp);
7192 else if (tree->OperGet() == GT_CAST)
7194 fgValueNumberCastTree(tree);
7196 else if (tree->OperGet() == GT_INTRINSIC)
7198 fgValueNumberIntrinsic(tree);
7200 else if (ValueNumStore::VNFuncIsLegal(GetVNFuncForOper(oper, (tree->gtFlags & GTF_UNSIGNED) != 0)))
7202 if (GenTree::OperIsUnary(oper))
7204 if (tree->gtOp.gtOp1 != nullptr)
7206 if (tree->OperGet() == GT_NOP)
7208 // Pass through arg vn.
7209 tree->gtVNPair = tree->gtOp.gtOp1->gtVNPair;
7213 ValueNumPair op1VNP;
7214 ValueNumPair op1VNPx = ValueNumStore::VNPForEmptyExcSet();
7215 vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1VNP, &op1VNPx);
7217 // If we are fetching the array length for an array ref that came from global memory
7218 // then for CSE safety we must use the conservative value number for both
7220 if ((tree->OperGet() == GT_ARR_LENGTH) && ((tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) != 0))
7222 // use the conservative value number for both when computing the VN for the ARR_LENGTH
7223 op1VNP.SetBoth(op1VNP.GetConservative());
7227 vnStore->VNPWithExc(vnStore->VNPairForFunc(tree->TypeGet(),
7228 GetVNFuncForOper(oper, (tree->gtFlags &
7229 GTF_UNSIGNED) != 0),
7234 else // Is actually nullary.
7236 // Mostly we'll leave these without a value number, assuming we'll detect these as VN failures
7237 // if they actually need to have values. With the exception of NOPs, which can sometimes have
7239 if (tree->OperGet() == GT_NOP)
7241 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7247 assert(oper != GT_ASG); // We handled assignments earlier.
7248 assert(GenTree::OperIsBinary(oper));
7249 // Standard binary operator.
7250 ValueNumPair op2VNPair;
7251 if (tree->gtOp.gtOp2 == nullptr)
7253 op2VNPair.SetBoth(ValueNumStore::VNForNull());
7257 op2VNPair = tree->gtOp.gtOp2->gtVNPair;
7259 // A few special case: if we add a field offset constant to a PtrToXXX, we get back a new PtrToXXX.
7260 ValueNum newVN = ValueNumStore::NoVN;
7262 ValueNumPair op1vnp;
7263 ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
7264 vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp);
7265 ValueNumPair op2vnp;
7266 ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
7267 vnStore->VNPUnpackExc(op2VNPair, &op2vnp, &op2Xvnp);
7268 ValueNumPair excSet = vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp);
7272 newVN = vnStore->ExtendPtrVN(tree->gtOp.gtOp1, tree->gtOp.gtOp2);
7273 if (newVN == ValueNumStore::NoVN)
7275 newVN = vnStore->ExtendPtrVN(tree->gtOp.gtOp2, tree->gtOp.gtOp1);
7278 if (newVN != ValueNumStore::NoVN)
7280 newVN = vnStore->VNWithExc(newVN, excSet.GetLiberal());
7281 // We don't care about differences between liberal and conservative for pointer values.
7282 tree->gtVNPair.SetBoth(newVN);
7287 ValueNumPair normalRes =
7288 vnStore->VNPairForFunc(tree->TypeGet(),
7289 GetVNFuncForOper(oper, (tree->gtFlags & GTF_UNSIGNED) != 0), op1vnp,
7291 // Overflow-checking operations add an overflow exception
7292 if (tree->gtOverflowEx())
7294 ValueNum overflowExcSet =
7295 vnStore->VNExcSetSingleton(vnStore->VNForFunc(TYP_REF, VNF_OverflowExc));
7296 excSet = vnStore->VNPExcSetUnion(excSet, ValueNumPair(overflowExcSet, overflowExcSet));
7298 tree->gtVNPair = vnStore->VNPWithExc(normalRes, excSet);
7302 else // ValueNumStore::VNFuncIsLegal returns false
7304 // Some of the genTreeOps that aren't legal VNFuncs so they get special handling.
7309 ValueNumPair op1vnp;
7310 ValueNumPair op1Xvnp = ValueNumStore::VNPForEmptyExcSet();
7311 vnStore->VNPUnpackExc(tree->gtOp.gtOp1->gtVNPair, &op1vnp, &op1Xvnp);
7312 ValueNumPair op2vnp;
7313 ValueNumPair op2Xvnp = ValueNumStore::VNPForEmptyExcSet();
7315 GenTree* op2 = tree->gtGetOp2();
7316 if (op2->OperIsIndir() && ((op2->gtFlags & GTF_IND_ASG_LHS) != 0))
7318 // If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
7319 op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
7321 else if ((op2->OperGet() == GT_CLS_VAR) && (op2->gtFlags & GTF_CLS_VAR_ASG_LHS))
7323 // If op2 represents the lhs of an assignment then we give a VNForVoid for the lhs
7324 op2vnp = ValueNumPair(ValueNumStore::VNForVoid(), ValueNumStore::VNForVoid());
7328 vnStore->VNPUnpackExc(op2->gtVNPair, &op2vnp, &op2Xvnp);
7331 tree->gtVNPair = vnStore->VNPWithExc(op2vnp, vnStore->VNPExcSetUnion(op1Xvnp, op2Xvnp));
7337 // Explicit null check.
7338 // Handle case where operand tree also may cause exceptions.
7339 ValueNumPair excSet = vnStore->VNPExcSetSingleton(
7340 vnStore->VNPairForFunc(TYP_REF, VNF_NullPtrExc,
7341 vnStore->VNPNormVal(tree->gtOp.gtOp1->gtVNPair)));
7342 ValueNumPair excSetBoth =
7343 vnStore->VNPExcSetUnion(excSet, vnStore->VNPExcVal(tree->gtOp.gtOp1->gtVNPair));
7345 tree->gtVNPair = vnStore->VNPWithExc(vnStore->VNPForVoid(), excSetBoth);
7349 case GT_LOCKADD: // Binop
7350 case GT_XADD: // Binop
7351 case GT_XCHG: // Binop
7352 assert(!tree->OperIs(GT_LOCKADD) && "LOCKADD should not appear before lowering");
7353 // For CMPXCHG and other intrinsics add an arbitrary side effect on GcHeap/ByrefExposed.
7354 fgMutateGcHeap(tree DEBUGARG("Interlocked intrinsic"));
7355 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7360 // These nodes never need to have a ValueNumber
7361 tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
7365 // BOX doesn't do anything at this point, the actual object allocation
7366 // and initialization happens separately (and not numbering BOX correctly
7367 // prevents seeing allocation related assertions through it)
7368 tree->gtVNPair = tree->gtGetOp1()->gtVNPair;
7372 // The default action is to give the node a new, unique VN.
7373 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7380 assert(GenTree::OperIsSpecial(oper));
7382 // TBD: We must handle these individually. For now:
7386 fgValueNumberCall(tree->AsCall());
7389 case GT_ARR_BOUNDS_CHECK:
7392 #endif // FEATURE_SIMD
7393 #ifdef FEATURE_HW_INTRINSICS
7394 case GT_HW_INTRINSIC_CHK:
7395 #endif // FEATURE_HW_INTRINSICS
7397 // A bounds check node has no value, but may throw exceptions.
7398 ValueNumPair excSet = vnStore->VNPExcSetSingleton(
7399 vnStore->VNPairForFunc(TYP_REF, VNF_IndexOutOfRangeExc,
7400 vnStore->VNPNormVal(tree->AsBoundsChk()->gtIndex->gtVNPair),
7401 vnStore->VNPNormVal(tree->AsBoundsChk()->gtArrLen->gtVNPair)));
7402 excSet = vnStore->VNPExcSetUnion(excSet, vnStore->VNPExcVal(tree->AsBoundsChk()->gtIndex->gtVNPair));
7403 excSet = vnStore->VNPExcSetUnion(excSet, vnStore->VNPExcVal(tree->AsBoundsChk()->gtArrLen->gtVNPair));
7405 tree->gtVNPair = vnStore->VNPWithExc(vnStore->VNPForVoid(), excSet);
7407 // Record non-constant value numbers that are used as the length argument to bounds checks, so that
7408 // assertion prop will know that comparisons against them are worth analyzing.
7409 ValueNum lengthVN = tree->AsBoundsChk()->gtArrLen->gtVNPair.GetConservative();
7410 if ((lengthVN != ValueNumStore::NoVN) && !vnStore->IsVNConstant(lengthVN))
7412 vnStore->SetVNIsCheckedBound(lengthVN);
7417 case GT_CMPXCHG: // Specialop
7418 // For CMPXCHG and other intrinsics add an arbitrary side effect on GcHeap/ByrefExposed.
7419 fgMutateGcHeap(tree DEBUGARG("Interlocked intrinsic"));
7420 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7424 tree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, tree->TypeGet()));
7430 if (tree->gtVNPair.GetLiberal() != ValueNumStore::NoVN)
7432 printf("N%03u ", tree->gtSeqNum);
7435 gtDispNodeName(tree);
7436 if (tree->OperIsLeaf() || tree->OperIsLocalStore()) // local stores used to be leaves
7438 gtDispLeaf(tree, nullptr);
7441 vnpPrint(tree->gtVNPair, 1);
7448 void Compiler::fgValueNumberIntrinsic(GenTree* tree)
7450 assert(tree->OperGet() == GT_INTRINSIC);
7451 GenTreeIntrinsic* intrinsic = tree->AsIntrinsic();
7452 ValueNumPair arg0VNP, arg1VNP;
7453 ValueNumPair arg0VNPx = ValueNumStore::VNPForEmptyExcSet();
7454 ValueNumPair arg1VNPx = ValueNumStore::VNPForEmptyExcSet();
7456 vnStore->VNPUnpackExc(intrinsic->gtOp.gtOp1->gtVNPair, &arg0VNP, &arg0VNPx);
7458 if (intrinsic->gtOp.gtOp2 != nullptr)
7460 vnStore->VNPUnpackExc(intrinsic->gtOp.gtOp2->gtVNPair, &arg1VNP, &arg1VNPx);
7463 if (IsMathIntrinsic(intrinsic->gtIntrinsicId))
7465 // GT_INTRINSIC is a currently a subtype of binary operators. But most of
7466 // the math intrinsics are actually unary operations.
7468 if (intrinsic->gtOp.gtOp2 == nullptr)
7470 intrinsic->gtVNPair =
7471 vnStore->VNPWithExc(vnStore->EvalMathFuncUnary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP),
7476 ValueNumPair newVNP =
7477 vnStore->EvalMathFuncBinary(tree->TypeGet(), intrinsic->gtIntrinsicId, arg0VNP, arg1VNP);
7478 ValueNumPair excSet = vnStore->VNPExcSetUnion(arg0VNPx, arg1VNPx);
7479 intrinsic->gtVNPair = vnStore->VNPWithExc(newVNP, excSet);
7484 switch (intrinsic->gtIntrinsicId)
7486 case CORINFO_INTRINSIC_Object_GetType:
7487 intrinsic->gtVNPair =
7488 vnStore->VNPWithExc(vnStore->VNPairForFunc(intrinsic->TypeGet(), VNF_ObjGetType, arg0VNP),
7498 void Compiler::fgValueNumberCastTree(GenTree* tree)
7500 assert(tree->OperGet() == GT_CAST);
7502 ValueNumPair srcVNPair = tree->gtOp.gtOp1->gtVNPair;
7503 var_types castToType = tree->CastToType();
7504 var_types castFromType = tree->CastFromType();
7505 bool srcIsUnsigned = ((tree->gtFlags & GTF_UNSIGNED) != 0);
7506 bool hasOverflowCheck = tree->gtOverflowEx();
7508 assert(genActualType(castToType) == genActualType(tree->TypeGet())); // Insure that the resultType is correct
7510 tree->gtVNPair = vnStore->VNPairForCast(srcVNPair, castToType, castFromType, srcIsUnsigned, hasOverflowCheck);
7513 // Compute the normal ValueNumber for a cast operation with no exceptions
7514 ValueNum ValueNumStore::VNForCast(ValueNum srcVN,
7515 var_types castToType,
7516 var_types castFromType,
7517 bool srcIsUnsigned /* = false */)
7519 // The resulting type after performingthe cast is always widened to a supported IL stack size
7520 var_types resultType = genActualType(castToType);
7522 // When we're considering actual value returned by a non-checking cast whether or not the source is
7523 // unsigned does *not* matter for non-widening casts. That is, if we cast an int or a uint to short,
7524 // we just extract the first two bytes from the source bit pattern, not worrying about the interpretation.
7525 // The same is true in casting between signed/unsigned types of the same width. Only when we're doing
7526 // a widening cast do we care about whether the source was unsigned,so we know whether to sign or zero extend it.
7528 bool srcIsUnsignedNorm = srcIsUnsigned;
7529 if (genTypeSize(castToType) <= genTypeSize(castFromType))
7531 srcIsUnsignedNorm = false;
7534 ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsigned);
7535 ValueNum resultVN = VNForFunc(resultType, VNF_Cast, srcVN, castTypeVN);
7538 if (m_pComp->verbose)
7540 printf(" VNForCast(" FMT_VN ", " FMT_VN ") returns ", srcVN, castTypeVN);
7541 m_pComp->vnPrint(resultVN, 1);
7549 // Compute the ValueNumberPair for a cast operation
7550 ValueNumPair ValueNumStore::VNPairForCast(ValueNumPair srcVNPair,
7551 var_types castToType,
7552 var_types castFromType,
7553 bool srcIsUnsigned, /* = false */
7554 bool hasOverflowCheck) /* = false */
7556 // The resulting type after performingthe cast is always widened to a supported IL stack size
7557 var_types resultType = genActualType(castToType);
7559 ValueNumPair castArgVNP;
7560 ValueNumPair castArgxVNP = ValueNumStore::VNPForEmptyExcSet();
7561 VNPUnpackExc(srcVNPair, &castArgVNP, &castArgxVNP);
7563 // When we're considering actual value returned by a non-checking cast (or a checking cast that succeeds),
7564 // whether or not the source is unsigned does *not* matter for non-widening casts.
7565 // That is, if we cast an int or a uint to short, we just extract the first two bytes from the source
7566 // bit pattern, not worrying about the interpretation. The same is true in casting between signed/unsigned
7567 // types of the same width. Only when we're doing a widening cast do we care about whether the source
7568 // was unsigned, so we know whether to sign or zero extend it.
7570 // Important: Casts to floating point cannot be optimized in this fashion. (bug 946768)
7572 bool srcIsUnsignedNorm = srcIsUnsigned;
7573 if (genTypeSize(castToType) <= genTypeSize(castFromType) && !varTypeIsFloating(castToType))
7575 srcIsUnsignedNorm = false;
7578 ValueNum castTypeVN = VNForCastOper(castToType, srcIsUnsignedNorm);
7579 ValueNumPair castTypeVNPair(castTypeVN, castTypeVN);
7580 ValueNumPair castNormRes = VNPairForFunc(resultType, VNF_Cast, castArgVNP, castTypeVNPair);
7582 ValueNumPair resultVNP = VNPWithExc(castNormRes, castArgxVNP);
7584 // If we have a check for overflow, add the exception information.
7585 if (hasOverflowCheck)
7587 // For overflow checking, we always need to know whether the source is unsigned.
7588 castTypeVNPair.SetBoth(VNForCastOper(castToType, srcIsUnsigned));
7589 ValueNumPair excSet =
7590 VNPExcSetSingleton(VNPairForFunc(TYP_REF, VNF_ConvOverflowExc, castArgVNP, castTypeVNPair));
7591 excSet = VNPExcSetUnion(excSet, castArgxVNP);
7592 resultVNP = VNPWithExc(castNormRes, excSet);
7598 void Compiler::fgValueNumberHelperCallFunc(GenTreeCall* call, VNFunc vnf, ValueNumPair vnpExc)
7600 unsigned nArgs = ValueNumStore::VNFuncArity(vnf);
7601 assert(vnf != VNF_Boundary);
7602 GenTreeArgList* args = call->gtCallArgs;
7603 bool generateUniqueVN = false;
7604 bool useEntryPointAddrAsArg0 = false;
7610 generateUniqueVN = true;
7611 vnpExc = ValueNumStore::VNPForEmptyExcSet();
7617 generateUniqueVN = true;
7618 ValueNumPair vnp1 = vnStore->VNPNormVal(args->Rest()->Current()->gtVNPair);
7620 // The New Array helper may throw an overflow exception
7621 vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NewArrOverflowExc, vnp1));
7626 case VNF_BoxNullable:
7628 // Generate unique VN so, VNForFunc generates a uniq value number for box nullable.
7629 // Alternatively instead of using vnpUniq below in VNPairForFunc(...),
7630 // we could use the value number of what the byref arg0 points to.
7632 // But retrieving the value number of what the byref arg0 points to is quite a bit more work
7633 // and doing so only very rarely allows for an additional optimization.
7634 generateUniqueVN = true;
7638 case VNF_JitReadyToRunNew:
7640 generateUniqueVN = true;
7641 vnpExc = ValueNumStore::VNPForEmptyExcSet();
7642 useEntryPointAddrAsArg0 = true;
7646 case VNF_JitReadyToRunNewArr:
7648 generateUniqueVN = true;
7649 ValueNumPair vnp1 = vnStore->VNPNormVal(args->Current()->gtVNPair);
7651 // The New Array helper may throw an overflow exception
7652 vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_NewArrOverflowExc, vnp1));
7653 useEntryPointAddrAsArg0 = true;
7657 case VNF_ReadyToRunStaticBase:
7658 case VNF_ReadyToRunGenericStaticBase:
7659 case VNF_ReadyToRunIsInstanceOf:
7660 case VNF_ReadyToRunCastClass:
7662 useEntryPointAddrAsArg0 = true;
7668 assert(s_helperCallProperties.IsPure(eeGetHelperNum(call->gtCallMethHnd)));
7673 if (generateUniqueVN)
7678 ValueNumPair vnpUniq;
7679 if (generateUniqueVN)
7681 // Generate unique VN so, VNForFunc generates a unique value number.
7682 vnpUniq.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet()));
7685 #if defined(FEATURE_READYTORUN_COMPILER) && defined(_TARGET_ARMARCH_)
7686 if (call->IsR2RRelativeIndir())
7689 assert(args->Current()->OperGet() == GT_ARGPLACE);
7691 // Find the corresponding late arg.
7692 GenTree* indirectCellAddress = call->fgArgInfo->GetLateArg(0);
7693 assert(indirectCellAddress->IsCnsIntOrI() && indirectCellAddress->gtRegNum == REG_R2R_INDIRECT_PARAM);
7695 // For ARM indirectCellAddress is consumed by the call itself, so it should have added as an implicit argument
7696 // in morph. So we do not need to use EntryPointAddrAsArg0, because arg0 is already an entry point addr.
7697 useEntryPointAddrAsArg0 = false;
7699 #endif // FEATURE_READYTORUN_COMPILER && _TARGET_ARMARCH_
7703 if (generateUniqueVN)
7705 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnpUniq);
7709 call->gtVNPair.SetBoth(vnStore->VNForFunc(call->TypeGet(), vnf));
7714 auto getCurrentArg = [call, &args, useEntryPointAddrAsArg0](int currentIndex) {
7715 GenTree* arg = args->Current();
7716 if ((arg->gtFlags & GTF_LATE_ARG) != 0)
7718 // This arg is a setup node that moves the arg into position.
7719 // Value-numbering will have visited the separate late arg that
7720 // holds the actual value, and propagated/computed the value number
7721 // for this arg there.
7722 if (useEntryPointAddrAsArg0)
7724 // The args in the fgArgInfo don't include the entry point, so
7725 // index into them using one less than the requested index.
7728 return call->fgArgInfo->GetLateArg(currentIndex);
7732 // Has at least one argument.
7734 ValueNumPair vnp0x = ValueNumStore::VNPForEmptyExcSet();
7735 #ifdef FEATURE_READYTORUN_COMPILER
7736 if (useEntryPointAddrAsArg0)
7738 ssize_t addrValue = (ssize_t)call->gtEntryPoint.addr;
7739 ValueNum callAddrVN = vnStore->VNForHandle(addrValue, GTF_ICON_FTN_ADDR);
7740 vnp0 = ValueNumPair(callAddrVN, callAddrVN);
7743 #endif // FEATURE_READYTORUN_COMPILER
7745 assert(!useEntryPointAddrAsArg0);
7746 ValueNumPair vnp0wx = getCurrentArg(0)->gtVNPair;
7747 vnStore->VNPUnpackExc(vnp0wx, &vnp0, &vnp0x);
7749 // Also include in the argument exception sets
7750 vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp0x);
7752 args = args->Rest();
7756 if (generateUniqueVN)
7758 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnpUniq);
7762 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0);
7767 // Has at least two arguments.
7768 ValueNumPair vnp1wx = getCurrentArg(1)->gtVNPair;
7770 ValueNumPair vnp1x = ValueNumStore::VNPForEmptyExcSet();
7771 vnStore->VNPUnpackExc(vnp1wx, &vnp1, &vnp1x);
7772 vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp1x);
7774 args = args->Rest();
7777 if (generateUniqueVN)
7779 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnpUniq);
7783 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1);
7788 ValueNumPair vnp2wx = getCurrentArg(2)->gtVNPair;
7790 ValueNumPair vnp2x = ValueNumStore::VNPForEmptyExcSet();
7791 vnStore->VNPUnpackExc(vnp2wx, &vnp2, &vnp2x);
7792 vnpExc = vnStore->VNPExcSetUnion(vnpExc, vnp2x);
7794 args = args->Rest();
7795 assert(nArgs == 3); // Our current maximum.
7796 assert(args == nullptr);
7797 if (generateUniqueVN)
7799 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnp2, vnpUniq);
7803 call->gtVNPair = vnStore->VNPairForFunc(call->TypeGet(), vnf, vnp0, vnp1, vnp2);
7807 // Add the accumulated exceptions.
7808 call->gtVNPair = vnStore->VNPWithExc(call->gtVNPair, vnpExc);
7810 assert(args == nullptr ||
7811 generateUniqueVN); // All arguments should be processed or we generate unique VN and do not care.
7814 void Compiler::fgValueNumberCall(GenTreeCall* call)
7816 // First: do value numbering of any argument placeholder nodes in the argument list
7817 // (by transferring from the VN of the late arg that they are standing in for...)
7819 GenTreeArgList* args = call->gtCallArgs;
7820 bool updatedArgPlace = false;
7821 while (args != nullptr)
7823 GenTree* arg = args->Current();
7824 if (arg->OperGet() == GT_ARGPLACE)
7826 // Find the corresponding late arg.
7827 GenTree* lateArg = call->fgArgInfo->GetLateArg(i);
7828 assert(lateArg->gtVNPair.BothDefined());
7829 arg->gtVNPair = lateArg->gtVNPair;
7830 updatedArgPlace = true;
7834 printf("VN of ARGPLACE tree ");
7835 Compiler::printTreeID(arg);
7836 printf(" updated to ");
7837 vnpPrint(arg->gtVNPair, 1);
7843 args = args->Rest();
7845 if (updatedArgPlace)
7847 // Now we have to update the VN's of the argument list nodes, since that will be used in determining
7849 fgUpdateArgListVNs(call->gtCallArgs);
7852 if (call->gtCallType == CT_HELPER)
7854 bool modHeap = fgValueNumberHelperCall(call);
7858 // For now, arbitrary side effect on GcHeap/ByrefExposed.
7859 fgMutateGcHeap(call DEBUGARG("HELPER - modifies heap"));
7864 if (call->TypeGet() == TYP_VOID)
7866 call->gtVNPair.SetBoth(ValueNumStore::VNForVoid());
7870 call->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet()));
7873 // For now, arbitrary side effect on GcHeap/ByrefExposed.
7874 fgMutateGcHeap(call DEBUGARG("CALL"));
7878 void Compiler::fgUpdateArgListVNs(GenTreeArgList* args)
7880 if (args == nullptr)
7885 fgUpdateArgListVNs(args->Rest());
7886 fgValueNumberTree(args);
7889 VNFunc Compiler::fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc)
7891 assert(s_helperCallProperties.IsPure(helpFunc) || s_helperCallProperties.IsAllocator(helpFunc));
7893 VNFunc vnf = VNF_Boundary; // An illegal value...
7896 // These translate to other function symbols:
7897 case CORINFO_HELP_DIV:
7898 vnf = VNFunc(GT_DIV);
7900 case CORINFO_HELP_MOD:
7901 vnf = VNFunc(GT_MOD);
7903 case CORINFO_HELP_UDIV:
7904 vnf = VNFunc(GT_UDIV);
7906 case CORINFO_HELP_UMOD:
7907 vnf = VNFunc(GT_UMOD);
7909 case CORINFO_HELP_LLSH:
7910 vnf = VNFunc(GT_LSH);
7912 case CORINFO_HELP_LRSH:
7913 vnf = VNFunc(GT_RSH);
7915 case CORINFO_HELP_LRSZ:
7916 vnf = VNFunc(GT_RSZ);
7918 case CORINFO_HELP_LMUL:
7919 case CORINFO_HELP_LMUL_OVF:
7920 vnf = VNFunc(GT_MUL);
7922 case CORINFO_HELP_ULMUL_OVF:
7923 vnf = VNFunc(GT_MUL);
7924 break; // Is this the right thing?
7925 case CORINFO_HELP_LDIV:
7926 vnf = VNFunc(GT_DIV);
7928 case CORINFO_HELP_LMOD:
7929 vnf = VNFunc(GT_MOD);
7931 case CORINFO_HELP_ULDIV:
7932 vnf = VNFunc(GT_UDIV);
7934 case CORINFO_HELP_ULMOD:
7935 vnf = VNFunc(GT_UMOD);
7938 case CORINFO_HELP_LNG2DBL:
7941 case CORINFO_HELP_ULNG2DBL:
7944 case CORINFO_HELP_DBL2INT:
7947 case CORINFO_HELP_DBL2INT_OVF:
7950 case CORINFO_HELP_DBL2LNG:
7953 case CORINFO_HELP_DBL2LNG_OVF:
7956 case CORINFO_HELP_DBL2UINT:
7959 case CORINFO_HELP_DBL2UINT_OVF:
7962 case CORINFO_HELP_DBL2ULNG:
7965 case CORINFO_HELP_DBL2ULNG_OVF:
7968 case CORINFO_HELP_FLTREM:
7969 vnf = VNFunc(GT_MOD);
7971 case CORINFO_HELP_DBLREM:
7972 vnf = VNFunc(GT_MOD);
7974 case CORINFO_HELP_FLTROUND:
7976 break; // Is this the right thing?
7977 case CORINFO_HELP_DBLROUND:
7979 break; // Is this the right thing?
7981 // These allocation operations probably require some augmentation -- perhaps allocSiteId,
7982 // something about array length...
7983 case CORINFO_HELP_NEW_CROSSCONTEXT:
7984 case CORINFO_HELP_NEWFAST:
7985 case CORINFO_HELP_NEWSFAST:
7986 case CORINFO_HELP_NEWSFAST_ALIGN8:
7990 case CORINFO_HELP_READYTORUN_NEW:
7991 vnf = VNF_JitReadyToRunNew;
7994 case CORINFO_HELP_NEWARR_1_DIRECT:
7995 case CORINFO_HELP_NEWARR_1_OBJ:
7996 case CORINFO_HELP_NEWARR_1_VC:
7997 case CORINFO_HELP_NEWARR_1_ALIGN8:
7998 vnf = VNF_JitNewArr;
8001 case CORINFO_HELP_NEWARR_1_R2R_DIRECT:
8002 case CORINFO_HELP_READYTORUN_NEWARR_1:
8003 vnf = VNF_JitReadyToRunNewArr;
8006 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
8007 vnf = VNF_GetgenericsGcstaticBase;
8009 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
8010 vnf = VNF_GetgenericsNongcstaticBase;
8012 case CORINFO_HELP_GETSHARED_GCSTATIC_BASE:
8013 vnf = VNF_GetsharedGcstaticBase;
8015 case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE:
8016 vnf = VNF_GetsharedNongcstaticBase;
8018 case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR:
8019 vnf = VNF_GetsharedGcstaticBaseNoctor;
8021 case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR:
8022 vnf = VNF_GetsharedNongcstaticBaseNoctor;
8024 case CORINFO_HELP_READYTORUN_STATIC_BASE:
8025 vnf = VNF_ReadyToRunStaticBase;
8027 case CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE:
8028 vnf = VNF_ReadyToRunGenericStaticBase;
8030 case CORINFO_HELP_GETSHARED_GCSTATIC_BASE_DYNAMICCLASS:
8031 vnf = VNF_GetsharedGcstaticBaseDynamicclass;
8033 case CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_DYNAMICCLASS:
8034 vnf = VNF_GetsharedNongcstaticBaseDynamicclass;
8036 case CORINFO_HELP_CLASSINIT_SHARED_DYNAMICCLASS:
8037 vnf = VNF_ClassinitSharedDynamicclass;
8039 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
8040 vnf = VNF_GetgenericsGcthreadstaticBase;
8042 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
8043 vnf = VNF_GetgenericsNongcthreadstaticBase;
8045 case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE:
8046 vnf = VNF_GetsharedGcthreadstaticBase;
8048 case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE:
8049 vnf = VNF_GetsharedNongcthreadstaticBase;
8051 case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR:
8052 vnf = VNF_GetsharedGcthreadstaticBaseNoctor;
8054 case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR:
8055 vnf = VNF_GetsharedNongcthreadstaticBaseNoctor;
8057 case CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS:
8058 vnf = VNF_GetsharedGcthreadstaticBaseDynamicclass;
8060 case CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS:
8061 vnf = VNF_GetsharedNongcthreadstaticBaseDynamicclass;
8063 case CORINFO_HELP_GETSTATICFIELDADDR_CONTEXT:
8064 vnf = VNF_GetStaticAddrContext;
8066 case CORINFO_HELP_GETSTATICFIELDADDR_TLS:
8067 vnf = VNF_GetStaticAddrTLS;
8070 case CORINFO_HELP_RUNTIMEHANDLE_METHOD:
8071 case CORINFO_HELP_RUNTIMEHANDLE_METHOD_LOG:
8072 vnf = VNF_RuntimeHandleMethod;
8075 case CORINFO_HELP_RUNTIMEHANDLE_CLASS:
8076 case CORINFO_HELP_RUNTIMEHANDLE_CLASS_LOG:
8077 vnf = VNF_RuntimeHandleClass;
8080 case CORINFO_HELP_STRCNS:
8084 case CORINFO_HELP_CHKCASTCLASS:
8085 case CORINFO_HELP_CHKCASTCLASS_SPECIAL:
8086 case CORINFO_HELP_CHKCASTARRAY:
8087 case CORINFO_HELP_CHKCASTINTERFACE:
8088 case CORINFO_HELP_CHKCASTANY:
8089 vnf = VNF_CastClass;
8092 case CORINFO_HELP_READYTORUN_CHKCAST:
8093 vnf = VNF_ReadyToRunCastClass;
8096 case CORINFO_HELP_ISINSTANCEOFCLASS:
8097 case CORINFO_HELP_ISINSTANCEOFINTERFACE:
8098 case CORINFO_HELP_ISINSTANCEOFARRAY:
8099 case CORINFO_HELP_ISINSTANCEOFANY:
8100 vnf = VNF_IsInstanceOf;
8103 case CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE:
8104 vnf = VNF_TypeHandleToRuntimeType;
8107 case CORINFO_HELP_READYTORUN_ISINSTANCEOF:
8108 vnf = VNF_ReadyToRunIsInstanceOf;
8111 case CORINFO_HELP_LDELEMA_REF:
8115 case CORINFO_HELP_UNBOX:
8119 // A constant within any method.
8120 case CORINFO_HELP_GETCURRENTMANAGEDTHREADID:
8121 vnf = VNF_ManagedThreadId;
8124 case CORINFO_HELP_GETREFANY:
8125 // TODO-CQ: This should really be interpreted as just a struct field reference, in terms of values.
8126 vnf = VNF_GetRefanyVal;
8129 case CORINFO_HELP_GETCLASSFROMMETHODPARAM:
8130 vnf = VNF_GetClassFromMethodParam;
8133 case CORINFO_HELP_GETSYNCFROMCLASSHANDLE:
8134 vnf = VNF_GetSyncFromClassHandle;
8137 case CORINFO_HELP_LOOP_CLONE_CHOICE_ADDR:
8138 vnf = VNF_LoopCloneChoiceAddr;
8141 case CORINFO_HELP_BOX:
8145 case CORINFO_HELP_BOX_NULLABLE:
8146 vnf = VNF_BoxNullable;
8153 assert(vnf != VNF_Boundary);
8157 bool Compiler::fgValueNumberHelperCall(GenTreeCall* call)
8159 CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
8160 bool pure = s_helperCallProperties.IsPure(helpFunc);
8161 bool isAlloc = s_helperCallProperties.IsAllocator(helpFunc);
8162 bool modHeap = s_helperCallProperties.MutatesHeap(helpFunc);
8163 bool mayRunCctor = s_helperCallProperties.MayRunCctor(helpFunc);
8164 bool noThrow = s_helperCallProperties.NoThrow(helpFunc);
8166 ValueNumPair vnpExc = ValueNumStore::VNPForEmptyExcSet();
8168 // If the JIT helper can throw an exception make sure that we fill in
8169 // vnpExc with a Value Number that represents the exception(s) that can be thrown.
8172 // If the helper is known to only throw only one particular exception
8173 // we can set vnpExc to that exception, otherwise we conservatively
8174 // model the JIT helper as possibly throwing multiple different exceptions
8178 case CORINFO_HELP_OVERFLOW:
8179 // This helper always throws the VNF_OverflowExc exception
8180 vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_OverflowExc));
8184 // Setup vnpExc with the information that multiple different exceptions
8185 // could be generated by this helper
8186 vnpExc = vnStore->VNPExcSetSingleton(vnStore->VNPairForFunc(TYP_REF, VNF_HelperMultipleExc));
8190 ValueNumPair vnpNorm;
8192 if (call->TypeGet() == TYP_VOID)
8194 vnpNorm = ValueNumStore::VNPForVoid();
8198 // TODO-CQ: this is a list of helpers we're going to treat as non-pure,
8199 // because they raise complications. Eventually, we need to handle those complications...
8200 bool needsFurtherWork = false;
8203 case CORINFO_HELP_NEW_MDARR:
8204 // This is a varargs helper. We need to represent the array shape in the VN world somehow.
8205 needsFurtherWork = true;
8211 if (!needsFurtherWork && (pure || isAlloc))
8213 VNFunc vnf = fgValueNumberHelperMethVNFunc(helpFunc);
8217 if ((call->gtFlags & GTF_CALL_HOISTABLE) == 0)
8223 fgValueNumberHelperCallFunc(call, vnf, vnpExc);
8228 vnpNorm.SetBoth(vnStore->VNForExpr(compCurBB, call->TypeGet()));
8232 call->gtVNPair = vnStore->VNPWithExc(vnpNorm, vnpExc);
8237 // This method asserts that SSA name constraints specified are satisfied.
8238 // Until we figure out otherwise, all VN's are assumed to be liberal.
8239 // TODO-Cleanup: new JitTestLabels for lib vs cons vs both VN classes?
8240 void Compiler::JitTestCheckVN()
8242 typedef JitHashTable<ssize_t, JitSmallPrimitiveKeyFuncs<ssize_t>, ValueNum> LabelToVNMap;
8243 typedef JitHashTable<ValueNum, JitSmallPrimitiveKeyFuncs<ValueNum>, ssize_t> VNToLabelMap;
8245 // If we have no test data, early out.
8246 if (m_nodeTestData == nullptr)
8251 NodeToTestDataMap* testData = GetNodeTestData();
8253 // First we have to know which nodes in the tree are reachable.
8254 typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, int> NodeToIntMap;
8255 NodeToIntMap* reachable = FindReachableNodesInNodeTestData();
8257 LabelToVNMap* labelToVN = new (getAllocatorDebugOnly()) LabelToVNMap(getAllocatorDebugOnly());
8258 VNToLabelMap* vnToLabel = new (getAllocatorDebugOnly()) VNToLabelMap(getAllocatorDebugOnly());
8262 printf("\nJit Testing: Value numbering.\n");
8264 for (NodeToTestDataMap::KeyIterator ki = testData->Begin(); !ki.Equal(testData->End()); ++ki)
8266 TestLabelAndNum tlAndN;
8267 GenTree* node = ki.Get();
8268 ValueNum nodeVN = node->GetVN(VNK_Liberal);
8270 bool b = testData->Lookup(node, &tlAndN);
8272 if (tlAndN.m_tl == TL_VN || tlAndN.m_tl == TL_VNNorm)
8275 if (!reachable->Lookup(node, &dummy))
8278 Compiler::printTreeID(node);
8279 printf(" had a test constraint declared, but has become unreachable at the time the constraint is "
8281 "(This is probably as a result of some optimization -- \n"
8282 "you may need to modify the test case to defeat this opt.)\n");
8289 Compiler::printTreeID(node);
8290 printf(" -- VN class %d.\n", tlAndN.m_num);
8293 if (tlAndN.m_tl == TL_VNNorm)
8295 nodeVN = vnStore->VNNormVal(nodeVN);
8299 if (labelToVN->Lookup(tlAndN.m_num, &vn))
8303 printf(" Already in hash tables.\n");
8305 // The mapping(s) must be one-to-one: if the label has a mapping, then the ssaNm must, as well.
8307 bool b = vnToLabel->Lookup(vn, &num2);
8308 // And the mappings must be the same.
8309 if (tlAndN.m_num != num2)
8312 Compiler::printTreeID(node);
8313 printf(", with value number " FMT_VN ", was declared in VN class %d,\n", nodeVN, tlAndN.m_num);
8314 printf("but this value number " FMT_VN
8315 " has already been associated with a different SSA name class: %d.\n",
8319 // And the current node must be of the specified SSA family.
8323 Compiler::printTreeID(node);
8324 printf(", " FMT_VN " was declared in SSA name class %d,\n", nodeVN, tlAndN.m_num);
8325 printf("but that name class was previously bound to a different value number: " FMT_VN ".\n", vn);
8332 // The mapping(s) must be one-to-one: if the label has no mapping, then the ssaNm may not, either.
8333 if (vnToLabel->Lookup(nodeVN, &num))
8336 Compiler::printTreeID(node);
8337 printf(", " FMT_VN " was declared in value number class %d,\n", nodeVN, tlAndN.m_num);
8339 "but this value number has already been associated with a different value number class: %d.\n",
8343 // Add to both mappings.
8344 labelToVN->Set(tlAndN.m_num, nodeVN);
8345 vnToLabel->Set(nodeVN, tlAndN.m_num);
8348 printf(" added to hash tables.\n");
8355 void Compiler::vnpPrint(ValueNumPair vnp, unsigned level)
8357 if (vnp.BothEqual())
8359 vnPrint(vnp.GetLiberal(), level);
8364 vnPrint(vnp.GetLiberal(), level);
8366 vnPrint(vnp.GetConservative(), level);
8371 void Compiler::vnPrint(ValueNum vn, unsigned level)
8374 if (ValueNumStore::isReservedVN(vn))
8376 printf(ValueNumStore::reservedName(vn));
8383 vnStore->vnDump(this, vn);
8390 // Methods of ValueNumPair.
8391 ValueNumPair::ValueNumPair() : m_liberal(ValueNumStore::NoVN), m_conservative(ValueNumStore::NoVN)
8395 bool ValueNumPair::BothDefined() const
8397 return (m_liberal != ValueNumStore::NoVN) && (m_conservative != ValueNumStore::NoVN);