// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// ---------------------------------------------------------------------------
// Utilities to write native data to images that can be read by the NativeFormat.Reader class
// ---------------------------------------------------------------------------

#include "nativeformatwriter.h"

#include <clr_std/algorithm>

namespace NativeFormat
{
    // Same encoding as what's used by CTL
    void NativeWriter::WriteUnsigned(unsigned d)
    {
        if (d < 128)
        {
            WriteByte((byte)(d*2 + 0));
        }
        else if (d < 128*128)
        {
            WriteByte((byte)(d*4 + 1));
            WriteByte((byte)(d >> 6));
        }
        else if (d < 128*128*128)
        {
            WriteByte((byte)(d*8 + 3));
            WriteByte((byte)(d >> 5));
            WriteByte((byte)(d >> 13));
        }
        else if (d < 128*128*128*128)
        {
            WriteByte((byte)(d*16 + 7));
            WriteByte((byte)(d >> 4));
            WriteByte((byte)(d >> 12));
            WriteByte((byte)(d >> 20));
        }
        else
        {
            // Five-byte encoding: prefix byte 15 followed by the full 32-bit value
            WriteByte((byte)15);
            WriteUInt32(d);
        }
    }
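    // Example: WriteUnsigned(300) takes the two-byte branch (300 < 128*128) and emits 0xB1 0x04:
    // 300*4 + 1 == 0x4B1, whose low byte 0xB1 carries the single trailing '1' bit that marks a
    // two-byte value, and a reader recovers the original as (0xB1 >> 2) | (0x04 << 6) == 300.
    // In general, the number of trailing one bits in the first byte tells the reader how many
    // additional bytes follow.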
    unsigned NativeWriter::GetUnsignedEncodingSize(unsigned d)
    {
        if (d < 128) return 1;
        if (d < 128*128) return 2;
        if (d < 128*128*128) return 3;
        if (d < 128*128*128*128) return 4;
        return 5;
    }
    void NativeWriter::WriteSigned(int i)
    {
        unsigned d = (unsigned)i;
        if (d + 64 < 128)
        {
            WriteByte((byte)(d*2 + 0));
        }
        else if (d + 64*128 < 128*128)
        {
            WriteByte((byte)(d*4 + 1));
            WriteByte((byte)(d >> 6));
        }
        else if (d + 64*128*128 < 128*128*128)
        {
            WriteByte((byte)(d*8 + 3));
            WriteByte((byte)(d >> 5));
            WriteByte((byte)(d >> 13));
        }
        else if (d + 64*128*128*128 < 128*128*128*128)
        {
            WriteByte((byte)(d*16 + 7));
            WriteByte((byte)(d >> 4));
            WriteByte((byte)(d >> 12));
            WriteByte((byte)(d >> 20));
        }
        else
        {
            // Five-byte encoding: prefix byte 15 followed by the full 32-bit value
            WriteByte((byte)15);
            WriteUInt32(d);
        }
    }
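    // Example: the biased comparisons select the shortest encoding whose payload can sign-extend back
    // to the original value. WriteSigned(-1) has d == 0xFFFFFFFF, and since d + 64 wraps around to 63
    // (< 128), it emits the single byte 0xFE; a reader shifts out the marker bit and sign-extends the
    // remaining 7-bit payload 0x7F back to -1.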
    void NativeWriter::WriteRelativeOffset(Vertex * pVal)
    {
        WriteSigned(GetExpectedOffset(pVal) - GetCurrentOffset());
    }
    int NativeWriter::GetExpectedOffset(Vertex * pVal)
    {
        assert(pVal->m_offset != Vertex::NotPlaced);

        if (pVal->m_iteration == -1)
        {
            // If the offsets are not determined yet, use the maximum possible encoding
            return 0x7FFFFFFF;
        }

        int offset = pVal->m_offset;

        // If the offset has not been updated in this iteration yet, adjust it by the delta we have
        // accumulated in this iteration so far. This adjustment allows the offsets to converge faster.
        if (pVal->m_iteration < m_iteration)
            offset += m_offsetAdjustment;

        return offset;
    }
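    // Example: if a vertex settled at offset 100 in the previous iteration and this iteration has
    // already accumulated a net shrink of 10 bytes, relative offsets written now anticipate the vertex
    // at offset 90 instead of the stale 100, which typically saves an extra convergence pass.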
    vector<byte>& NativeWriter::Save()
    {
        assert(m_phase == Initial);

        for (auto pSection : m_Sections) for (auto pVertex : *pSection)
        {
            pVertex->m_offset = GetCurrentOffset();
            pVertex->m_iteration = m_iteration;
            pVertex->Save(this);
        }

        // Aggressive phase that only allows offsets to shrink.
        m_phase = Shrinking;
        for (;;)
        {
            m_iteration++;
            m_Buffer.clear();

            m_offsetAdjustment = 0;

            for (auto pSection : m_Sections) for (auto pVertex : *pSection)
            {
                int currentOffset = GetCurrentOffset();

                // Only allow the offsets to shrink.
                m_offsetAdjustment = min(m_offsetAdjustment, currentOffset - pVertex->m_offset);

                pVertex->m_offset += m_offsetAdjustment;

                if (pVertex->m_offset < currentOffset)
                {
                    // It is possible for the encoding of relative offsets to grow during some iterations.
                    // Ignore this growth because it should disappear during the next iteration.
                    RollbackTo(pVertex->m_offset);
                }
                assert(pVertex->m_offset == GetCurrentOffset());

                pVertex->m_iteration = m_iteration;
                pVertex->Save(this);
            }

            // We are not able to shrink anymore. We cannot just return here: it is possible that we have
            // rolled back above because we shrank too much.
            if (m_offsetAdjustment == 0)
                break;

            // Limit the number of shrinking iterations. This limit is meant to be hit in corner cases only.
            if (m_iteration > 10)
                break;
        }

        // Conservative phase that only allows the offsets to grow. It is guaranteed to converge.
        m_phase = Growing;
        for (;;)
        {
            m_iteration++;
            m_Buffer.clear();

            m_offsetAdjustment = 0;
            m_paddingSize = 0;

            for (auto pSection : m_Sections) for (auto pVertex : *pSection)
            {
                int currentOffset = GetCurrentOffset();

                // Only allow the offsets to grow.
                m_offsetAdjustment = max(m_offsetAdjustment, currentOffset - pVertex->m_offset);

                pVertex->m_offset += m_offsetAdjustment;

                if (pVertex->m_offset > currentOffset)
                {
                    // Pad the gap so that the vertex lands on its expected offset.
                    int padding = pVertex->m_offset - currentOffset;
                    m_paddingSize += padding;
                    WritePad(padding);
                }
                assert(pVertex->m_offset == GetCurrentOffset());

                pVertex->m_iteration = m_iteration;
                pVertex->Save(this);
            }

            // All offsets have settled; the layout is final.
            if (m_offsetAdjustment == 0)
                return m_Buffer;
        }
    }
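    // A minimal usage sketch (illustrative only; "NewSection" is an assumed factory name, the actual
    // creation API is declared in nativeformatwriter.h):
    //
    //   NativeWriter writer;
    //   NativeSection * pSection = writer.NewSection();  // assumed helper that registers a section
    //   pSection->Place(pVertex);                        // queue vertices for layout
    //   vector<byte>& blob = writer.Save();              // iterate until the offsets converge
    //
    // The shrink-only phase squeezes out slack quickly, while the grow-only phase guarantees
    // termination, since offsets can then move in one direction only.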
    Vertex * NativeSection::Place(Vertex * pVertex)
    {
        assert(pVertex->m_offset == Vertex::NotPlaced);
        pVertex->m_offset = Vertex::Placed;
        push_back(pVertex);

        return pVertex;
    }
    Vertex * VertexArray::ExpandBlock(size_t index, int depth, bool place, bool * pIsLeaf)
    {
        if (depth == 1)
        {
            Vertex * pFirst = (index < m_Entries.size()) ? m_Entries[index] : nullptr;
            Vertex * pSecond = ((index + 1) < m_Entries.size()) ? m_Entries[index + 1] : nullptr;

            if (pFirst == nullptr && pSecond == nullptr)
                return nullptr;

            if (pFirst == nullptr || pSecond == nullptr)
            {
                // Exactly one entry survives: emit a leaf that remembers which slot it occupies
                VertexLeaf * pLeaf = new VertexLeaf();
                if (place)
                    m_pSection->Place(pLeaf);

                pLeaf->m_pVertex = (pFirst == nullptr) ? pSecond : pFirst;
                pLeaf->m_leafIndex = ((pFirst == nullptr) ? (index + 1) : index) & (_blockSize - 1);

                *pIsLeaf = true;
                return pLeaf;
            }

            VertexTree * pTree = new VertexTree();
            if (place)
                m_pSection->Place(pTree);

            pTree->m_pFirst = pFirst;
            pTree->m_pSecond = pSecond;

            m_pSection->Place(pSecond);

            return pTree;
        }
        else
        {
            VertexTree * pTree = new VertexTree();
            if (place)
                m_pSection->Place(pTree);

            bool fFirstIsLeaf = false, fSecondIsLeaf = false;
            Vertex * pFirst = ExpandBlock(index, depth - 1, false, &fFirstIsLeaf);
            Vertex * pSecond = ExpandBlock(index + (1 << (depth - 1)), depth - 1, true, &fSecondIsLeaf);

            Vertex * pPop;

            if (pFirst == nullptr && pSecond == nullptr)
            {
                // The entire subtree is empty: undo the tentative placement of the tree node
                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }
                delete pTree;
                return nullptr;
            }

            if ((pFirst == nullptr) && fSecondIsLeaf)
            {
                // Use the second leaf directly instead of the tree node
                pPop = m_pSection->Pop();
                assert(pPop == pSecond);

                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }
                delete pTree;

                if (place)
                    m_pSection->Place(pSecond);

                *pIsLeaf = true;
                return pSecond;
            }

            if ((pSecond == nullptr) && fFirstIsLeaf)
            {
                // Use the first leaf directly instead of the tree node
                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }
                delete pTree;

                if (place)
                    m_pSection->Place(pFirst);

                *pIsLeaf = true;
                return pFirst;
            }

            pTree->m_pFirst = pFirst;
            pTree->m_pSecond = pSecond;

            return pTree;
        }
    }
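    // Example: ExpandLayout calls this with depth == 4, so a fully populated block covers 2^4 == 16
    // consecutive entries as a balanced binary tree with pairs of entries at the leaves. Sparse blocks
    // collapse: a subtree with a single surviving child is replaced by that child, and a lone entry
    // becomes a VertexLeaf that records its slot within the block in m_leafIndex, so empty regions of
    // the array cost no space in the image.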
    void VertexArray::ExpandLayout()
    {
        VertexLeaf * pNullBlock = nullptr;
        for (size_t i = 0; i < m_Entries.size(); i += _blockSize)
        {
            bool fIsLeaf = false;
            Vertex * pBlock = ExpandBlock(i, 4, true, &fIsLeaf);

            if (pBlock == nullptr)
            {
                // Lazily create a single shared leaf that stands in for all empty blocks
                if (pNullBlock == nullptr)
                {
                    pNullBlock = new VertexLeaf();
                    pNullBlock->m_leafIndex = _blockSize;
                    pNullBlock->m_pVertex = nullptr;
                    m_pSection->Place(pNullBlock);
                }
                pBlock = pNullBlock;
            }

            m_Blocks.push_back(pBlock);
        }

        // Start with maximum size entries
        m_entryIndexSize = 2;
    }
    void VertexArray::VertexLeaf::Save(NativeWriter * pWriter)
    {
        pWriter->WriteUnsigned(m_leafIndex << 2);

        if (m_pVertex != nullptr)
            m_pVertex->Save(pWriter);
    }
    void VertexArray::VertexTree::Save(NativeWriter * pWriter)
    {
        unsigned value = (m_pFirst != nullptr) ? 1 : 0;

        if (m_pSecond != nullptr)
        {
            value |= 2;

            int delta = pWriter->GetExpectedOffset(m_pSecond) - pWriter->GetCurrentOffset();
            assert(delta >= 0);
            value |= (delta << 2);
        }

        pWriter->WriteUnsigned(value);

        if (m_pFirst != nullptr)
            m_pFirst->Save(pWriter);
    }
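    // Example: the header written for a tree node packs three fields into one unsigned value: bit 0 says
    // whether the first child exists (it is encoded inline right after the header), bit 1 says whether the
    // second child exists, and the remaining bits hold the forward delta to the separately placed second
    // child. A node with both children and a second child 5 bytes ahead writes 1 | 2 | (5 << 2) == 23.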
    void VertexArray::Save(NativeWriter * pWriter)
    {
        // Lowest two bits are entry index size, the rest is number of elements
        pWriter->WriteUnsigned((m_Entries.size() << 2) | m_entryIndexSize);

        int blocksOffset = pWriter->GetCurrentOffset();
        int maxOffset = 0;

        for (auto pBlock : m_Blocks)
        {
            int offset = pWriter->GetExpectedOffset(pBlock) - blocksOffset;
            assert(offset >= 0);

            maxOffset = max(offset, maxOffset);

            if (m_entryIndexSize == 0)
            {
                pWriter->WriteByte((byte)offset);
            }
            else if (m_entryIndexSize == 1)
            {
                pWriter->WriteUInt16((UInt16)offset);
            }
            else
            {
                pWriter->WriteUInt32((UInt32)offset);
            }
        }

        int newEntryIndexSize = 0;
        if (maxOffset > 0xFF)
        {
            newEntryIndexSize++;
            if (maxOffset > 0xFFFF)
                newEntryIndexSize++;
        }

        if (pWriter->IsGrowing())
        {
            if (newEntryIndexSize > m_entryIndexSize)
            {
                // Ensure that the table will be redone with the new entry index size
                pWriter->UpdateOffsetAdjustment(1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
        else
        {
            if (newEntryIndexSize < m_entryIndexSize)
            {
                // Ensure that the table will be redone with the new entry index size
                pWriter->UpdateOffsetAdjustment(-1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
    }
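    // Note: the entry index size is renegotiated in one direction per phase. While the writer is growing,
    // the block offset table may only widen its entries (1 -> 2 -> 4 bytes); while it is shrinking, it may
    // only narrow them. Nudging the offset adjustment by +/-1 simply forces one more iteration so that the
    // table is rewritten with the new entry width.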
    // Returns the position of the highest set bit plus one (i.e. 1 + floor(log2(x))), 0 iff x == 0
    static unsigned HighestBit(unsigned x)
    {
        unsigned ret = 0;
        while (x != 0)
        {
            x >>= 1;
            ret++;
        }
        return ret;
    }
    // Helper method to back-patch an entry index in the bucket table
    static void PatchEntryIndex(NativeWriter * pWriter, int patchOffset, int entryIndexSize, int entryIndex)
    {
        if (entryIndexSize == 0)
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
        }
        else if (entryIndexSize == 1)
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
            pWriter->PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
        }
        else
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
            pWriter->PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
            pWriter->PatchByteAt(patchOffset + 2, (byte)(entryIndex >> 16));
            pWriter->PatchByteAt(patchOffset + 3, (byte)(entryIndex >> 24));
        }
    }
    void VertexHashtable::Save(NativeWriter * pWriter)
    {
        // Compute the layout of the table if we have not done it yet
        if (m_nBuckets == 0)
            ComputeLayout();

        int nEntries = (int)m_Entries.size();
        int startOffset = pWriter->GetCurrentOffset();
        int bucketMask = (m_nBuckets - 1);

        // Lowest two bits are entry index size, the rest is log2 number of buckets
        int numberOfBucketsShift = HighestBit(m_nBuckets) - 1;
        pWriter->WriteByte(static_cast<uint8_t>((numberOfBucketsShift << 2) | m_entryIndexSize));

        int bucketsOffset = pWriter->GetCurrentOffset();

        pWriter->WritePad((m_nBuckets + 1) << m_entryIndexSize);

        // For faster lookup at runtime, we store the first entry index even though it is redundant (the
        // value can be inferred from the number of buckets)
        PatchEntryIndex(pWriter, bucketsOffset, m_entryIndexSize, pWriter->GetCurrentOffset() - bucketsOffset);

        int iEntry = 0;

        for (int iBucket = 0; iBucket < m_nBuckets; iBucket++)
        {
            while (iEntry < nEntries)
            {
                Entry &e = m_Entries[iEntry];

                if (((e.hashcode >> 8) & bucketMask) != (unsigned)iBucket)
                    break;

                int currentOffset = pWriter->GetCurrentOffset();
                pWriter->UpdateOffsetAdjustment(currentOffset - e.offset);
                e.offset = currentOffset;

                pWriter->WriteByte((byte)e.hashcode);
                pWriter->WriteRelativeOffset(e.pVertex);

                iEntry++;
            }

            int patchOffset = bucketsOffset + ((iBucket + 1) << m_entryIndexSize);

            PatchEntryIndex(pWriter, patchOffset, m_entryIndexSize, pWriter->GetCurrentOffset() - bucketsOffset);
        }
        assert(iEntry == nEntries);

        int maxIndexEntry = (pWriter->GetCurrentOffset() - bucketsOffset);
        int newEntryIndexSize = 0;
        if (maxIndexEntry > 0xFF)
        {
            newEntryIndexSize++;
            if (maxIndexEntry > 0xFFFF)
                newEntryIndexSize++;
        }

        if (pWriter->IsGrowing())
        {
            if (newEntryIndexSize > m_entryIndexSize)
            {
                // Ensure that the table will be redone with the new entry index size
                pWriter->UpdateOffsetAdjustment(1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
        else
        {
            if (newEntryIndexSize < m_entryIndexSize)
            {
                // Ensure that the table will be redone with the new entry index size
                pWriter->UpdateOffsetAdjustment(-1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
    }
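    // Illustrative layout of the serialized table (e.g. for 4 buckets with 1-byte entry indices):
    //
    //   header byte:          (log2(buckets) << 2) | entryIndexSize
    //   buckets + 1 indices:  index i and index i+1 bracket the entries belonging to bucket i
    //   entries:              one (low hash byte, relative offset to vertex) pair per entry
    //
    // A reader picks the bucket with (hashcode >> 8) & bucketMask and compares the stored low hash byte
    // before chasing the relative offset, so most misses are rejected without touching the target vertex.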
    void VertexHashtable::ComputeLayout()
    {
        unsigned bucketsEstimate = (unsigned)(m_Entries.size() / m_nFillFactor);

        // Round the number of buckets up to a power of two
        m_nBuckets = 1 << HighestBit(bucketsEstimate);

        // The lowest byte of the hashcode is used for lookup within the bucket. Keep the entries sorted
        // by it too so that the ordering can be used to terminate a lookup early.
        unsigned mask = ((m_nBuckets - 1) << 8) | 0xFF;

        // Sort the entries by hashcode
        std::sort(m_Entries.begin(), m_Entries.end(),
            [=](Entry const& a, Entry const& b)
            {
                return (a.hashcode & mask) < (b.hashcode & mask);
            }
        );

        // Start with maximum size entries
        m_entryIndexSize = 2;
    }
}