2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2006 Erwin Coumans https://bulletphysics.org
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose,
8 including commercial applications, and to alter it and redistribute it freely,
9 subject to the following restrictions:
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
16 #include "btQuantizedBvh.h"
18 #include "LinearMath/btAabbUtil2.h"
19 #include "LinearMath/btIDebugDraw.h"
20 #include "LinearMath/btSerializer.h"
24 btQuantizedBvh::btQuantizedBvh() : m_bulletVersion(BT_BULLET_VERSION),
25 m_useQuantization(false),
26 //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
27 m_traversalMode(TRAVERSAL_STACKLESS)
28 //m_traversalMode(TRAVERSAL_RECURSIVE)
30 m_subtreeHeaderCount(0) //PCK: add this line
32 m_bvhAabbMin.setValue(-SIMD_INFINITY, -SIMD_INFINITY, -SIMD_INFINITY);
33 m_bvhAabbMax.setValue(SIMD_INFINITY, SIMD_INFINITY, SIMD_INFINITY);
36 void btQuantizedBvh::buildInternal()
38 ///assumes that caller filled in the m_quantizedLeafNodes
39 m_useQuantization = true;
42 if (m_useQuantization)
44 //now we have an array of leafnodes in m_leafNodes
45 numLeafNodes = m_quantizedLeafNodes.size();
47 m_quantizedContiguousNodes.resize(2 * numLeafNodes);
52 buildTree(0, numLeafNodes);
54 ///if the entire tree is small then subtree size, we need to create a header info for the tree
55 if (m_useQuantization && !m_SubtreeHeaders.size())
57 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
58 subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
59 subtree.m_rootNodeIndex = 0;
60 subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
63 //PCK: update the copy of the size
64 m_subtreeHeaderCount = m_SubtreeHeaders.size();
66 //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
67 m_quantizedLeafNodes.clear();
///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
// NOTE(review): palette used when debug-drawing subtree patches — confirm against debug draw code
btVector3 color[4] =
	{
		btVector3(1, 0, 0),
		btVector3(0, 1, 0),
		btVector3(0, 0, 1),
		btVector3(0, 1, 1)};
#endif  //DEBUG_PATCH_COLORS
81 void btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin, const btVector3& bvhAabbMax, btScalar quantizationMargin)
83 //enlarge the AABB to avoid division by zero when initializing the quantization values
84 btVector3 clampValue(quantizationMargin, quantizationMargin, quantizationMargin);
85 m_bvhAabbMin = bvhAabbMin - clampValue;
86 m_bvhAabbMax = bvhAabbMax + clampValue;
87 btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
88 m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;
90 m_useQuantization = true;
93 unsigned short vecIn[3];
96 quantize(vecIn, m_bvhAabbMin, false);
97 v = unQuantize(vecIn);
98 m_bvhAabbMin.setMin(v - clampValue);
100 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
101 m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;
103 quantize(vecIn, m_bvhAabbMax, true);
104 v = unQuantize(vecIn);
105 m_bvhAabbMax.setMax(v + clampValue);
107 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
108 m_bvhQuantization = btVector3(btScalar(65533.0), btScalar(65533.0), btScalar(65533.0)) / aabbSize;
112 btQuantizedBvh::~btQuantizedBvh()
#ifdef DEBUG_TREE_BUILDING
//track the recursion depth of buildTree() while debugging tree construction
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif  //DEBUG_TREE_BUILDING
121 void btQuantizedBvh::buildTree(int startIndex, int endIndex)
123 #ifdef DEBUG_TREE_BUILDING
125 if (gStackDepth > gMaxStackDepth)
126 gMaxStackDepth = gStackDepth;
127 #endif //DEBUG_TREE_BUILDING
129 int splitAxis, splitIndex, i;
130 int numIndices = endIndex - startIndex;
131 int curIndex = m_curNodeIndex;
133 btAssert(numIndices > 0);
137 #ifdef DEBUG_TREE_BUILDING
139 #endif //DEBUG_TREE_BUILDING
141 assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex);
146 //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.
148 splitAxis = calcSplittingAxis(startIndex, endIndex);
150 splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis);
152 int internalNodeIndex = m_curNodeIndex;
154 //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
155 //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
156 setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax); //can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
157 setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin); //can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization
159 for (i = startIndex; i < endIndex; i++)
161 mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i));
166 //internalNode->m_escapeIndex;
168 int leftChildNodexIndex = m_curNodeIndex;
170 //build left child tree
171 buildTree(startIndex, splitIndex);
173 int rightChildNodexIndex = m_curNodeIndex;
174 //build right child tree
175 buildTree(splitIndex, endIndex);
177 #ifdef DEBUG_TREE_BUILDING
179 #endif //DEBUG_TREE_BUILDING
181 int escapeIndex = m_curNodeIndex - curIndex;
183 if (m_useQuantization)
185 //escapeIndex is the number of nodes of this subtree
186 const int sizeQuantizedNode = sizeof(btQuantizedBvhNode);
187 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
188 if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
190 updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex);
197 setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex);
200 void btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex)
202 btAssert(m_useQuantization);
204 btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
205 int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
206 int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
208 btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
209 int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
210 int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
212 if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
214 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
215 subtree.setAabbFromQuantizeNode(leftChildNode);
216 subtree.m_rootNodeIndex = leftChildNodexIndex;
217 subtree.m_subtreeSize = leftSubTreeSize;
220 if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
222 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
223 subtree.setAabbFromQuantizeNode(rightChildNode);
224 subtree.m_rootNodeIndex = rightChildNodexIndex;
225 subtree.m_subtreeSize = rightSubTreeSize;
228 //PCK: update the copy of the size
229 m_subtreeHeaderCount = m_SubtreeHeaders.size();
232 int btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis)
235 int splitIndex = startIndex;
236 int numIndices = endIndex - startIndex;
239 btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.));
240 for (i = startIndex; i < endIndex; i++)
242 btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
245 means *= (btScalar(1.) / (btScalar)numIndices);
247 splitValue = means[splitAxis];
249 //sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'.
250 for (i = startIndex; i < endIndex; i++)
252 btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
253 if (center[splitAxis] > splitValue)
256 swapLeafNodes(i, splitIndex);
261 //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
262 //otherwise the tree-building might fail due to stack-overflows in certain cases.
263 //unbalanced1 is unsafe: it can cause stack overflows
264 //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
266 //unbalanced2 should work too: always use center (perfect balanced trees)
267 //bool unbalanced2 = true;
269 //this should be safe too:
270 int rangeBalancedIndices = numIndices / 3;
271 bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices)));
275 splitIndex = startIndex + (numIndices >> 1);
278 bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex));
285 int btQuantizedBvh::calcSplittingAxis(int startIndex, int endIndex)
289 btVector3 means(btScalar(0.), btScalar(0.), btScalar(0.));
290 btVector3 variance(btScalar(0.), btScalar(0.), btScalar(0.));
291 int numIndices = endIndex - startIndex;
293 for (i = startIndex; i < endIndex; i++)
295 btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
298 means *= (btScalar(1.) / (btScalar)numIndices);
300 for (i = startIndex; i < endIndex; i++)
302 btVector3 center = btScalar(0.5) * (getAabbMax(i) + getAabbMin(i));
303 btVector3 diff2 = center - means;
304 diff2 = diff2 * diff2;
307 variance *= (btScalar(1.) / ((btScalar)numIndices - 1));
309 return variance.maxAxis();
312 void btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const
314 //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
316 if (m_useQuantization)
318 ///quantize query AABB
319 unsigned short int quantizedQueryAabbMin[3];
320 unsigned short int quantizedQueryAabbMax[3];
321 quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0);
322 quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1);
324 switch (m_traversalMode)
326 case TRAVERSAL_STACKLESS:
327 walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex);
329 case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
330 walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
332 case TRAVERSAL_RECURSIVE:
334 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
335 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
345 walkStacklessTree(nodeCallback, aabbMin, aabbMax);
349 void btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback, const btVector3& aabbMin, const btVector3& aabbMax) const
351 btAssert(!m_useQuantization);
353 const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
354 int escapeIndex, curIndex = 0;
355 int walkIterations = 0;
357 //PCK: unsigned instead of bool
358 unsigned aabbOverlap;
360 while (curIndex < m_curNodeIndex)
362 //catch bugs in tree data
363 btAssert(walkIterations < m_curNodeIndex);
366 aabbOverlap = TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
367 isLeafNode = rootNode->m_escapeIndex == -1;
369 //PCK: unsigned instead of bool
370 if (isLeafNode && (aabbOverlap != 0))
372 nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
375 //PCK: unsigned instead of bool
376 if ((aabbOverlap != 0) || isLeafNode)
383 escapeIndex = rootNode->m_escapeIndex;
384 rootNode += escapeIndex;
385 curIndex += escapeIndex;
/*
///this was the original recursive traversal, before we optimized towards stackless traversal
///NOTE(review): dead code kept for reference only — it references members (m_leftChild,
///m_rightChild, m_aabbMin) and a processNode(node) overload not used by the live code above;
///it must stay commented out.
void btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
{
	bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
	if (aabbOverlap)
	{
		isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
		if (isLeafNode)
		{
			nodeCallback->processNode(rootNode);
		}
		else
		{
			walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
			walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
		}
	}
}
*/
411 void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode, btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
413 btAssert(m_useQuantization);
416 //PCK: unsigned instead of bool
417 unsigned aabbOverlap;
419 //PCK: unsigned instead of bool
420 aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax);
421 isLeafNode = currentNode->isLeafNode();
423 //PCK: unsigned instead of bool
424 if (aabbOverlap != 0)
428 nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex());
432 //process left and right children
433 const btQuantizedBvhNode* leftChildNode = currentNode + 1;
434 walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
436 const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex();
437 walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
442 void btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const
444 btAssert(!m_useQuantization);
446 const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
447 int escapeIndex, curIndex = 0;
448 int walkIterations = 0;
450 //PCK: unsigned instead of bool
451 unsigned aabbOverlap = 0;
452 unsigned rayBoxOverlap = 0;
453 btScalar lambda_max = 1.0;
455 /* Quick pruning by quantized box */
456 btVector3 rayAabbMin = raySource;
457 btVector3 rayAabbMax = raySource;
458 rayAabbMin.setMin(rayTarget);
459 rayAabbMax.setMax(rayTarget);
461 /* Add box cast extents to bounding box */
462 rayAabbMin += aabbMin;
463 rayAabbMax += aabbMax;
466 btVector3 rayDir = (rayTarget - raySource);
467 rayDir.safeNormalize();// stephengold changed normalize to safeNormalize 2020-02-17
468 lambda_max = rayDir.dot(rayTarget - raySource);
469 ///what about division by zero? --> just set rayDirection[i] to 1.0
470 btVector3 rayDirectionInverse;
471 rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
472 rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
473 rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
474 unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
479 while (curIndex < m_curNodeIndex)
481 btScalar param = 1.0;
482 //catch bugs in tree data
483 btAssert(walkIterations < m_curNodeIndex);
487 bounds[0] = rootNode->m_aabbMinOrg;
488 bounds[1] = rootNode->m_aabbMaxOrg;
489 /* Add box cast extents */
490 bounds[0] -= aabbMax;
491 bounds[1] -= aabbMin;
493 aabbOverlap = TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
494 //perhaps profile if it is worth doing the aabbOverlap test first
497 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
498 ///thanks Joerg/hiker for the reproduction case!
499 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
500 rayBoxOverlap = aabbOverlap ? btRayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
504 rayBoxOverlap = btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
507 isLeafNode = rootNode->m_escapeIndex == -1;
509 //PCK: unsigned instead of bool
510 if (isLeafNode && (rayBoxOverlap != 0))
512 nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
515 //PCK: unsigned instead of bool
516 if ((rayBoxOverlap != 0) || isLeafNode)
523 escapeIndex = rootNode->m_escapeIndex;
524 rootNode += escapeIndex;
525 curIndex += escapeIndex;
530 void btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex, int endNodeIndex) const
532 btAssert(m_useQuantization);
534 int curIndex = startNodeIndex;
535 int walkIterations = 0;
536 int subTreeSize = endNodeIndex - startNodeIndex;
539 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
543 //PCK: unsigned instead of bool
544 unsigned boxBoxOverlap = 0;
545 unsigned rayBoxOverlap = 0;
547 btScalar lambda_max = 1.0;
550 btVector3 rayDirection = (rayTarget - raySource);
551 rayDirection.safeNormalize();// stephengold changed normalize to safeNormalize 2020-02-17
552 lambda_max = rayDirection.dot(rayTarget - raySource);
553 ///what about division by zero? --> just set rayDirection[i] to 1.0
554 rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
555 rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
556 rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
557 unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
560 /* Quick pruning by quantized box */
561 btVector3 rayAabbMin = raySource;
562 btVector3 rayAabbMax = raySource;
563 rayAabbMin.setMin(rayTarget);
564 rayAabbMax.setMax(rayTarget);
566 /* Add box cast extents to bounding box */
567 rayAabbMin += aabbMin;
568 rayAabbMax += aabbMax;
570 unsigned short int quantizedQueryAabbMin[3];
571 unsigned short int quantizedQueryAabbMax[3];
572 quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0);
573 quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1);
575 while (curIndex < endNodeIndex)
577 //#define VISUALLY_ANALYZE_BVH 1
578 #ifdef VISUALLY_ANALYZE_BVH
579 //some code snippet to debugDraw aabb, to visually analyze bvh structure
580 static int drawPatch = 0;
581 //need some global access to a debugDrawer
582 extern btIDebugDraw* debugDrawerPtr;
583 if (curIndex == drawPatch)
585 btVector3 aabbMin, aabbMax;
586 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
587 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
588 btVector3 color(1, 0, 0);
589 debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
591 #endif //VISUALLY_ANALYZE_BVH
593 //catch bugs in tree data
594 btAssert(walkIterations < subTreeSize);
597 //PCK: unsigned instead of bool
598 // only interested if this is closer than any previous hit
599 btScalar param = 1.0;
601 boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
602 isLeafNode = rootNode->isLeafNode();
606 bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
607 bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
608 /* Add box cast extents */
609 bounds[0] -= aabbMax;
610 bounds[1] -= aabbMin;
613 bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
614 bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
617 printf("functions don't match\n");
621 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
622 ///thanks Joerg/hiker for the reproduction case!
623 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
625 //BT_PROFILE("btRayAabb2");
626 rayBoxOverlap = btRayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
629 rayBoxOverlap = true; //btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
633 if (isLeafNode && rayBoxOverlap)
635 nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
638 //PCK: unsigned instead of bool
639 if ((rayBoxOverlap != 0) || isLeafNode)
646 escapeIndex = rootNode->getEscapeIndex();
647 rootNode += escapeIndex;
648 curIndex += escapeIndex;
653 void btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const
655 btAssert(m_useQuantization);
657 int curIndex = startNodeIndex;
658 int walkIterations = 0;
659 int subTreeSize = endNodeIndex - startNodeIndex;
662 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
666 //PCK: unsigned instead of bool
667 unsigned aabbOverlap;
669 while (curIndex < endNodeIndex)
671 //#define VISUALLY_ANALYZE_BVH 1
672 #ifdef VISUALLY_ANALYZE_BVH
673 //some code snippet to debugDraw aabb, to visually analyze bvh structure
674 static int drawPatch = 0;
675 //need some global access to a debugDrawer
676 extern btIDebugDraw* debugDrawerPtr;
677 if (curIndex == drawPatch)
679 btVector3 aabbMin, aabbMax;
680 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
681 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
682 btVector3 color(1, 0, 0);
683 debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
685 #endif //VISUALLY_ANALYZE_BVH
687 //catch bugs in tree data
688 btAssert(walkIterations < subTreeSize);
691 //PCK: unsigned instead of bool
692 aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
693 isLeafNode = rootNode->isLeafNode();
695 if (isLeafNode && aabbOverlap)
697 nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
700 //PCK: unsigned instead of bool
701 if ((aabbOverlap != 0) || isLeafNode)
708 escapeIndex = rootNode->getEscapeIndex();
709 rootNode += escapeIndex;
710 curIndex += escapeIndex;
715 //This traversal can be called from Playstation 3 SPU
716 void btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
718 btAssert(m_useQuantization);
722 for (i = 0; i < this->m_SubtreeHeaders.size(); i++)
724 const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
726 //PCK: unsigned instead of bool
727 unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax);
730 walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax,
731 subtree.m_rootNodeIndex,
732 subtree.m_rootNodeIndex + subtree.m_subtreeSize);
737 void btQuantizedBvh::reportRayOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
739 reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, btVector3(0, 0, 0), btVector3(0, 0, 0));
742 void btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax) const
744 //always use stackless
746 if (m_useQuantization)
748 walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
752 walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
756 //recursive traversal
757 btVector3 qaabbMin = raySource;
758 btVector3 qaabbMax = raySource;
759 qaabbMin.setMin(rayTarget);
760 qaabbMax.setMax(rayTarget);
763 reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
768 void btQuantizedBvh::swapLeafNodes(int i, int splitIndex)
770 if (m_useQuantization)
772 btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
773 m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
774 m_quantizedLeafNodes[splitIndex] = tmp;
778 btOptimizedBvhNode tmp = m_leafNodes[i];
779 m_leafNodes[i] = m_leafNodes[splitIndex];
780 m_leafNodes[splitIndex] = tmp;
784 void btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex)
786 if (m_useQuantization)
788 m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
792 m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
//serialization alignment constants; currently only referenced from the
//commented-out padding/alignment code below (padding was changed to 0)
static const unsigned BVH_ALIGNMENT = 16;
static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
//number of alignment blocks formerly reserved as serialization padding
static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
807 unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
809 // I changed this to 0 since the extra padding is not needed or used.
810 return 0; //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
813 unsigned btQuantizedBvh::calculateSerializeBufferSize() const
815 unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
816 baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
817 if (m_useQuantization)
819 return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
821 return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
824 bool btQuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
826 btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
827 m_subtreeHeaderCount = m_SubtreeHeaders.size();
829 /* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
831 ///check alignedment for buffer?
837 btQuantizedBvh* targetBvh = (btQuantizedBvh*)o_alignedDataBuffer;
839 // construct the class so the virtual function table, etc will be set up
840 // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
841 new (targetBvh) btQuantizedBvh;
845 targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));
847 btSwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin);
848 btSwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax);
849 btSwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization);
851 targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
852 targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
856 targetBvh->m_curNodeIndex = m_curNodeIndex;
857 targetBvh->m_bvhAabbMin = m_bvhAabbMin;
858 targetBvh->m_bvhAabbMax = m_bvhAabbMax;
859 targetBvh->m_bvhQuantization = m_bvhQuantization;
860 targetBvh->m_traversalMode = m_traversalMode;
861 targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
864 targetBvh->m_useQuantization = m_useQuantization;
866 unsigned char* nodeData = (unsigned char*)targetBvh;
867 nodeData += sizeof(btQuantizedBvh);
869 unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
870 nodeData += sizeToAdd;
872 int nodeCount = m_curNodeIndex;
874 if (m_useQuantization)
876 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
880 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
882 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
883 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
884 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
886 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
887 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
888 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
890 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
895 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
897 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
898 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
899 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
901 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
902 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
903 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
905 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
908 nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
910 // this clears the pointer in the member variable it doesn't really do anything to the data
911 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
912 // so the memory (which is not freed) is left alone
913 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
917 targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
921 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
923 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
924 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
926 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
927 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
928 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
933 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
935 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
936 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
938 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
939 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
940 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
943 nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
945 // this clears the pointer in the member variable it doesn't really do anything to the data
946 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
947 // so the memory (which is not freed) is left alone
948 targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
951 sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
952 nodeData += sizeToAdd;
954 // Now serialize the subtree headers
955 targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
958 for (int i = 0; i < m_subtreeHeaderCount; i++)
960 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
961 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
962 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
964 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
965 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
966 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
968 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
969 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
974 for (int i = 0; i < m_subtreeHeaderCount; i++)
976 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
977 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
978 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
980 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
981 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
982 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
984 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
985 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
987 // need to clear padding in destination buffer
988 targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
989 targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
990 targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
993 nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
995 // this clears the pointer in the member variable it doesn't really do anything to the data
996 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
997 // so the memory (which is not freed) is left alone
998 targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
1000 // this wipes the virtual function table pointer at the start of the buffer for the class
1001 *((void**)o_alignedDataBuffer) = NULL;
1006 btQuantizedBvh* btQuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
1008 if (i_alignedDataBuffer == NULL) // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
1012 btQuantizedBvh* bvh = (btQuantizedBvh*)i_alignedDataBuffer;
1016 bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));
1018 btUnSwapVector3Endian(bvh->m_bvhAabbMin);
1019 btUnSwapVector3Endian(bvh->m_bvhAabbMax);
1020 btUnSwapVector3Endian(bvh->m_bvhQuantization);
1022 bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
1023 bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
1026 unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
1027 btAssert(calculatedBufSize <= i_dataBufferSize);
1029 if (calculatedBufSize > i_dataBufferSize)
1034 unsigned char* nodeData = (unsigned char*)bvh;
1035 nodeData += sizeof(btQuantizedBvh);
1037 unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1038 nodeData += sizeToAdd;
1040 int nodeCount = bvh->m_curNodeIndex;
1042 // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
1043 // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
1044 new (bvh) btQuantizedBvh(*bvh, false);
1046 if (bvh->m_useQuantization)
1048 bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1052 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1054 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
1055 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
1056 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
1058 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
1059 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
1060 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
1062 bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
1065 nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
1069 bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1073 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1075 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
1076 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
1078 bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
1079 bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
1080 bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
1083 nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
1086 sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1087 nodeData += sizeToAdd;
1089 // Now serialize the subtree headers
1090 bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
1093 for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
1095 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1096 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1097 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1099 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1100 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1101 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1103 bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
1104 bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
1111 // Constructor that prevents btVector3's default constructor from being called
1112 btQuantizedBvh::btQuantizedBvh(btQuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin),
1113 m_bvhAabbMax(self.m_bvhAabbMax),
1114 m_bvhQuantization(self.m_bvhQuantization),
1115 m_bulletVersion(BT_BULLET_VERSION)
1119 void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
1121 m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1122 m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1123 m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
1125 m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
1126 m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0;
1129 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1130 m_contiguousNodes.resize(numElem);
1134 btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1136 for (int i = 0; i < numElem; i++, memPtr++)
1138 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
1139 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
1140 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1141 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1142 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1148 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1149 m_quantizedContiguousNodes.resize(numElem);
1153 btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1154 for (int i = 0; i < numElem; i++, memPtr++)
1156 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1157 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1158 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1159 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1160 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1161 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1162 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1167 m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);
1170 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1171 m_SubtreeHeaders.resize(numElem);
1174 btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1175 for (int i = 0; i < numElem; i++, memPtr++)
1177 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1178 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1179 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1180 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1181 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1182 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1183 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1184 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1190 void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
1192 m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1193 m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1194 m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
1196 m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
1197 m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0;
1200 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1201 m_contiguousNodes.resize(numElem);
1205 btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1207 for (int i = 0; i < numElem; i++, memPtr++)
1209 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
1210 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
1211 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1212 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1213 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1219 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1220 m_quantizedContiguousNodes.resize(numElem);
1224 btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1225 for (int i = 0; i < numElem; i++, memPtr++)
1227 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1228 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1229 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1230 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1231 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1232 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1233 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1238 m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);
1241 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1242 m_SubtreeHeaders.resize(numElem);
1245 btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1246 for (int i = 0; i < numElem; i++, memPtr++)
1248 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1249 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1250 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1251 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1252 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1253 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1254 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1255 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1261 ///fills the dataBuffer and returns the struct name (and 0 on failure)
1262 const char* btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
1264 btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;
1266 m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
1267 m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
1268 m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);
1270 quantizedData->m_curNodeIndex = m_curNodeIndex;
1271 quantizedData->m_useQuantization = m_useQuantization;
1273 quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
1274 quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*)(m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
1275 if (quantizedData->m_contiguousNodesPtr)
1277 int sz = sizeof(btOptimizedBvhNodeData);
1278 int numElem = m_contiguousNodes.size();
1279 btChunk* chunk = serializer->allocate(sz, numElem);
1280 btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
1281 for (int i = 0; i < numElem; i++, memPtr++)
1283 m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
1284 m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
1285 memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
1286 memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
1287 memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
1288 // Fill padding with zeros to appease msan.
1289 memset(memPtr->m_pad, 0, sizeof(memPtr->m_pad));
1291 serializer->finalizeChunk(chunk, "btOptimizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_contiguousNodes[0]);
1294 quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
1295 // printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
1296 quantizedData->m_quantizedContiguousNodesPtr = (btQuantizedBvhNodeData*)(m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
1297 if (quantizedData->m_quantizedContiguousNodesPtr)
1299 int sz = sizeof(btQuantizedBvhNodeData);
1300 int numElem = m_quantizedContiguousNodes.size();
1301 btChunk* chunk = serializer->allocate(sz, numElem);
1302 btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
1303 for (int i = 0; i < numElem; i++, memPtr++)
1305 memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
1306 memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
1307 memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
1308 memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
1309 memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
1310 memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
1311 memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
1313 serializer->finalizeChunk(chunk, "btQuantizedBvhNodeData", BT_ARRAY_CODE, (void*)&m_quantizedContiguousNodes[0]);
1316 quantizedData->m_traversalMode = int(m_traversalMode);
1317 quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();
1319 quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*)(m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
1320 if (quantizedData->m_subTreeInfoPtr)
1322 int sz = sizeof(btBvhSubtreeInfoData);
1323 int numElem = m_SubtreeHeaders.size();
1324 btChunk* chunk = serializer->allocate(sz, numElem);
1325 btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
1326 for (int i = 0; i < numElem; i++, memPtr++)
1328 memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
1329 memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
1330 memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
1331 memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
1332 memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
1333 memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];
1335 memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
1336 memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
1338 serializer->finalizeChunk(chunk, "btBvhSubtreeInfoData", BT_ARRAY_CODE, (void*)&m_SubtreeHeaders[0]);
1340 return btQuantizedBvhDataName;