2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2006 Erwin Coumans https://bulletphysics.org
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose,
8 including commercial applications, and to alter it and redistribute it freely,
9 subject to the following restrictions:
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
16 #include "b3QuantizedBvh.h"
18 #include "Bullet3Geometry/b3AabbUtil.h"
/// Default-construct an empty BVH.
/// Records the Bullet version it was built with, leaves quantization disabled
/// until setQuantizationValues()/buildInternal() is called, and selects the
/// cache-friendly stackless traversal mode by default.
22 b3QuantizedBvh::b3QuantizedBvh() : m_bulletVersion(B3_BULLET_VERSION),
23 m_useQuantization(false),
24 m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
25 //m_traversalMode(TRAVERSAL_STACKLESS)
26 //m_traversalMode(TRAVERSAL_RECURSIVE)
28 m_subtreeHeaderCount(0) //PCK: add this line
// Start with an unbounded AABB; setQuantizationValues() replaces it with the
// real (margin-expanded) bounds before any quantized query runs.
30 m_bvhAabbMin.setValue(-B3_INFINITY, -B3_INFINITY, -B3_INFINITY);
31 m_bvhAabbMax.setValue(B3_INFINITY, B3_INFINITY, B3_INFINITY);
/// Build the quantized tree from the already-populated m_quantizedLeafNodes.
/// Precondition: setQuantizationValues() was called and the caller pushed all
/// quantized leaves. The temporary leaf array is cleared when the build is done.
34 void b3QuantizedBvh::buildInternal()
36 ///assumes that caller filled in the m_quantizedLeafNodes
37 m_useQuantization = true;
40 if (m_useQuantization)
42 //now we have an array of leafnodes in m_leafNodes
43 numLeafNodes = m_quantizedLeafNodes.size();
// A binary tree over N leaves needs at most 2*N nodes (leaves + internals).
45 m_quantizedContiguousNodes.resize(2 * numLeafNodes);
50 buildTree(0, numLeafNodes);
52 ///if the entire tree is small then subtree size, we need to create a header info for the tree
// buildTree() only emits subtree headers once a subtree exceeds
// MAX_SUBTREE_SIZE_IN_BYTES, so a tree smaller than that gets one header here
// covering the whole tree (rooted at node 0).
53 if (m_useQuantization && !m_SubtreeHeaders.size())
55 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
56 subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
57 subtree.m_rootNodeIndex = 0;
58 subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
61 //PCK: update the copy of the size
62 m_subtreeHeaderCount = m_SubtreeHeaders.size();
64 //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
65 m_quantizedLeafNodes.clear();
69 ///just for debugging, to visualize the individual patches/subtrees
70 #ifdef DEBUG_PATCH_COLORS
77 #endif //DEBUG_PATCH_COLORS
/// Define the quantization frame: the world AABB all node bounds are quantized
/// against, and the per-axis scale (world units -> 16-bit grid).
/// @param bvhAabbMin/bvhAabbMax  bounds enclosing all contained geometry
/// @param quantizationMargin     padding added on every axis; also prevents a
///                               division by zero for degenerate (flat) AABBs
79 void b3QuantizedBvh::setQuantizationValues(const b3Vector3& bvhAabbMin, const b3Vector3& bvhAabbMax, b3Scalar quantizationMargin)
81 //enlarge the AABB to avoid division by zero when initializing the quantization values
82 b3Vector3 clampValue = b3MakeVector3(quantizationMargin, quantizationMargin, quantizationMargin);
83 m_bvhAabbMin = bvhAabbMin - clampValue;
84 m_bvhAabbMax = bvhAabbMax + clampValue;
85 b3Vector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
// 65533 rather than 65535: presumably leaves headroom for the conservative
// +/-1 rounding done during quantization -- TODO confirm against quantizeWithClamp.
86 m_bvhQuantization = b3MakeVector3(b3Scalar(65533.0), b3Scalar(65533.0), b3Scalar(65533.0)) / aabbSize;
87 m_useQuantization = true;
90 b3QuantizedBvh::~b3QuantizedBvh()
// Debug-only global recording the deepest recursion reached by buildTree(),
// used to diagnose unbalanced trees / potential stack overflows.
94 #ifdef DEBUG_TREE_BUILDING
96 int gMaxStackDepth = 0;
97 #endif //DEBUG_TREE_BUILDING
/// Recursively build the tree over leaf range [startIndex, endIndex).
/// Writes internal nodes into the contiguous node array (advancing
/// m_curNodeIndex) and, for quantized trees, emits subtree headers whenever a
/// child subtree grows past MAX_SUBTREE_SIZE_IN_BYTES.
99 void b3QuantizedBvh::buildTree(int startIndex, int endIndex)
101 #ifdef DEBUG_TREE_BUILDING
103 if (gStackDepth > gMaxStackDepth)
104 gMaxStackDepth = gStackDepth;
105 #endif //DEBUG_TREE_BUILDING
107 int splitAxis, splitIndex, i;
108 int numIndices = endIndex - startIndex;
// Remember where this subtree starts so we can compute its escape offset.
109 int curIndex = m_curNodeIndex;
111 b3Assert(numIndices > 0);
113 // NOTE(review): the single-leaf base case (numIndices == 1) presumably
114 // returns after copying the leaf below -- the early-return lines are not
115 // visible in this extract; verify against upstream.
115 #ifdef DEBUG_TREE_BUILDING
117 #endif //DEBUG_TREE_BUILDING
119 assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex);
124 //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.
126 splitAxis = calcSplittingAxis(startIndex, endIndex);
128 splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis);
130 int internalNodeIndex = m_curNodeIndex;
132 //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
133 //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
134 setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax); //can't use b3Vector3(B3_INFINITY,B3_INFINITY,B3_INFINITY)) because of quantization
135 setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin); //can't use b3Vector3(-B3_INFINITY,-B3_INFINITY,-B3_INFINITY)) because of quantization
// Grow this internal node's AABB to enclose every leaf in its range.
137 for (i = startIndex; i < endIndex; i++)
139 mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i));
144 //internalNode->m_escapeIndex;
146 int leftChildNodexIndex = m_curNodeIndex;
148 //build left child tree
149 buildTree(startIndex, splitIndex);
151 int rightChildNodexIndex = m_curNodeIndex;
152 //build right child tree
153 buildTree(splitIndex, endIndex);
155 #ifdef DEBUG_TREE_BUILDING
157 #endif //DEBUG_TREE_BUILDING
// escapeIndex = node count of this subtree = how far a stackless walker must
// jump to skip it entirely.
159 int escapeIndex = m_curNodeIndex - curIndex;
161 if (m_useQuantization)
163 //escapeIndex is the number of nodes of this subtree
164 const int sizeQuantizedNode = sizeof(b3QuantizedBvhNode);
165 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
// Once this subtree no longer fits in one cache-friendly chunk, register the
// two children as separately-traversable subtrees.
166 if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
168 updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex);
175 setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex);
/// Register the left/right children of a just-built internal node as subtree
/// headers, provided each child fits within MAX_SUBTREE_SIZE_IN_BYTES.
/// Only meaningful for quantized trees (asserted).
178 void b3QuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex)
180 b3Assert(m_useQuantization);
// A leaf counts as a 1-node subtree; an internal node's escape index is its
// total node count.
182 b3QuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
183 int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
184 int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(b3QuantizedBvhNode));
186 b3QuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
187 int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
188 int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(b3QuantizedBvhNode));
190 if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
192 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
193 subtree.setAabbFromQuantizeNode(leftChildNode);
194 subtree.m_rootNodeIndex = leftChildNodexIndex;
195 subtree.m_subtreeSize = leftSubTreeSize;
198 if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
200 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
201 subtree.setAabbFromQuantizeNode(rightChildNode);
202 subtree.m_rootNodeIndex = rightChildNodexIndex;
203 subtree.m_subtreeSize = rightSubTreeSize;
206 //PCK: update the copy of the size
207 m_subtreeHeaderCount = m_SubtreeHeaders.size();
/// Partition the leaves in [startIndex, endIndex) around the mean centroid on
/// splitAxis and return the split position. Leaves with centroid above the
/// mean are swapped to the front; if the resulting split is too lopsided it is
/// forced to the range midpoint to keep recursion depth bounded.
/// @return index in (startIndex, endIndex) where the right child range begins
210 int b3QuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis)
213 int splitIndex = startIndex;
214 int numIndices = endIndex - startIndex;
// Pass 1: mean of the leaf AABB centroids along the chosen axis.
217 b3Vector3 means = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
218 for (i = startIndex; i < endIndex; i++)
220 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
223 means *= (b3Scalar(1.) / (b3Scalar)numIndices);
225 splitValue = means[splitAxis];
227 //sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'.
228 for (i = startIndex; i < endIndex; i++)
230 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
231 if (center[splitAxis] > splitValue)
233 //left side (front) of the range collects the 'larger' centroids
234 swapLeafNodes(i, splitIndex);
239 //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
240 //otherwise the tree-building might fail due to stack-overflows in certain cases.
241 //unbalanced1 is unsafe: it can cause stack overflows
242 //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
244 //unbalanced2 should work too: always use center (perfect balanced trees)
245 //bool unbalanced2 = true;
247 //this should be safe too:
// "Unbalanced" = split falls within the outer third of the range on either side.
248 int rangeBalancedIndices = numIndices / 3;
249 bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices)));
// Fall back to the midpoint so each child gets roughly half the leaves.
253 splitIndex = startIndex + (numIndices >> 1);
256 bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex));
/// Choose the split axis for [startIndex, endIndex): the axis along which the
/// leaf AABB centroids have the greatest (sample) variance.
/// @return 0, 1 or 2 (x/y/z)
263 int b3QuantizedBvh::calcSplittingAxis(int startIndex, int endIndex)
267 b3Vector3 means = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
268 b3Vector3 variance = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
269 int numIndices = endIndex - startIndex;
// Pass 1: mean centroid per axis.
271 for (i = startIndex; i < endIndex; i++)
273 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
276 means *= (b3Scalar(1.) / (b3Scalar)numIndices);
// Pass 2: accumulate squared deviation from the mean per axis.
278 for (i = startIndex; i < endIndex; i++)
280 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
281 b3Vector3 diff2 = center - means;
282 diff2 = diff2 * diff2;
// Bessel's correction (n - 1); callers guarantee numIndices > 1 on this path.
285 variance *= (b3Scalar(1.) / ((b3Scalar)numIndices - 1));
287 return variance.maxAxis();
/// Report every leaf whose AABB overlaps the query AABB via nodeCallback.
/// Quantized trees dispatch on m_traversalMode (stackless, cache-friendly
/// stackless, or recursive); non-quantized trees always walk stacklessly.
290 void b3QuantizedBvh::reportAabbOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
292 //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
294 if (m_useQuantization)
296 ///quantize query AABB
// 0/1 flags select round-down/round-up so the quantized query box is conservative.
297 unsigned short int quantizedQueryAabbMin[3];
298 unsigned short int quantizedQueryAabbMax[3];
299 quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0);
300 quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1);
302 switch (m_traversalMode)
304 case TRAVERSAL_STACKLESS:
305 walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex);
307 case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
308 walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
310 case TRAVERSAL_RECURSIVE:
312 const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
313 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
// Non-quantized fallback: plain stackless walk over b3OptimizedBvhNode array.
323 walkStacklessTree(nodeCallback, aabbMin, aabbMax);
327 static int b3s_maxIterations = 0;
/// Stackless traversal of the non-quantized tree: iterate the contiguous node
/// array, descending into overlapping internal nodes (step +1) and skipping
/// rejected subtrees via their escape index. Overlapping leaves are reported
/// through nodeCallback.
329 void b3QuantizedBvh::walkStacklessTree(b3NodeOverlapCallback* nodeCallback, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
331 b3Assert(!m_useQuantization);
333 const b3OptimizedBvhNode* rootNode = &m_contiguousNodes[0];
334 int escapeIndex, curIndex = 0;
335 int walkIterations = 0;
337 //PCK: unsigned instead of bool
338 unsigned aabbOverlap;
340 while (curIndex < m_curNodeIndex)
342 //catch bugs in tree data
343 b3Assert(walkIterations < m_curNodeIndex);
346 aabbOverlap = b3TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
// Leaves carry -1 instead of an escape offset.
347 isLeafNode = rootNode->m_escapeIndex == -1;
349 //PCK: unsigned instead of bool
350 if (isLeafNode && (aabbOverlap != 0))
352 nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
355 //PCK: unsigned instead of bool
// Overlap (or leaf): advance one node; otherwise jump over the whole subtree.
356 if ((aabbOverlap != 0) || isLeafNode)
363 escapeIndex = rootNode->m_escapeIndex;
364 rootNode += escapeIndex;
365 curIndex += escapeIndex;
368 if (b3s_maxIterations < walkIterations)
369 b3s_maxIterations = walkIterations;
373 ///this was the original recursive traversal, before we optimized towards stackless traversal
// NOTE(review): in upstream Bullet this recursive variant is commented out
// (dead code). It references TestAabbAgainstAabb2 (no b3 prefix) and
// m_aabbMin/m_leftChild/m_rightChild members that the stackless node layout
// above does not use -- verify this block is excluded from the build.
374 void b3QuantizedBvh::walkTree(b3OptimizedBvhNode* rootNode,b3NodeOverlapCallback* nodeCallback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const
376 bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
379 isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
382 nodeCallback->processNode(rootNode);
385 walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
386 walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
/// Recursive traversal of the quantized tree against a quantized query AABB.
/// Reports overlapping leaves via nodeCallback; recurses into both children of
/// an overlapping internal node.
393 void b3QuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const b3QuantizedBvhNode* currentNode, b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
395 b3Assert(m_useQuantization);
398 //PCK: unsigned instead of bool
399 unsigned aabbOverlap;
401 //PCK: unsigned instead of bool
402 aabbOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax);
403 isLeafNode = currentNode->isLeafNode();
405 //PCK: unsigned instead of bool
406 if (aabbOverlap != 0)
410 nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex());
414 //process left and right children
// Nodes are laid out depth-first: the left child is adjacent, the right child
// starts after the left subtree (leaf = 1 node, else its escape index).
415 const b3QuantizedBvhNode* leftChildNode = currentNode + 1;
416 walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
418 const b3QuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex();
419 walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
/// Stackless ray / box-cast traversal of the non-quantized tree.
/// The cast is a segment raySource->rayTarget swept with box extents
/// [aabbMin, aabbMax]; candidate leaves are reported via nodeCallback.
/// startNodeIndex/endNodeIndex are unused here (whole tree is walked) --
/// presumably kept for signature parity with the quantized variant.
424 void b3QuantizedBvh::walkStacklessTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex, int endNodeIndex) const
426 b3Assert(!m_useQuantization);
428 const b3OptimizedBvhNode* rootNode = &m_contiguousNodes[0];
429 int escapeIndex, curIndex = 0;
430 int walkIterations = 0;
432 //PCK: unsigned instead of bool
433 unsigned aabbOverlap = 0;
434 unsigned rayBoxOverlap = 0;
435 b3Scalar lambda_max = 1.0;
437 /* Quick pruning by quantized box */
// AABB enclosing the whole swept segment, for a cheap first-stage reject.
438 b3Vector3 rayAabbMin = raySource;
439 b3Vector3 rayAabbMax = raySource;
440 rayAabbMin.setMin(rayTarget);
441 rayAabbMax.setMax(rayTarget);
443 /* Add box cast extents to bounding box */
444 rayAabbMin += aabbMin;
445 rayAabbMax += aabbMax;
448 b3Vector3 rayDir = (rayTarget - raySource);
// NOTE(review): lambda_max here is dot(rayDir, rayDir) using the
// *unnormalized* direction; the quantized variant normalizes first -- confirm
// the b3RayAabb2 parameterization expected on this path.
450 lambda_max = rayDir.dot(rayTarget - raySource);
451 ///what about division by zero? --> just set rayDirection[i] to 1.0
452 b3Vector3 rayDirectionInverse;
453 rayDirectionInverse[0] = rayDir[0] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[0];
454 rayDirectionInverse[1] = rayDir[1] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[1];
455 rayDirectionInverse[2] = rayDir[2] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[2];
// Per-axis direction signs, consumed by the slab-method test in b3RayAabb2.
456 unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
461 while (curIndex < m_curNodeIndex)
463 b3Scalar param = 1.0;
464 //catch bugs in tree data
465 b3Assert(walkIterations < m_curNodeIndex);
// Node bounds grown by the cast extents (Minkowski sum), so the swept box
// reduces to a ray-vs-enlarged-AABB test.
469 bounds[0] = rootNode->m_aabbMinOrg;
470 bounds[1] = rootNode->m_aabbMaxOrg;
471 /* Add box cast extents */
472 bounds[0] -= aabbMax;
473 bounds[1] -= aabbMin;
475 aabbOverlap = b3TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
476 //perhaps profile if it is worth doing the aabbOverlap test first
479 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
480 ///thanks Joerg/hiker for the reproduction case!
481 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
// Only run the (more expensive) ray/box slab test when the coarse AABB passed.
482 rayBoxOverlap = aabbOverlap ? b3RayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
486 rayBoxOverlap = b3RayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
489 isLeafNode = rootNode->m_escapeIndex == -1;
491 //PCK: unsigned instead of bool
492 if (isLeafNode && (rayBoxOverlap != 0))
494 nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
497 //PCK: unsigned instead of bool
// Hit (or leaf): step to next node; miss on internal node: skip its subtree.
498 if ((rayBoxOverlap != 0) || isLeafNode)
505 escapeIndex = rootNode->m_escapeIndex;
506 rootNode += escapeIndex;
507 curIndex += escapeIndex;
510 if (b3s_maxIterations < walkIterations)
511 b3s_maxIterations = walkIterations;
/// Stackless ray / box-cast traversal of the quantized tree over node range
/// [startNodeIndex, endNodeIndex). Two-stage test per node: coarse quantized
/// AABB-vs-segment-AABB overlap, then an exact slab-method ray test against
/// the unquantized, extent-enlarged node bounds. Hits go to nodeCallback.
514 void b3QuantizedBvh::walkStacklessQuantizedTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex, int endNodeIndex) const
516 b3Assert(m_useQuantization);
518 int curIndex = startNodeIndex;
519 int walkIterations = 0;
520 int subTreeSize = endNodeIndex - startNodeIndex;
523 const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
527 //PCK: unsigned instead of bool
528 unsigned boxBoxOverlap = 0;
529 unsigned rayBoxOverlap = 0;
531 b3Scalar lambda_max = 1.0;
534 b3Vector3 rayDirection = (rayTarget - raySource);
535 rayDirection.normalize();
// With a normalized direction, lambda_max is the segment length.
536 lambda_max = rayDirection.dot(rayTarget - raySource);
537 ///what about division by zero? --> just set rayDirection[i] to 1.0
// rayDirection is reused in place as the component-wise inverse direction.
538 rayDirection[0] = rayDirection[0] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[0];
539 rayDirection[1] = rayDirection[1] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[1];
540 rayDirection[2] = rayDirection[2] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[2];
541 unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
544 /* Quick pruning by quantized box */
// AABB of the whole swept segment, then quantized for the coarse reject below.
545 b3Vector3 rayAabbMin = raySource;
546 b3Vector3 rayAabbMax = raySource;
547 rayAabbMin.setMin(rayTarget);
548 rayAabbMax.setMax(rayTarget);
550 /* Add box cast extents to bounding box */
551 rayAabbMin += aabbMin;
552 rayAabbMax += aabbMax;
554 unsigned short int quantizedQueryAabbMin[3];
555 unsigned short int quantizedQueryAabbMax[3];
556 quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0);
557 quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1);
559 while (curIndex < endNodeIndex)
561 //#define VISUALLY_ANALYZE_BVH 1
562 #ifdef VISUALLY_ANALYZE_BVH
563 //some code snippet to debugDraw aabb, to visually analyze bvh structure
564 static int drawPatch = 0;
565 //need some global access to a debugDrawer
566 extern b3IDebugDraw* debugDrawerPtr;
567 if (curIndex == drawPatch)
569 b3Vector3 aabbMin, aabbMax;
570 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
571 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
572 b3Vector3 color(1, 0, 0);
573 debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
575 #endif //VISUALLY_ANALYZE_BVH
577 //catch bugs in tree data
578 b3Assert(walkIterations < subTreeSize);
581 //PCK: unsigned instead of bool
582 // only interested if this is closer than any previous hit
583 b3Scalar param = 1.0;
585 boxBoxOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
586 isLeafNode = rootNode->isLeafNode();
// Exact test: unquantize node bounds, enlarge by the cast extents
// (Minkowski sum), then ray-vs-AABB.
590 bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
591 bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
592 /* Add box cast extents */
593 bounds[0] -= aabbMax;
594 bounds[1] -= aabbMin;
// Debug cross-check between the two ray/AABB implementations (presumably
// compiled only under a debug/compare define -- the guard lines are not
// visible in this extract).
597 bool ra2 = b3RayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
598 bool ra = b3RayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
601 printf("functions don't match\n");
605 ///careful with this check: need to check division by zero (above) and fix the unQuantize method
606 ///thanks Joerg/hiker for the reproduction case!
607 ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
609 //B3_PROFILE("b3RayAabb2");
610 rayBoxOverlap = b3RayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
613 rayBoxOverlap = true; //b3RayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
617 if (isLeafNode && rayBoxOverlap)
619 nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
622 //PCK: unsigned instead of bool
// Hit (or leaf): advance one node; miss on internal node: skip its subtree.
623 if ((rayBoxOverlap != 0) || isLeafNode)
630 escapeIndex = rootNode->getEscapeIndex();
631 rootNode += escapeIndex;
632 curIndex += escapeIndex;
635 if (b3s_maxIterations < walkIterations)
636 b3s_maxIterations = walkIterations;
/// Stackless AABB query over the quantized node range
/// [startNodeIndex, endNodeIndex). Overlap tests are done entirely in
/// quantized (16-bit) space; overlapping leaves are reported via nodeCallback.
/// Also used per-subtree by the cache-friendly traversal.
639 void b3QuantizedBvh::walkStacklessQuantizedTree(b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const
641 b3Assert(m_useQuantization);
643 int curIndex = startNodeIndex;
644 int walkIterations = 0;
645 int subTreeSize = endNodeIndex - startNodeIndex;
648 const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
652 //PCK: unsigned instead of bool
653 unsigned aabbOverlap;
655 while (curIndex < endNodeIndex)
657 //#define VISUALLY_ANALYZE_BVH 1
658 #ifdef VISUALLY_ANALYZE_BVH
659 //some code snippet to debugDraw aabb, to visually analyze bvh structure
660 static int drawPatch = 0;
661 //need some global access to a debugDrawer
662 extern b3IDebugDraw* debugDrawerPtr;
663 if (curIndex == drawPatch)
665 b3Vector3 aabbMin, aabbMax;
666 aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
667 aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
668 b3Vector3 color(1, 0, 0);
669 debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
671 #endif //VISUALLY_ANALYZE_BVH
673 //catch bugs in tree data
674 b3Assert(walkIterations < subTreeSize);
677 //PCK: unsigned instead of bool
678 aabbOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
679 isLeafNode = rootNode->isLeafNode();
681 if (isLeafNode && aabbOverlap)
683 nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
686 //PCK: unsigned instead of bool
// Overlap (or leaf): advance one node; otherwise jump the whole subtree.
687 if ((aabbOverlap != 0) || isLeafNode)
694 escapeIndex = rootNode->getEscapeIndex();
695 rootNode += escapeIndex;
696 curIndex += escapeIndex;
699 if (b3s_maxIterations < walkIterations)
700 b3s_maxIterations = walkIterations;
703 //This traversal can be called from Playstation 3 SPU
/// Cache-friendly traversal: first cull whole subtrees by their header AABB,
/// then run the plain stackless walk only inside overlapping subtrees. Each
/// subtree fits in MAX_SUBTREE_SIZE_IN_BYTES, keeping the working set small.
704 void b3QuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
706 b3Assert(m_useQuantization);
710 for (i = 0; i < this->m_SubtreeHeaders.size(); i++)
712 const b3BvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
714 //PCK: unsigned instead of bool
715 unsigned overlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax);
718 walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax,
719 subtree.m_rootNodeIndex,
720 subtree.m_rootNodeIndex + subtree.m_subtreeSize);
/// Ray query = box cast with zero extents; forwards to reportBoxCastOverlappingNodex.
725 void b3QuantizedBvh::reportRayOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget) const
727 reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, b3MakeVector3(0, 0, 0), b3MakeVector3(0, 0, 0));
/// Report leaves potentially hit by a box swept from raySource to rayTarget
/// with extents [aabbMin, aabbMax]. Always uses a stackless walker (quantized
/// or plain depending on the tree); the trailing AABB fallback is legacy code,
/// presumably disabled by a preprocessor guard not visible in this extract.
730 void b3QuantizedBvh::reportBoxCastOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
732 //always use stackless
734 if (m_useQuantization)
736 walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
740 walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
744 //recursive traversal
745 b3Vector3 qaabbMin = raySource;
746 b3Vector3 qaabbMax = raySource;
747 qaabbMin.setMin(rayTarget);
748 qaabbMax.setMax(rayTarget);
751 reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
/// Swap two leaf entries during the build's partition step, in whichever leaf
/// array (quantized or full-precision) this tree uses.
756 void b3QuantizedBvh::swapLeafNodes(int i, int splitIndex)
758 if (m_useQuantization)
760 b3QuantizedBvhNode tmp = m_quantizedLeafNodes[i];
761 m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
762 m_quantizedLeafNodes[splitIndex] = tmp;
766 b3OptimizedBvhNode tmp = m_leafNodes[i];
767 m_leafNodes[i] = m_leafNodes[splitIndex];
768 m_leafNodes[splitIndex] = tmp;
/// Copy a leaf into the contiguous node array at the given internal-node slot
/// (the build's base case: a range of one leaf becomes that node).
772 void b3QuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex)
774 if (m_useQuantization)
776 m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
780 m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
// Serialization alignment constants (16-byte alignment of the output buffer);
// the mask is used for pointer alignment checks, BLOCKS for legacy padding.
789 static const unsigned BVH_ALIGNMENT = 16;
790 static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
792 static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
/// Extra padding once reserved in serialized buffers; intentionally 0 now
/// (kept as a function so calculateSerializeBufferSize() stays unchanged).
795 unsigned int b3QuantizedBvh::getAlignmentSerializationPadding()
797 // I changed this to 0 since the extra padding is not needed or used.
798 return 0; //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
/// Number of bytes serialize() will write: the class itself, all subtree
/// headers, and every node (quantized or full-precision layout).
801 unsigned b3QuantizedBvh::calculateSerializeBufferSize() const
803 unsigned baseSize = sizeof(b3QuantizedBvh) + getAlignmentSerializationPadding();
804 baseSize += sizeof(b3BvhSubtreeInfo) * m_subtreeHeaderCount;
805 if (m_useQuantization)
807 return baseSize + m_curNodeIndex * sizeof(b3QuantizedBvhNode);
809 return baseSize + m_curNodeIndex * sizeof(b3OptimizedBvhNode);
/// Serialize this BVH into a caller-provided, suitably aligned buffer.
/// Layout: placement-new'd b3QuantizedBvh header, then the node array
/// (quantized or full-precision), then the subtree headers; internal array
/// members are pointed into the buffer and then detached so no destructor
/// frees buffer memory. Optionally byte-swaps every field for cross-endian
/// targets. The vtable pointer at the buffer start is wiped at the end.
/// @param o_alignedDataBuffer  destination, at least calculateSerializeBufferSize() bytes
/// @param i_swapEndian         true to write opposite-endian data
/// @return true on success (the visible code has no failure path; the size/
///         alignment check is commented out)
812 bool b3QuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
814 b3Assert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
// NOTE(review): assigning a member in a const method -- m_subtreeHeaderCount
// is presumably declared mutable; verify in the header.
815 m_subtreeHeaderCount = m_SubtreeHeaders.size();
817 /* if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
819 ///check alignedment for buffer?
825 b3QuantizedBvh* targetBvh = (b3QuantizedBvh*)o_alignedDataBuffer;
827 // construct the class so the virtual function table, etc will be set up
828 // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
829 new (targetBvh) b3QuantizedBvh;
// --- header fields, byte-swapped or copied verbatim ---
833 targetBvh->m_curNodeIndex = static_cast<int>(b3SwapEndian(m_curNodeIndex));
835 b3SwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin);
836 b3SwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax);
837 b3SwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization);
839 targetBvh->m_traversalMode = (b3TraversalMode)b3SwapEndian(m_traversalMode);
840 targetBvh->m_subtreeHeaderCount = static_cast<int>(b3SwapEndian(m_subtreeHeaderCount));
844 targetBvh->m_curNodeIndex = m_curNodeIndex;
845 targetBvh->m_bvhAabbMin = m_bvhAabbMin;
846 targetBvh->m_bvhAabbMax = m_bvhAabbMax;
847 targetBvh->m_bvhQuantization = m_bvhQuantization;
848 targetBvh->m_traversalMode = m_traversalMode;
849 targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
852 targetBvh->m_useQuantization = m_useQuantization;
// --- node array, written immediately after the class in the buffer ---
854 unsigned char* nodeData = (unsigned char*)targetBvh;
855 nodeData += sizeof(b3QuantizedBvh);
857 unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
858 nodeData += sizeToAdd;
860 int nodeCount = m_curNodeIndex;
862 if (m_useQuantization)
// Point the target's array at the buffer region, then fill it element-wise.
864 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
868 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
870 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
871 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
872 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
874 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
875 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
876 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
878 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
883 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
885 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
886 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
887 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
889 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
890 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
891 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
893 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
896 nodeData += sizeof(b3QuantizedBvhNode) * nodeCount;
898 // this clears the pointer in the member variable it doesn't really do anything to the data
899 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
900 // so the memory (which is not freed) is left alone
901 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
905 targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
909 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
911 b3SwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
912 b3SwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
914 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
915 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
916 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
921 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
923 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
924 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
926 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
927 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
928 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
931 nodeData += sizeof(b3OptimizedBvhNode) * nodeCount;
933 // this clears the pointer in the member variable it doesn't really do anything to the data
934 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
935 // so the memory (which is not freed) is left alone
936 targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
939 sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
940 nodeData += sizeToAdd;
942 // Now serialize the subtree headers
943 targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
946 for (int i = 0; i < m_subtreeHeaderCount; i++)
948 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
949 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
950 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
952 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
953 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
954 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
956 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(b3SwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
957 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(b3SwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
962 for (int i = 0; i < m_subtreeHeaderCount; i++)
964 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
965 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
966 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
968 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
969 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
970 targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
972 targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
973 targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
975 // need to clear padding in destination buffer
976 targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
977 targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
978 targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
981 nodeData += sizeof(b3BvhSubtreeInfo) * m_subtreeHeaderCount;
983 // this clears the pointer in the member variable it doesn't really do anything to the data
984 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
985 // so the memory (which is not freed) is left alone
986 targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
988 // this wipes the virtual function table pointer at the start of the buffer for the class
989 *((void**)o_alignedDataBuffer) = NULL;
994 b3QuantizedBvh* b3QuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
996 if (i_alignedDataBuffer == NULL) // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
1000 b3QuantizedBvh* bvh = (b3QuantizedBvh*)i_alignedDataBuffer;
1004 bvh->m_curNodeIndex = static_cast<int>(b3SwapEndian(bvh->m_curNodeIndex));
1006 b3UnSwapVector3Endian(bvh->m_bvhAabbMin);
1007 b3UnSwapVector3Endian(bvh->m_bvhAabbMax);
1008 b3UnSwapVector3Endian(bvh->m_bvhQuantization);
1010 bvh->m_traversalMode = (b3TraversalMode)b3SwapEndian(bvh->m_traversalMode);
1011 bvh->m_subtreeHeaderCount = static_cast<int>(b3SwapEndian(bvh->m_subtreeHeaderCount));
1014 unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
1015 b3Assert(calculatedBufSize <= i_dataBufferSize);
1017 if (calculatedBufSize > i_dataBufferSize)
1022 unsigned char* nodeData = (unsigned char*)bvh;
1023 nodeData += sizeof(b3QuantizedBvh);
1025 unsigned sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1026 nodeData += sizeToAdd;
1028 int nodeCount = bvh->m_curNodeIndex;
1030 // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
1031 // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
1032 new (bvh) b3QuantizedBvh(*bvh, false);
1034 if (bvh->m_useQuantization)
1036 bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1040 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1042 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
1043 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
1044 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
1046 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
1047 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
1048 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
1050 bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
1053 nodeData += sizeof(b3QuantizedBvhNode) * nodeCount;
1057 bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1061 for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1063 b3UnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
1064 b3UnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
1066 bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
1067 bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
1068 bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
1071 nodeData += sizeof(b3OptimizedBvhNode) * nodeCount;
1074 sizeToAdd = 0; //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1075 nodeData += sizeToAdd;
1077 // Now serialize the subtree headers
1078 bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
1081 for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
1083 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1084 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1085 bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1087 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1088 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1089 bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1091 bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(b3SwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
1092 bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(b3SwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
1099 // Constructor that prevents b3Vector3's default constructor from being called
1100 b3QuantizedBvh::b3QuantizedBvh(b3QuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin),
1101 m_bvhAabbMax(self.m_bvhAabbMax),
1102 m_bvhQuantization(self.m_bvhQuantization),
1103 m_bulletVersion(B3_BULLET_VERSION)
1107 void b3QuantizedBvh::deSerializeFloat(struct b3QuantizedBvhFloatData& quantizedBvhFloatData)
1109 m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1110 m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1111 m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
1113 m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
1114 m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0;
1117 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1118 m_contiguousNodes.resize(numElem);
1122 b3OptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1124 for (int i = 0; i < numElem; i++, memPtr++)
1126 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
1127 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
1128 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1129 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1130 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1136 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1137 m_quantizedContiguousNodes.resize(numElem);
1141 b3QuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1142 for (int i = 0; i < numElem; i++, memPtr++)
1144 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1145 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1146 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1147 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1148 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1149 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1150 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1155 m_traversalMode = b3TraversalMode(quantizedBvhFloatData.m_traversalMode);
1158 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1159 m_SubtreeHeaders.resize(numElem);
1162 b3BvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1163 for (int i = 0; i < numElem; i++, memPtr++)
1165 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1166 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1167 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1168 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1169 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1170 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1171 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1172 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1178 void b3QuantizedBvh::deSerializeDouble(struct b3QuantizedBvhDoubleData& quantizedBvhDoubleData)
1180 m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1181 m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1182 m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
1184 m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
1185 m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0;
1188 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1189 m_contiguousNodes.resize(numElem);
1193 b3OptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1195 for (int i = 0; i < numElem; i++, memPtr++)
1197 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
1198 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
1199 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1200 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1201 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1207 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1208 m_quantizedContiguousNodes.resize(numElem);
1212 b3QuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1213 for (int i = 0; i < numElem; i++, memPtr++)
1215 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1216 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1217 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1218 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1219 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1220 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1221 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1226 m_traversalMode = b3TraversalMode(quantizedBvhDoubleData.m_traversalMode);
1229 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1230 m_SubtreeHeaders.resize(numElem);
1233 b3BvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1234 for (int i = 0; i < numElem; i++, memPtr++)
1236 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1237 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1238 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1239 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1240 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1241 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1242 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1243 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1249 ///fills the dataBuffer and returns the struct name (and 0 on failure)
1250 const char* b3QuantizedBvh::serialize(void* dataBuffer, b3Serializer* serializer) const