[dali_2.3.21] Merge branch 'devel/master'
[platform/core/uifw/dali-toolkit.git] / dali-physics / third-party / bullet3 / src / Bullet3OpenCL / NarrowphaseCollision / b3QuantizedBvh.cpp
1 /*
2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2006 Erwin Coumans  https://bulletphysics.org
4
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose, 
8 including commercial applications, and to alter it and redistribute it freely, 
9 subject to the following restrictions:
10
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
14 */
15
16 #include "b3QuantizedBvh.h"
17
18 #include "Bullet3Geometry/b3AabbUtil.h"
19
20 #define RAYAABB2
21
22 b3QuantizedBvh::b3QuantizedBvh() : m_bulletVersion(B3_BULLET_VERSION),
23                                                                    m_useQuantization(false),
24                                                                    m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
25                                                                    //m_traversalMode(TRAVERSAL_STACKLESS)
26                                                                    //m_traversalMode(TRAVERSAL_RECURSIVE)
27                                                                    ,
28                                                                    m_subtreeHeaderCount(0)  //PCK: add this line
29 {
30         m_bvhAabbMin.setValue(-B3_INFINITY, -B3_INFINITY, -B3_INFINITY);
31         m_bvhAabbMax.setValue(B3_INFINITY, B3_INFINITY, B3_INFINITY);
32 }
33
34 void b3QuantizedBvh::buildInternal()
35 {
36         ///assumes that caller filled in the m_quantizedLeafNodes
37         m_useQuantization = true;
38         int numLeafNodes = 0;
39
40         if (m_useQuantization)
41         {
42                 //now we have an array of leafnodes in m_leafNodes
43                 numLeafNodes = m_quantizedLeafNodes.size();
44
45                 m_quantizedContiguousNodes.resize(2 * numLeafNodes);
46         }
47
48         m_curNodeIndex = 0;
49
50         buildTree(0, numLeafNodes);
51
52         ///if the entire tree is small then subtree size, we need to create a header info for the tree
53         if (m_useQuantization && !m_SubtreeHeaders.size())
54         {
55                 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
56                 subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
57                 subtree.m_rootNodeIndex = 0;
58                 subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
59         }
60
61         //PCK: update the copy of the size
62         m_subtreeHeaderCount = m_SubtreeHeaders.size();
63
64         //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
65         m_quantizedLeafNodes.clear();
66         m_leafNodes.clear();
67 }
68
///just for debugging, to visualize the individual patches/subtrees
#ifdef DEBUG_PATCH_COLORS
//four distinct colors (red, green, blue, cyan) for drawing patches
b3Vector3 color[4] =
	{
		b3Vector3(1, 0, 0),
		b3Vector3(0, 1, 0),
		b3Vector3(0, 0, 1),
		b3Vector3(0, 1, 1)};
#endif  //DEBUG_PATCH_COLORS
78
79 void b3QuantizedBvh::setQuantizationValues(const b3Vector3& bvhAabbMin, const b3Vector3& bvhAabbMax, b3Scalar quantizationMargin)
80 {
81         //enlarge the AABB to avoid division by zero when initializing the quantization values
82         b3Vector3 clampValue = b3MakeVector3(quantizationMargin, quantizationMargin, quantizationMargin);
83         m_bvhAabbMin = bvhAabbMin - clampValue;
84         m_bvhAabbMax = bvhAabbMax + clampValue;
85         b3Vector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
86         m_bvhQuantization = b3MakeVector3(b3Scalar(65533.0), b3Scalar(65533.0), b3Scalar(65533.0)) / aabbSize;
87         m_useQuantization = true;
88 }
89
b3QuantizedBvh::~b3QuantizedBvh()
{
	//intentionally empty: no raw resources are owned here
}
93
#ifdef DEBUG_TREE_BUILDING
//current and deepest recursion depth reached by buildTree (debug statistics)
int gStackDepth = 0;
int gMaxStackDepth = 0;
#endif  //DEBUG_TREE_BUILDING
98
99 void b3QuantizedBvh::buildTree(int startIndex, int endIndex)
100 {
101 #ifdef DEBUG_TREE_BUILDING
102         gStackDepth++;
103         if (gStackDepth > gMaxStackDepth)
104                 gMaxStackDepth = gStackDepth;
105 #endif  //DEBUG_TREE_BUILDING
106
107         int splitAxis, splitIndex, i;
108         int numIndices = endIndex - startIndex;
109         int curIndex = m_curNodeIndex;
110
111         b3Assert(numIndices > 0);
112
113         if (numIndices == 1)
114         {
115 #ifdef DEBUG_TREE_BUILDING
116                 gStackDepth--;
117 #endif  //DEBUG_TREE_BUILDING
118
119                 assignInternalNodeFromLeafNode(m_curNodeIndex, startIndex);
120
121                 m_curNodeIndex++;
122                 return;
123         }
124         //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.
125
126         splitAxis = calcSplittingAxis(startIndex, endIndex);
127
128         splitIndex = sortAndCalcSplittingIndex(startIndex, endIndex, splitAxis);
129
130         int internalNodeIndex = m_curNodeIndex;
131
132         //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
133         //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
134         setInternalNodeAabbMin(m_curNodeIndex, m_bvhAabbMax);  //can't use b3Vector3(B3_INFINITY,B3_INFINITY,B3_INFINITY)) because of quantization
135         setInternalNodeAabbMax(m_curNodeIndex, m_bvhAabbMin);  //can't use b3Vector3(-B3_INFINITY,-B3_INFINITY,-B3_INFINITY)) because of quantization
136
137         for (i = startIndex; i < endIndex; i++)
138         {
139                 mergeInternalNodeAabb(m_curNodeIndex, getAabbMin(i), getAabbMax(i));
140         }
141
142         m_curNodeIndex++;
143
144         //internalNode->m_escapeIndex;
145
146         int leftChildNodexIndex = m_curNodeIndex;
147
148         //build left child tree
149         buildTree(startIndex, splitIndex);
150
151         int rightChildNodexIndex = m_curNodeIndex;
152         //build right child tree
153         buildTree(splitIndex, endIndex);
154
155 #ifdef DEBUG_TREE_BUILDING
156         gStackDepth--;
157 #endif  //DEBUG_TREE_BUILDING
158
159         int escapeIndex = m_curNodeIndex - curIndex;
160
161         if (m_useQuantization)
162         {
163                 //escapeIndex is the number of nodes of this subtree
164                 const int sizeQuantizedNode = sizeof(b3QuantizedBvhNode);
165                 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
166                 if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
167                 {
168                         updateSubtreeHeaders(leftChildNodexIndex, rightChildNodexIndex);
169                 }
170         }
171         else
172         {
173         }
174
175         setInternalNodeEscapeIndex(internalNodeIndex, escapeIndex);
176 }
177
178 void b3QuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex, int rightChildNodexIndex)
179 {
180         b3Assert(m_useQuantization);
181
182         b3QuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
183         int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
184         int leftSubTreeSizeInBytes = leftSubTreeSize * static_cast<int>(sizeof(b3QuantizedBvhNode));
185
186         b3QuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
187         int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
188         int rightSubTreeSizeInBytes = rightSubTreeSize * static_cast<int>(sizeof(b3QuantizedBvhNode));
189
190         if (leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
191         {
192                 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
193                 subtree.setAabbFromQuantizeNode(leftChildNode);
194                 subtree.m_rootNodeIndex = leftChildNodexIndex;
195                 subtree.m_subtreeSize = leftSubTreeSize;
196         }
197
198         if (rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
199         {
200                 b3BvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
201                 subtree.setAabbFromQuantizeNode(rightChildNode);
202                 subtree.m_rootNodeIndex = rightChildNodexIndex;
203                 subtree.m_subtreeSize = rightSubTreeSize;
204         }
205
206         //PCK: update the copy of the size
207         m_subtreeHeaderCount = m_SubtreeHeaders.size();
208 }
209
///Partitions the leaf range [startIndex, endIndex) around the mean centroid
///along splitAxis and returns the split position; rebalances the split when
///the partition is too lopsided, to bound the recursion depth of buildTree.
int b3QuantizedBvh::sortAndCalcSplittingIndex(int startIndex, int endIndex, int splitAxis)
{
	int i;
	int splitIndex = startIndex;
	int numIndices = endIndex - startIndex;
	b3Scalar splitValue;

	//split value = mean of the leaf AABB centers along the chosen axis
	b3Vector3 means = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
	for (i = startIndex; i < endIndex; i++)
	{
		b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		means += center;
	}
	means *= (b3Scalar(1.) / (b3Scalar)numIndices);

	splitValue = means[splitAxis];

	//sort leafNodes so all values larger then splitValue comes first, and smaller values start from 'splitIndex'.
	for (i = startIndex; i < endIndex; i++)
	{
		b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
		if (center[splitAxis] > splitValue)
		{
			//swap
			swapLeafNodes(i, splitIndex);
			splitIndex++;
		}
	}

	//if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
	//otherwise the tree-building might fail due to stack-overflows in certain cases.
	//unbalanced1 is unsafe: it can cause stack overflows
	//bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));

	//unbalanced2 should work too: always use center (perfect balanced trees)
	//bool unbalanced2 = true;

	//this should be safe too:
	//consider the split unbalanced when it falls into the outer thirds of the range
	int rangeBalancedIndices = numIndices / 3;
	bool unbalanced = ((splitIndex <= (startIndex + rangeBalancedIndices)) || (splitIndex >= (endIndex - 1 - rangeBalancedIndices)));

	if (unbalanced)
	{
		//fall back to the median position: guarantees both halves are non-empty
		splitIndex = startIndex + (numIndices >> 1);
	}

	//sanity check: neither half of the split may be empty
	bool unbal = (splitIndex == startIndex) || (splitIndex == (endIndex));
	(void)unbal;
	b3Assert(!unbal);

	return splitIndex;
}
262
263 int b3QuantizedBvh::calcSplittingAxis(int startIndex, int endIndex)
264 {
265         int i;
266
267         b3Vector3 means = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
268         b3Vector3 variance = b3MakeVector3(b3Scalar(0.), b3Scalar(0.), b3Scalar(0.));
269         int numIndices = endIndex - startIndex;
270
271         for (i = startIndex; i < endIndex; i++)
272         {
273                 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
274                 means += center;
275         }
276         means *= (b3Scalar(1.) / (b3Scalar)numIndices);
277
278         for (i = startIndex; i < endIndex; i++)
279         {
280                 b3Vector3 center = b3Scalar(0.5) * (getAabbMax(i) + getAabbMin(i));
281                 b3Vector3 diff2 = center - means;
282                 diff2 = diff2 * diff2;
283                 variance += diff2;
284         }
285         variance *= (b3Scalar(1.) / ((b3Scalar)numIndices - 1));
286
287         return variance.maxAxis();
288 }
289
290 void b3QuantizedBvh::reportAabbOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
291 {
292         //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
293
294         if (m_useQuantization)
295         {
296                 ///quantize query AABB
297                 unsigned short int quantizedQueryAabbMin[3];
298                 unsigned short int quantizedQueryAabbMax[3];
299                 quantizeWithClamp(quantizedQueryAabbMin, aabbMin, 0);
300                 quantizeWithClamp(quantizedQueryAabbMax, aabbMax, 1);
301
302                 switch (m_traversalMode)
303                 {
304                         case TRAVERSAL_STACKLESS:
305                                 walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax, 0, m_curNodeIndex);
306                                 break;
307                         case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
308                                 walkStacklessQuantizedTreeCacheFriendly(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
309                                 break;
310                         case TRAVERSAL_RECURSIVE:
311                         {
312                                 const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
313                                 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
314                         }
315                         break;
316                         default:
317                                 //unsupported
318                                 b3Assert(0);
319                 }
320         }
321         else
322         {
323                 walkStacklessTree(nodeCallback, aabbMin, aabbMax);
324         }
325 }
326
//statistics: longest stackless walk observed so far (profiling/debug aid)
static int b3s_maxIterations = 0;
328
///Stackless depth-first walk over the flat, unquantized node array.
///Nodes are stored in depth-first order, so advancing by 1 descends into a
///subtree while advancing by the escape index skips the whole subtree.
void b3QuantizedBvh::walkStacklessTree(b3NodeOverlapCallback* nodeCallback, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
{
	b3Assert(!m_useQuantization);

	const b3OptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < m_curNodeIndex)
	{
		//catch bugs in tree data
		b3Assert(walkIterations < m_curNodeIndex);

		walkIterations++;
		aabbOverlap = b3TestAabbAgainstAabb2(aabbMin, aabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
		//leaves are marked with escape index -1 in the unquantized tree
		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (aabbOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		//descend (or step past a leaf) on overlap; otherwise skip the subtree
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	//record the longest walk seen, for profiling
	if (b3s_maxIterations < walkIterations)
		b3s_maxIterations = walkIterations;
}
371
372 /*
373 ///this was the original recursive traversal, before we optimized towards stackless traversal
374 void    b3QuantizedBvh::walkTree(b3OptimizedBvhNode* rootNode,b3NodeOverlapCallback* nodeCallback,const b3Vector3& aabbMin,const b3Vector3& aabbMax) const
375 {
376         bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
377         if (aabbOverlap)
378         {
379                 isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
380                 if (isLeafNode)
381                 {
382                         nodeCallback->processNode(rootNode);
383                 } else
384                 {
385                         walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
386                         walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
387                 }
388         }
389
390 }
391 */
392
///Recursive traversal of the quantized tree: reports every leaf under
///currentNode whose quantized AABB overlaps the quantized query AABB.
///NOTE(review): recursion depth is bounded only by tree balance — the
///stackless variants are preferred in production (see m_traversalMode).
void b3QuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const b3QuantizedBvhNode* currentNode, b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
{
	b3Assert(m_useQuantization);

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	//PCK: unsigned instead of bool
	aabbOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, currentNode->m_quantizedAabbMin, currentNode->m_quantizedAabbMax);
	isLeafNode = currentNode->isLeafNode();

	//PCK: unsigned instead of bool
	if (aabbOverlap != 0)
	{
		if (isLeafNode)
		{
			nodeCallback->processNode(currentNode->getPartId(), currentNode->getTriangleIndex());
		}
		else
		{
			//process left and right children
			//left child is stored immediately after its parent (depth-first layout)
			const b3QuantizedBvhNode* leftChildNode = currentNode + 1;
			walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);

			//right child follows the entire left subtree (1 node for a leaf, escape index otherwise)
			const b3QuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode + 1 : leftChildNode + leftChildNode->getEscapeIndex();
			walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode, nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax);
		}
	}
}
423
///Stackless ray/box-cast traversal of the unquantized tree: reports every
///leaf whose AABB (inflated by the cast extents aabbMin/aabbMax) is hit by
///the segment raySource->rayTarget.
///NOTE(review): startNodeIndex/endNodeIndex are unused here — the whole tree
///[0, m_curNodeIndex) is always walked; the parameters mirror the quantized variant.
void b3QuantizedBvh::walkStacklessTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex, int endNodeIndex) const
{
	b3Assert(!m_useQuantization);

	const b3OptimizedBvhNode* rootNode = &m_contiguousNodes[0];
	int escapeIndex, curIndex = 0;
	int walkIterations = 0;
	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap = 0;
	unsigned rayBoxOverlap = 0;
	b3Scalar lambda_max = 1.0;

	/* Quick pruning by quantized box */
	//conservative AABB of the whole ray segment, used as a cheap pre-test
	b3Vector3 rayAabbMin = raySource;
	b3Vector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

#ifdef RAYAABB2
	//precompute slab-test inputs: inverse direction and per-axis sign bits
	b3Vector3 rayDir = (rayTarget - raySource);
	rayDir.normalize();
	//lambda_max is the segment length in units of the normalized direction
	lambda_max = rayDir.dot(rayTarget - raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	b3Vector3 rayDirectionInverse;
	rayDirectionInverse[0] = rayDir[0] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[0];
	rayDirectionInverse[1] = rayDir[1] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[1];
	rayDirectionInverse[2] = rayDir[2] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDir[2];
	unsigned int sign[3] = {rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
#endif

	b3Vector3 bounds[2];

	while (curIndex < m_curNodeIndex)
	{
		b3Scalar param = 1.0;
		//catch bugs in tree data
		b3Assert(walkIterations < m_curNodeIndex);

		walkIterations++;

		bounds[0] = rootNode->m_aabbMinOrg;
		bounds[1] = rootNode->m_aabbMaxOrg;
		/* Add box cast extents */
		//inflate the node bounds by the cast shape so a segment test suffices
		bounds[0] -= aabbMax;
		bounds[1] -= aabbMin;

		aabbOverlap = b3TestAabbAgainstAabb2(rayAabbMin, rayAabbMax, rootNode->m_aabbMinOrg, rootNode->m_aabbMaxOrg);
		//perhaps profile if it is worth doing the aabbOverlap test first

#ifdef RAYAABB2
		///careful with this check: need to check division by zero (above) and fix the unQuantize method
		///thanks Joerg/hiker for the reproduction case!
		///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
		//cheap AABB reject first, then the exact slab test
		rayBoxOverlap = aabbOverlap ? b3RayAabb2(raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;

#else
		b3Vector3 normal;
		rayBoxOverlap = b3RayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif

		//leaves are marked with escape index -1 in the unquantized tree
		isLeafNode = rootNode->m_escapeIndex == -1;

		//PCK: unsigned instead of bool
		if (isLeafNode && (rayBoxOverlap != 0))
		{
			nodeCallback->processNode(rootNode->m_subPart, rootNode->m_triangleIndex);
		}

		//PCK: unsigned instead of bool
		//descend (or step past a leaf) on hit; otherwise skip the subtree
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->m_escapeIndex;
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	//record the longest walk seen, for profiling
	if (b3s_maxIterations < walkIterations)
		b3s_maxIterations = walkIterations;
}
513
///Stackless ray/box-cast traversal of the quantized node range
///[startNodeIndex, endNodeIndex): a fast quantized box-box pre-test prunes
///nodes before the exact (unquantized) ray-slab test decides hits.
void b3QuantizedBvh::walkStacklessQuantizedTreeAgainstRay(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax, int startNodeIndex, int endNodeIndex) const
{
	b3Assert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned boxBoxOverlap = 0;
	unsigned rayBoxOverlap = 0;

	b3Scalar lambda_max = 1.0;

#ifdef RAYAABB2
	//precompute slab-test inputs; rayDirection is REUSED to hold the
	//component-wise inverse direction after lambda_max is taken
	b3Vector3 rayDirection = (rayTarget - raySource);
	rayDirection.normalize();
	//lambda_max is the segment length in units of the normalized direction
	lambda_max = rayDirection.dot(rayTarget - raySource);
	///what about division by zero? --> just set rayDirection[i] to 1.0
	rayDirection[0] = rayDirection[0] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[0];
	rayDirection[1] = rayDirection[1] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[1];
	rayDirection[2] = rayDirection[2] == b3Scalar(0.0) ? b3Scalar(B3_LARGE_FLOAT) : b3Scalar(1.0) / rayDirection[2];
	unsigned int sign[3] = {rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
#endif

	/* Quick pruning by quantized box */
	//conservative AABB of the whole ray segment, used for the quantized pre-test
	b3Vector3 rayAabbMin = raySource;
	b3Vector3 rayAabbMax = raySource;
	rayAabbMin.setMin(rayTarget);
	rayAabbMax.setMax(rayTarget);

	/* Add box cast extents to bounding box */
	rayAabbMin += aabbMin;
	rayAabbMax += aabbMax;

	unsigned short int quantizedQueryAabbMin[3];
	unsigned short int quantizedQueryAabbMax[3];
	quantizeWithClamp(quantizedQueryAabbMin, rayAabbMin, 0);
	quantizeWithClamp(quantizedQueryAabbMax, rayAabbMax, 1);

	while (curIndex < endNodeIndex)
	{
//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern b3IDebugDraw* debugDrawerPtr;
		if (curIndex == drawPatch)
		{
			b3Vector3 aabbMin, aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			b3Vector3 color(1, 0, 0);
			debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
		}
#endif  //VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		b3Assert(walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		// only interested if this is closer than any previous hit
		b3Scalar param = 1.0;
		rayBoxOverlap = 0;
		//cheap quantized overlap test first; only overlapping nodes get the exact ray test
		boxBoxOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();
		if (boxBoxOverlap)
		{
			b3Vector3 bounds[2];
			bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
			bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
			/* Add box cast extents */
			//inflate the node bounds by the cast shape so a segment test suffices
			bounds[0] -= aabbMax;
			bounds[1] -= aabbMin;
#if 0
			b3Vector3 normal;
			bool ra2 = b3RayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
			bool ra = b3RayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
			if (ra2 != ra)
			{
				printf("functions don't match\n");
			}
#endif
#ifdef RAYAABB2
			///careful with this check: need to check division by zero (above) and fix the unQuantize method
			///thanks Joerg/hiker for the reproduction case!
			///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858

			//B3_PROFILE("b3RayAabb2");
			rayBoxOverlap = b3RayAabb2(raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);

#else
			rayBoxOverlap = true;  //b3RayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
#endif
		}

		if (isLeafNode && rayBoxOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		//descend (or step past a leaf) on hit; otherwise skip the subtree via the escape index
		if ((rayBoxOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	//record the longest walk seen, for profiling
	if (b3s_maxIterations < walkIterations)
		b3s_maxIterations = walkIterations;
}
638
///Stackless AABB-overlap traversal of the quantized node range
///[startNodeIndex, endNodeIndex): reports every leaf whose quantized AABB
///overlaps the (already quantized) query AABB.
void b3QuantizedBvh::walkStacklessQuantizedTree(b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax, int startNodeIndex, int endNodeIndex) const
{
	b3Assert(m_useQuantization);

	int curIndex = startNodeIndex;
	int walkIterations = 0;
	int subTreeSize = endNodeIndex - startNodeIndex;
	(void)subTreeSize;

	const b3QuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
	int escapeIndex;

	bool isLeafNode;
	//PCK: unsigned instead of bool
	unsigned aabbOverlap;

	while (curIndex < endNodeIndex)
	{
//#define VISUALLY_ANALYZE_BVH 1
#ifdef VISUALLY_ANALYZE_BVH
		//some code snippet to debugDraw aabb, to visually analyze bvh structure
		static int drawPatch = 0;
		//need some global access to a debugDrawer
		extern b3IDebugDraw* debugDrawerPtr;
		if (curIndex == drawPatch)
		{
			b3Vector3 aabbMin, aabbMax;
			aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
			aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
			b3Vector3 color(1, 0, 0);
			debugDrawerPtr->drawAabb(aabbMin, aabbMax, color);
		}
#endif  //VISUALLY_ANALYZE_BVH

		//catch bugs in tree data
		b3Assert(walkIterations < subTreeSize);

		walkIterations++;
		//PCK: unsigned instead of bool
		aabbOverlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, rootNode->m_quantizedAabbMin, rootNode->m_quantizedAabbMax);
		isLeafNode = rootNode->isLeafNode();

		if (isLeafNode && aabbOverlap)
		{
			nodeCallback->processNode(rootNode->getPartId(), rootNode->getTriangleIndex());
		}

		//PCK: unsigned instead of bool
		//descend (or step past a leaf) on overlap; otherwise skip the subtree via the escape index
		if ((aabbOverlap != 0) || isLeafNode)
		{
			rootNode++;
			curIndex++;
		}
		else
		{
			escapeIndex = rootNode->getEscapeIndex();
			rootNode += escapeIndex;
			curIndex += escapeIndex;
		}
	}
	//record the longest walk seen, for profiling
	if (b3s_maxIterations < walkIterations)
		b3s_maxIterations = walkIterations;
}
702
703 //This traversal can be called from Playstation 3 SPU
704 void b3QuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(b3NodeOverlapCallback* nodeCallback, unsigned short int* quantizedQueryAabbMin, unsigned short int* quantizedQueryAabbMax) const
705 {
706         b3Assert(m_useQuantization);
707
708         int i;
709
710         for (i = 0; i < this->m_SubtreeHeaders.size(); i++)
711         {
712                 const b3BvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
713
714                 //PCK: unsigned instead of bool
715                 unsigned overlap = b3TestQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin, quantizedQueryAabbMax, subtree.m_quantizedAabbMin, subtree.m_quantizedAabbMax);
716                 if (overlap != 0)
717                 {
718                         walkStacklessQuantizedTree(nodeCallback, quantizedQueryAabbMin, quantizedQueryAabbMax,
719                                                                            subtree.m_rootNodeIndex,
720                                                                            subtree.m_rootNodeIndex + subtree.m_subtreeSize);
721                 }
722         }
723 }
724
725 void b3QuantizedBvh::reportRayOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget) const
726 {
727         reportBoxCastOverlappingNodex(nodeCallback, raySource, rayTarget, b3MakeVector3(0, 0, 0), b3MakeVector3(0, 0, 0));
728 }
729
730 void b3QuantizedBvh::reportBoxCastOverlappingNodex(b3NodeOverlapCallback* nodeCallback, const b3Vector3& raySource, const b3Vector3& rayTarget, const b3Vector3& aabbMin, const b3Vector3& aabbMax) const
731 {
732         //always use stackless
733
734         if (m_useQuantization)
735         {
736                 walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
737         }
738         else
739         {
740                 walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
741         }
742         /*
743         {
744                 //recursive traversal
745                 b3Vector3 qaabbMin = raySource;
746                 b3Vector3 qaabbMax = raySource;
747                 qaabbMin.setMin(rayTarget);
748                 qaabbMax.setMax(rayTarget);
749                 qaabbMin += aabbMin;
750                 qaabbMax += aabbMax;
751                 reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
752         }
753         */
754 }
755
756 void b3QuantizedBvh::swapLeafNodes(int i, int splitIndex)
757 {
758         if (m_useQuantization)
759         {
760                 b3QuantizedBvhNode tmp = m_quantizedLeafNodes[i];
761                 m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
762                 m_quantizedLeafNodes[splitIndex] = tmp;
763         }
764         else
765         {
766                 b3OptimizedBvhNode tmp = m_leafNodes[i];
767                 m_leafNodes[i] = m_leafNodes[splitIndex];
768                 m_leafNodes[splitIndex] = tmp;
769         }
770 }
771
772 void b3QuantizedBvh::assignInternalNodeFromLeafNode(int internalNode, int leafNodeIndex)
773 {
774         if (m_useQuantization)
775         {
776                 m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
777         }
778         else
779         {
780                 m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
781         }
782 }
783
784 //PCK: include
785 #include <new>
786
787 #if 0
788 //PCK: consts
789 static const unsigned BVH_ALIGNMENT = 16;
790 static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
791
792 static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
793 #endif
794
795 unsigned int b3QuantizedBvh::getAlignmentSerializationPadding()
796 {
797         // I changed this to 0 since the extra padding is not needed or used.
798         return 0;  //BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
799 }
800
801 unsigned b3QuantizedBvh::calculateSerializeBufferSize() const
802 {
803         unsigned baseSize = sizeof(b3QuantizedBvh) + getAlignmentSerializationPadding();
804         baseSize += sizeof(b3BvhSubtreeInfo) * m_subtreeHeaderCount;
805         if (m_useQuantization)
806         {
807                 return baseSize + m_curNodeIndex * sizeof(b3QuantizedBvhNode);
808         }
809         return baseSize + m_curNodeIndex * sizeof(b3OptimizedBvhNode);
810 }
811
///Writes a relocatable, in-place-deserializable image of this BVH into
///o_alignedDataBuffer (layout: class instance, then node array, then subtree
///headers). Pair with deSerializeInPlace(). Size the buffer with
///calculateSerializeBufferSize().
///@param o_alignedDataBuffer destination buffer (alignment check is currently disabled)
///@param i_swapEndian when true, every multi-byte field is byte-swapped on write
///@return always true (the buffer-size/alignment failure path is commented out)
bool b3QuantizedBvh::serialize(void* o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
{
	//NOTE(review): m_subtreeHeaderCount is written from a const method, so it
	//is presumably declared mutable in the header — confirm there.
	b3Assert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
	m_subtreeHeaderCount = m_SubtreeHeaders.size();

	/*	if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		///check alignedment for buffer?
		b3Assert(0);
		return false;
	}
*/

	b3QuantizedBvh* targetBvh = (b3QuantizedBvh*)o_alignedDataBuffer;

	// construct the class so the virtual function table, etc will be set up
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (targetBvh) b3QuantizedBvh;

	//Copy (optionally byte-swapping) the scalar and vector header fields.
	if (i_swapEndian)
	{
		targetBvh->m_curNodeIndex = static_cast<int>(b3SwapEndian(m_curNodeIndex));

		b3SwapVector3Endian(m_bvhAabbMin, targetBvh->m_bvhAabbMin);
		b3SwapVector3Endian(m_bvhAabbMax, targetBvh->m_bvhAabbMax);
		b3SwapVector3Endian(m_bvhQuantization, targetBvh->m_bvhQuantization);

		targetBvh->m_traversalMode = (b3TraversalMode)b3SwapEndian(m_traversalMode);
		targetBvh->m_subtreeHeaderCount = static_cast<int>(b3SwapEndian(m_subtreeHeaderCount));
	}
	else
	{
		targetBvh->m_curNodeIndex = m_curNodeIndex;
		targetBvh->m_bvhAabbMin = m_bvhAabbMin;
		targetBvh->m_bvhAabbMax = m_bvhAabbMax;
		targetBvh->m_bvhQuantization = m_bvhQuantization;
		targetBvh->m_traversalMode = m_traversalMode;
		targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
	}

	targetBvh->m_useQuantization = m_useQuantization;

	//Node records are appended directly after the class image in the buffer.
	unsigned char* nodeData = (unsigned char*)targetBvh;
	nodeData += sizeof(b3QuantizedBvh);

	unsigned sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = m_curNodeIndex;

	if (m_useQuantization)
	{
		//Temporarily point the target's array at the buffer so the element
		//copies below land inside o_alignedDataBuffer.
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(b3SwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];

				targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
			}
		}
		nodeData += sizeof(b3QuantizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable it doesn't really do anything to the data
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined
		// so the memory (which is not freed) is left alone
		targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}
	else
	{
		targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				b3SwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				b3SwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(b3SwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		else
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
				targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;

				targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
				targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
				targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
			}
		}
		nodeData += sizeof(b3OptimizedBvhNode) * nodeCount;

		// this clears the pointer in the member variable it doesn't really do anything to the data
		// it does call the destructor on the contained objects, but they are all classes with no destructor defined
		// so the memory (which is not freed) is left alone
		targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
	}

	sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now serialize the subtree headers
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = b3SwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(b3SwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(b3SwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
		}
	}
	else
	{
		for (int i = 0; i < m_subtreeHeaderCount; i++)
		{
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
			targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);

			// need to clear padding in destination buffer
			targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
			targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
		}
	}
	nodeData += sizeof(b3BvhSubtreeInfo) * m_subtreeHeaderCount;

	// this clears the pointer in the member variable it doesn't really do anything to the data
	// it does call the destructor on the contained objects, but they are all classes with no destructor defined
	// so the memory (which is not freed) is left alone
	targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);

	// this wipes the virtual function table pointer at the start of the buffer for the class
	*((void**)o_alignedDataBuffer) = NULL;

	return true;
}
993
///Reconstructs a usable b3QuantizedBvh directly inside a buffer previously
///filled by serialize(). No node data is copied; the arrays are re-pointed
///into the buffer itself, so the buffer must outlive the returned object.
///@param i_alignedDataBuffer buffer produced by serialize()
///@param i_dataBufferSize    size of that buffer, validated against the expected size
///@param i_swapEndian        when true, all multi-byte fields are byte-swapped in place first
///@return pointer into the buffer (same address), or NULL on a null/too-small buffer
b3QuantizedBvh* b3QuantizedBvh::deSerializeInPlace(void* i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
{
	if (i_alignedDataBuffer == NULL)  // || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
	{
		return NULL;
	}
	b3QuantizedBvh* bvh = (b3QuantizedBvh*)i_alignedDataBuffer;

	//Header fields must be un-swapped before they can be trusted below.
	if (i_swapEndian)
	{
		bvh->m_curNodeIndex = static_cast<int>(b3SwapEndian(bvh->m_curNodeIndex));

		b3UnSwapVector3Endian(bvh->m_bvhAabbMin);
		b3UnSwapVector3Endian(bvh->m_bvhAabbMax);
		b3UnSwapVector3Endian(bvh->m_bvhQuantization);

		bvh->m_traversalMode = (b3TraversalMode)b3SwapEndian(bvh->m_traversalMode);
		bvh->m_subtreeHeaderCount = static_cast<int>(b3SwapEndian(bvh->m_subtreeHeaderCount));
	}

	unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
	b3Assert(calculatedBufSize <= i_dataBufferSize);

	if (calculatedBufSize > i_dataBufferSize)
	{
		return NULL;
	}

	//Node records follow the class image in the buffer (see serialize()).
	unsigned char* nodeData = (unsigned char*)bvh;
	nodeData += sizeof(b3QuantizedBvh);

	unsigned sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	int nodeCount = bvh->m_curNodeIndex;

	// Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
	// Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
	new (bvh) b3QuantizedBvh(*bvh, false);

	if (bvh->m_useQuantization)
	{
		//Re-point the node array at the in-buffer records; swap them in place if needed.
		bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
				bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);

				bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(b3SwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
			}
		}
		nodeData += sizeof(b3QuantizedBvhNode) * nodeCount;
	}
	else
	{
		bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);

		if (i_swapEndian)
		{
			for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
			{
				b3UnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
				b3UnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);

				bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
				bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
				bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(b3SwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
			}
		}
		nodeData += sizeof(b3OptimizedBvhNode) * nodeCount;
	}

	sizeToAdd = 0;  //(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
	nodeData += sizeToAdd;

	// Now serialize the subtree headers
	bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
	if (i_swapEndian)
	{
		for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
		{
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);

			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
			bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = b3SwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);

			bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(b3SwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
			bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(b3SwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
		}
	}

	return bvh;
}
1098
// Constructor that prevents b3Vector3's default constructor from being called
// Used only by deSerializeInPlace() via placement new over an existing buffer:
// it copies the vector members from the buffer image of itself and leaves the
// remaining scalar fields untouched. The aligned-array members are
// default-constructed here and re-pointed into the buffer by the caller
// (initializeFromBuffer in deSerializeInPlace).
b3QuantizedBvh::b3QuantizedBvh(b3QuantizedBvh& self, bool /* ownsMemory */) : m_bvhAabbMin(self.m_bvhAabbMin),
																			  m_bvhAabbMax(self.m_bvhAabbMax),
																			  m_bvhQuantization(self.m_bvhQuantization),
																			  m_bulletVersion(B3_BULLET_VERSION)
{
}
1106
1107 void b3QuantizedBvh::deSerializeFloat(struct b3QuantizedBvhFloatData& quantizedBvhFloatData)
1108 {
1109         m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1110         m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1111         m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
1112
1113         m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
1114         m_useQuantization = quantizedBvhFloatData.m_useQuantization != 0;
1115
1116         {
1117                 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1118                 m_contiguousNodes.resize(numElem);
1119
1120                 if (numElem)
1121                 {
1122                         b3OptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1123
1124                         for (int i = 0; i < numElem; i++, memPtr++)
1125                         {
1126                                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
1127                                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
1128                                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1129                                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1130                                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1131                         }
1132                 }
1133         }
1134
1135         {
1136                 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1137                 m_quantizedContiguousNodes.resize(numElem);
1138
1139                 if (numElem)
1140                 {
1141                         b3QuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1142                         for (int i = 0; i < numElem; i++, memPtr++)
1143                         {
1144                                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1145                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1146                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1147                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1148                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1149                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1150                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1151                         }
1152                 }
1153         }
1154
1155         m_traversalMode = b3TraversalMode(quantizedBvhFloatData.m_traversalMode);
1156
1157         {
1158                 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1159                 m_SubtreeHeaders.resize(numElem);
1160                 if (numElem)
1161                 {
1162                         b3BvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1163                         for (int i = 0; i < numElem; i++, memPtr++)
1164                         {
1165                                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1166                                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1167                                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1168                                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1169                                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1170                                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1171                                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1172                                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1173                         }
1174                 }
1175         }
1176 }
1177
1178 void b3QuantizedBvh::deSerializeDouble(struct b3QuantizedBvhDoubleData& quantizedBvhDoubleData)
1179 {
1180         m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1181         m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1182         m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
1183
1184         m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
1185         m_useQuantization = quantizedBvhDoubleData.m_useQuantization != 0;
1186
1187         {
1188                 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1189                 m_contiguousNodes.resize(numElem);
1190
1191                 if (numElem)
1192                 {
1193                         b3OptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1194
1195                         for (int i = 0; i < numElem; i++, memPtr++)
1196                         {
1197                                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
1198                                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
1199                                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1200                                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1201                                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1202                         }
1203                 }
1204         }
1205
1206         {
1207                 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1208                 m_quantizedContiguousNodes.resize(numElem);
1209
1210                 if (numElem)
1211                 {
1212                         b3QuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1213                         for (int i = 0; i < numElem; i++, memPtr++)
1214                         {
1215                                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1216                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1217                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1218                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1219                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1220                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1221                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1222                         }
1223                 }
1224         }
1225
1226         m_traversalMode = b3TraversalMode(quantizedBvhDoubleData.m_traversalMode);
1227
1228         {
1229                 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1230                 m_SubtreeHeaders.resize(numElem);
1231                 if (numElem)
1232                 {
1233                         b3BvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1234                         for (int i = 0; i < numElem; i++, memPtr++)
1235                         {
1236                                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1237                                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1238                                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1239                                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1240                                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1241                                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1242                                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1243                                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1244                         }
1245                 }
1246         }
1247 }
1248
1249 ///fills the dataBuffer and returns the struct name (and 0 on failure)
1250 const char* b3QuantizedBvh::serialize(void* dataBuffer, b3Serializer* serializer) const
1251 {
1252         b3Assert(0);
1253         return 0;
1254 }