1 /*
2 Bullet Continuous Collision Detection and Physics Library
3 Copyright (c) 2003-2006 Erwin Coumans  http://continuousphysics.com/Bullet/
4
5 This software is provided 'as-is', without any express or implied warranty.
6 In no event will the authors be held liable for any damages arising from the use of this software.
7 Permission is granted to anyone to use this software for any purpose, 
8 including commercial applications, and to alter it and redistribute it freely, 
9 subject to the following restrictions:
10
11 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
12 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
13 3. This notice may not be removed or altered from any source distribution.
14 */
15
16 #include "btQuantizedBvh.h"
17
18 #include "LinearMath/btAabbUtil2.h"
19 #include "LinearMath/btIDebugDraw.h"
20 #include "LinearMath/btSerializer.h"
21
22 #define RAYAABB2
23
24 btQuantizedBvh::btQuantizedBvh() : 
25                                         m_bulletVersion(BT_BULLET_VERSION),
26                                         m_useQuantization(false), 
27                                         //m_traversalMode(TRAVERSAL_STACKLESS_CACHE_FRIENDLY)
28                                         m_traversalMode(TRAVERSAL_STACKLESS)
29                                         //m_traversalMode(TRAVERSAL_RECURSIVE)
30                                         ,m_subtreeHeaderCount(0) //PCK: add this line
31 {
32         m_bvhAabbMin.setValue(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY);
33         m_bvhAabbMax.setValue(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY);
34 }
35
36
37
38
39
40 void btQuantizedBvh::buildInternal()
41 {
42         ///assumes that the caller has already filled in m_quantizedLeafNodes
43         m_useQuantization = true;
44         int numLeafNodes = 0;
45         
46         if (m_useQuantization)
47         {
48                 //now we have an array of leafnodes in m_leafNodes
49                 numLeafNodes = m_quantizedLeafNodes.size();
50
51                 m_quantizedContiguousNodes.resize(2*numLeafNodes);
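                //note (added for clarity): a binary tree with numLeafNodes leaves holds at most 2*numLeafNodes-1 nodes
                //(numLeafNodes leaves plus numLeafNodes-1 internal nodes), so 2*numLeafNodes is a safe upper bound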
52
53         }
54
55         m_curNodeIndex = 0;
56
57         buildTree(0,numLeafNodes);
58
59         ///if the entire tree is smaller than the subtree size, we need to create a header info for the tree
60         if(m_useQuantization && !m_SubtreeHeaders.size())
61         {
62                 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
63                 subtree.setAabbFromQuantizeNode(m_quantizedContiguousNodes[0]);
64                 subtree.m_rootNodeIndex = 0;
65                 subtree.m_subtreeSize = m_quantizedContiguousNodes[0].isLeafNode() ? 1 : m_quantizedContiguousNodes[0].getEscapeIndex();
66         }
67
68         //PCK: update the copy of the size
69         m_subtreeHeaderCount = m_SubtreeHeaders.size();
70
71         //PCK: clear m_quantizedLeafNodes and m_leafNodes, they are temporary
72         m_quantizedLeafNodes.clear();
73         m_leafNodes.clear();
74 }
75
76
77
78 ///just for debugging, to visualize the individual patches/subtrees
79 #ifdef DEBUG_PATCH_COLORS
80 btVector3 color[4]=
81 {
82         btVector3(1,0,0),
83         btVector3(0,1,0),
84         btVector3(0,0,1),
85         btVector3(0,1,1)
86 };
87 #endif //DEBUG_PATCH_COLORS
88
89
90
91 void    btQuantizedBvh::setQuantizationValues(const btVector3& bvhAabbMin,const btVector3& bvhAabbMax,btScalar quantizationMargin)
92 {
93         //enlarge the AABB to avoid division by zero when initializing the quantization values
94         btVector3 clampValue(quantizationMargin,quantizationMargin,quantizationMargin);
95         m_bvhAabbMin = bvhAabbMin - clampValue;
96         m_bvhAabbMax = bvhAabbMax + clampValue;
97         btVector3 aabbSize = m_bvhAabbMax - m_bvhAabbMin;
98         m_bvhQuantization = btVector3(btScalar(65533.0),btScalar(65533.0),btScalar(65533.0)) / aabbSize;
99         m_useQuantization = true;
100 }
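///Illustrative note (added for clarity, not in the original source): with the values above, a point p
///inside the enlarged bvh AABB maps to 16-bit coordinates of roughly
///   q[i] = (unsigned short)((p[i] - m_bvhAabbMin[i]) * m_bvhQuantization[i])
///so each component stays within the 0..65533 range; see quantizeWithClamp()/unQuantize() in
///btQuantizedBvh.h for the exact clamped and rounded conversion.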
101
102
103
104
105 btQuantizedBvh::~btQuantizedBvh()
106 {
107 }
108
109 #ifdef DEBUG_TREE_BUILDING
110 int gStackDepth = 0;
111 int gMaxStackDepth = 0;
112 #endif //DEBUG_TREE_BUILDING
113
114 void    btQuantizedBvh::buildTree       (int startIndex,int endIndex)
115 {
116 #ifdef DEBUG_TREE_BUILDING
117         gStackDepth++;
118         if (gStackDepth > gMaxStackDepth)
119                 gMaxStackDepth = gStackDepth;
120 #endif //DEBUG_TREE_BUILDING
121
122
123         int splitAxis, splitIndex, i;
124         int numIndices =endIndex-startIndex;
125         int curIndex = m_curNodeIndex;
126
127         btAssert(numIndices>0);
128
129         if (numIndices==1)
130         {
131 #ifdef DEBUG_TREE_BUILDING
132                 gStackDepth--;
133 #endif //DEBUG_TREE_BUILDING
134                 
135                 assignInternalNodeFromLeafNode(m_curNodeIndex,startIndex);
136
137                 m_curNodeIndex++;
138                 return; 
139         }
140         //calculate Best Splitting Axis and where to split it. Sort the incoming 'leafNodes' array within range 'startIndex/endIndex'.
141         
142         splitAxis = calcSplittingAxis(startIndex,endIndex);
143
144         splitIndex = sortAndCalcSplittingIndex(startIndex,endIndex,splitAxis);
145
146         int internalNodeIndex = m_curNodeIndex;
147         
148         //set the min aabb to 'inf' or a max value, and set the max aabb to a -inf/minimum value.
149         //the aabb will be expanded during buildTree/mergeInternalNodeAabb with actual node values
150         setInternalNodeAabbMin(m_curNodeIndex,m_bvhAabbMax);//can't use btVector3(SIMD_INFINITY,SIMD_INFINITY,SIMD_INFINITY)) because of quantization
151         setInternalNodeAabbMax(m_curNodeIndex,m_bvhAabbMin);//can't use btVector3(-SIMD_INFINITY,-SIMD_INFINITY,-SIMD_INFINITY)) because of quantization
152         
153         
154         for (i=startIndex;i<endIndex;i++)
155         {
156                 mergeInternalNodeAabb(m_curNodeIndex,getAabbMin(i),getAabbMax(i));
157         }
158
159         m_curNodeIndex++;
160         
161
162         //internalNode->m_escapeIndex;
163         
164         int leftChildNodexIndex = m_curNodeIndex;
165
166         //build left child tree
167         buildTree(startIndex,splitIndex);
168
169         int rightChildNodexIndex = m_curNodeIndex;
170         //build right child tree
171         buildTree(splitIndex,endIndex);
172
173 #ifdef DEBUG_TREE_BUILDING
174         gStackDepth--;
175 #endif //DEBUG_TREE_BUILDING
176
177         int escapeIndex = m_curNodeIndex - curIndex;
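        //escapeIndex equals the number of nodes in this subtree; during stackless traversal it is the
        //offset used to skip the whole subtree when its AABB does not overlap the query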
178
179         if (m_useQuantization)
180         {
181                 //escapeIndex is the number of nodes of this subtree
182                 const int sizeQuantizedNode =sizeof(btQuantizedBvhNode);
183                 const int treeSizeInBytes = escapeIndex * sizeQuantizedNode;
184                 if (treeSizeInBytes > MAX_SUBTREE_SIZE_IN_BYTES)
185                 {
186                         updateSubtreeHeaders(leftChildNodexIndex,rightChildNodexIndex);
187                 }
188         } else
189         {
190
191         }
192
193         setInternalNodeEscapeIndex(internalNodeIndex,escapeIndex);
194
195 }
196
197 void    btQuantizedBvh::updateSubtreeHeaders(int leftChildNodexIndex,int rightChildNodexIndex)
198 {
199         btAssert(m_useQuantization);
200
201         btQuantizedBvhNode& leftChildNode = m_quantizedContiguousNodes[leftChildNodexIndex];
202         int leftSubTreeSize = leftChildNode.isLeafNode() ? 1 : leftChildNode.getEscapeIndex();
203         int leftSubTreeSizeInBytes =  leftSubTreeSize * static_cast<int>(sizeof(btQuantizedBvhNode));
204         
205         btQuantizedBvhNode& rightChildNode = m_quantizedContiguousNodes[rightChildNodexIndex];
206         int rightSubTreeSize = rightChildNode.isLeafNode() ? 1 : rightChildNode.getEscapeIndex();
207         int rightSubTreeSizeInBytes =  rightSubTreeSize *  static_cast<int>(sizeof(btQuantizedBvhNode));
208
209         if(leftSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
210         {
211                 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
212                 subtree.setAabbFromQuantizeNode(leftChildNode);
213                 subtree.m_rootNodeIndex = leftChildNodexIndex;
214                 subtree.m_subtreeSize = leftSubTreeSize;
215         }
216
217         if(rightSubTreeSizeInBytes <= MAX_SUBTREE_SIZE_IN_BYTES)
218         {
219                 btBvhSubtreeInfo& subtree = m_SubtreeHeaders.expand();
220                 subtree.setAabbFromQuantizeNode(rightChildNode);
221                 subtree.m_rootNodeIndex = rightChildNodexIndex;
222                 subtree.m_subtreeSize = rightSubTreeSize;
223         }
224
225         //PCK: update the copy of the size
226         m_subtreeHeaderCount = m_SubtreeHeaders.size();
227 }
228
229
230 int     btQuantizedBvh::sortAndCalcSplittingIndex(int startIndex,int endIndex,int splitAxis)
231 {
232         int i;
233         int splitIndex =startIndex;
234         int numIndices = endIndex - startIndex;
235         btScalar splitValue;
236
237         btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
238         for (i=startIndex;i<endIndex;i++)
239         {
240                 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
241                 means+=center;
242         }
243         means *= (btScalar(1.)/(btScalar)numIndices);
244         
245         splitValue = means[splitAxis];
246         
247         //sort leafNodes so all values larger than splitValue come first, and smaller values start from 'splitIndex'.
248         for (i=startIndex;i<endIndex;i++)
249         {
250                 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
251                 if (center[splitAxis] > splitValue)
252                 {
253                         //swap
254                         swapLeafNodes(i,splitIndex);
255                         splitIndex++;
256                 }
257         }
258
259         //if the splitIndex causes unbalanced trees, fix this by using the center in between startIndex and endIndex
260         //otherwise the tree-building might fail due to stack-overflows in certain cases.
261         //unbalanced1 is unsafe: it can cause stack overflows
262         //bool unbalanced1 = ((splitIndex==startIndex) || (splitIndex == (endIndex-1)));
263
264         //unbalanced2 should work too: always use the center (perfectly balanced trees)
265         //bool unbalanced2 = true;
266
267         //this should be safe too:
268         int rangeBalancedIndices = numIndices/3;
269         bool unbalanced = ((splitIndex<=(startIndex+rangeBalancedIndices)) || (splitIndex >=(endIndex-1-rangeBalancedIndices)));
270         
271         if (unbalanced)
272         {
273                 splitIndex = startIndex+ (numIndices>>1);
274         }
275
276         bool unbal = (splitIndex==startIndex) || (splitIndex == (endIndex));
277         (void)unbal;
278         btAssert(!unbal);
279
280         return splitIndex;
281 }
282
283
284 int     btQuantizedBvh::calcSplittingAxis(int startIndex,int endIndex)
285 {
286         int i;
287
288         btVector3 means(btScalar(0.),btScalar(0.),btScalar(0.));
289         btVector3 variance(btScalar(0.),btScalar(0.),btScalar(0.));
290         int numIndices = endIndex-startIndex;
291
292         for (i=startIndex;i<endIndex;i++)
293         {
294                 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
295                 means+=center;
296         }
297         means *= (btScalar(1.)/(btScalar)numIndices);
298                 
299         for (i=startIndex;i<endIndex;i++)
300         {
301                 btVector3 center = btScalar(0.5)*(getAabbMax(i)+getAabbMin(i));
302                 btVector3 diff2 = center-means;
303                 diff2 = diff2 * diff2;
304                 variance += diff2;
305         }
306         variance *= (btScalar(1.)/      ((btScalar)numIndices-1)        );
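        //choose the axis along which the leaf AABB centers are spread out the most (largest variance);
        //splitting along that axis tends to separate the geometry best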
307         
308         return variance.maxAxis();
309 }
310
311
312
313 void    btQuantizedBvh::reportAabbOverlappingNodex(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
314 {
315         //either choose recursive traversal (walkTree) or stackless (walkStacklessTree)
316
317         if (m_useQuantization)
318         {
319                 ///quantize query AABB
320                 unsigned short int quantizedQueryAabbMin[3];
321                 unsigned short int quantizedQueryAabbMax[3];
322                 quantizeWithClamp(quantizedQueryAabbMin,aabbMin,0);
323                 quantizeWithClamp(quantizedQueryAabbMax,aabbMax,1);
324
325                 switch (m_traversalMode)
326                 {
327                 case TRAVERSAL_STACKLESS:
328                                 walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,0,m_curNodeIndex);
329                         break;
330                 case TRAVERSAL_STACKLESS_CACHE_FRIENDLY:
331                                 walkStacklessQuantizedTreeCacheFriendly(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
332                         break;
333                 case TRAVERSAL_RECURSIVE:
334                         {
335                                 const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[0];
336                                 walkRecursiveQuantizedTreeAgainstQueryAabb(rootNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
337                         }
338                         break;
339                 default:
340                         //unsupported
341                         btAssert(0);
342                 }
343         } else
344         {
345                 walkStacklessTree(nodeCallback,aabbMin,aabbMax);
346         }
347 }
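///Illustrative usage sketch (added for documentation, kept inactive; not part of the original source):
///a minimal btNodeOverlapCallback that simply collects the (subPart, triangleIndex) pairs reported by
///reportAabbOverlappingNodex. The collector class and function below are example names, not Bullet API.
#if 0
struct btExampleCollectNodesCallback : public btNodeOverlapCallback
{
        btAlignedObjectArray<int>       m_triangleIndices;

        virtual void processNode(int subPart, int triangleIndex)
        {
                (void)subPart;
                m_triangleIndices.push_back(triangleIndex);
        }
};

static void exampleAabbQuery(const btQuantizedBvh& bvh, const btVector3& queryAabbMin, const btVector3& queryAabbMax)
{
        btExampleCollectNodesCallback callback;
        bvh.reportAabbOverlappingNodex(&callback, queryAabbMin, queryAabbMax);
        //callback.m_triangleIndices now holds the triangles whose leaf AABBs overlap the query AABB
}
#endif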
348
349
350 int maxIterations = 0;
351
352
353 void    btQuantizedBvh::walkStacklessTree(btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
354 {
355         btAssert(!m_useQuantization);
356
357         const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
358         int escapeIndex, curIndex = 0;
359         int walkIterations = 0;
360         bool isLeafNode;
361         //PCK: unsigned instead of bool
362         unsigned aabbOverlap;
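        //nodes are stored in depth-first order: when the current node overlaps the query (or is a leaf)
        //the walk advances to the next node, otherwise it skips the whole subtree via the escape index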
363
364         while (curIndex < m_curNodeIndex)
365         {
366                 //catch bugs in tree data
367                 btAssert (walkIterations < m_curNodeIndex);
368
369                 walkIterations++;
370                 aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
371                 isLeafNode = rootNode->m_escapeIndex == -1;
372                 
373                 //PCK: unsigned instead of bool
374                 if (isLeafNode && (aabbOverlap != 0))
375                 {
376                         nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
377                 } 
378                 
379                 //PCK: unsigned instead of bool
380                 if ((aabbOverlap != 0) || isLeafNode)
381                 {
382                         rootNode++;
383                         curIndex++;
384                 } else
385                 {
386                         escapeIndex = rootNode->m_escapeIndex;
387                         rootNode += escapeIndex;
388                         curIndex += escapeIndex;
389                 }
390         }
391         if (maxIterations < walkIterations)
392                 maxIterations = walkIterations;
393
394 }
395
396 /*
397 ///this was the original recursive traversal, before we optimized towards stackless traversal
398 void    btQuantizedBvh::walkTree(btOptimizedBvhNode* rootNode,btNodeOverlapCallback* nodeCallback,const btVector3& aabbMin,const btVector3& aabbMax) const
399 {
400         bool isLeafNode, aabbOverlap = TestAabbAgainstAabb2(aabbMin,aabbMax,rootNode->m_aabbMin,rootNode->m_aabbMax);
401         if (aabbOverlap)
402         {
403                 isLeafNode = (!rootNode->m_leftChild && !rootNode->m_rightChild);
404                 if (isLeafNode)
405                 {
406                         nodeCallback->processNode(rootNode);
407                 } else
408                 {
409                         walkTree(rootNode->m_leftChild,nodeCallback,aabbMin,aabbMax);
410                         walkTree(rootNode->m_rightChild,nodeCallback,aabbMin,aabbMax);
411                 }
412         }
413
414 }
415 */
416
417 void btQuantizedBvh::walkRecursiveQuantizedTreeAgainstQueryAabb(const btQuantizedBvhNode* currentNode,btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
418 {
419         btAssert(m_useQuantization);
420         
421         bool isLeafNode;
422         //PCK: unsigned instead of bool
423         unsigned aabbOverlap;
424
425         //PCK: unsigned instead of bool
426         aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,currentNode->m_quantizedAabbMin,currentNode->m_quantizedAabbMax);
427         isLeafNode = currentNode->isLeafNode();
428                 
429         //PCK: unsigned instead of bool
430         if (aabbOverlap != 0)
431         {
432                 if (isLeafNode)
433                 {
434                         nodeCallback->processNode(currentNode->getPartId(),currentNode->getTriangleIndex());
435                 } else
436                 {
437                         //process left and right children
438                         const btQuantizedBvhNode* leftChildNode = currentNode+1;
439                         walkRecursiveQuantizedTreeAgainstQueryAabb(leftChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
440
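                        //the right child starts immediately after the left subtree: one node further if the
                        //left child is a leaf, otherwise leftChildNode->getEscapeIndex() nodes further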
441                         const btQuantizedBvhNode* rightChildNode = leftChildNode->isLeafNode() ? leftChildNode+1:leftChildNode+leftChildNode->getEscapeIndex();
442                         walkRecursiveQuantizedTreeAgainstQueryAabb(rightChildNode,nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax);
443                 }
444         }               
445 }
446
447
448
449 void    btQuantizedBvh::walkStacklessTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
450 {
451         btAssert(!m_useQuantization);
452
453         const btOptimizedBvhNode* rootNode = &m_contiguousNodes[0];
454         int escapeIndex, curIndex = 0;
455         int walkIterations = 0;
456         bool isLeafNode;
457         //PCK: unsigned instead of bool
458         unsigned aabbOverlap=0;
459         unsigned rayBoxOverlap=0;
460         btScalar lambda_max = 1.0;
461         
462                 /* Quick pruning by quantized box */
463         btVector3 rayAabbMin = raySource;
464         btVector3 rayAabbMax = raySource;
465         rayAabbMin.setMin(rayTarget);
466         rayAabbMax.setMax(rayTarget);
467
468         /* Add box cast extents to bounding box */
469         rayAabbMin += aabbMin;
470         rayAabbMax += aabbMax;
471
472 #ifdef RAYAABB2
473         btVector3 rayDir = (rayTarget-raySource);
474         rayDir.normalize ();
475         lambda_max = rayDir.dot(rayTarget-raySource);
476         ///guard against division by zero: if a component of rayDir is zero, use BT_LARGE_FLOAT for its inverse instead
477         btVector3 rayDirectionInverse;
478         rayDirectionInverse[0] = rayDir[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[0];
479         rayDirectionInverse[1] = rayDir[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[1];
480         rayDirectionInverse[2] = rayDir[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDir[2];
481         unsigned int sign[3] = { rayDirectionInverse[0] < 0.0, rayDirectionInverse[1] < 0.0, rayDirectionInverse[2] < 0.0};
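        //sign[i] records, per axis, whether the ray direction is negative; btRayAabb2 uses it to pick the
        //near/far slab bounds without branching on the direction sign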
482 #endif
483
484         btVector3 bounds[2];
485
486         while (curIndex < m_curNodeIndex)
487         {
488                 btScalar param = 1.0;
489                 //catch bugs in tree data
490                 btAssert (walkIterations < m_curNodeIndex);
491
492                 walkIterations++;
493
494                 bounds[0] = rootNode->m_aabbMinOrg;
495                 bounds[1] = rootNode->m_aabbMaxOrg;
496                 /* Add box cast extents */
497                 bounds[0] -= aabbMax;
498                 bounds[1] -= aabbMin;
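                //enlarging the node AABB by the cast box extents (a Minkowski sum) turns the box cast into a
                //plain ray test against the enlarged AABB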
499
500                 aabbOverlap = TestAabbAgainstAabb2(rayAabbMin,rayAabbMax,rootNode->m_aabbMinOrg,rootNode->m_aabbMaxOrg);
501                 //perhaps profile if it is worth doing the aabbOverlap test first
502
503 #ifdef RAYAABB2
504                         ///careful with this check: need to check division by zero (above) and fix the unQuantize method
505                         ///thanks Joerg/hiker for the reproduction case!
506                         ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
507                 rayBoxOverlap = aabbOverlap ? btRayAabb2 (raySource, rayDirectionInverse, sign, bounds, param, 0.0f, lambda_max) : false;
508
509 #else
510                 btVector3 normal;
511                 rayBoxOverlap = btRayAabb(raySource, rayTarget,bounds[0],bounds[1],param, normal);
512 #endif
513
514                 isLeafNode = rootNode->m_escapeIndex == -1;
515                 
516                 //PCK: unsigned instead of bool
517                 if (isLeafNode && (rayBoxOverlap != 0))
518                 {
519                         nodeCallback->processNode(rootNode->m_subPart,rootNode->m_triangleIndex);
520                 } 
521                 
522                 //PCK: unsigned instead of bool
523                 if ((rayBoxOverlap != 0) || isLeafNode)
524                 {
525                         rootNode++;
526                         curIndex++;
527                 } else
528                 {
529                         escapeIndex = rootNode->m_escapeIndex;
530                         rootNode += escapeIndex;
531                         curIndex += escapeIndex;
532                 }
533         }
534         if (maxIterations < walkIterations)
535                 maxIterations = walkIterations;
536
537 }
538
539
540
541 void    btQuantizedBvh::walkStacklessQuantizedTreeAgainstRay(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin, const btVector3& aabbMax, int startNodeIndex,int endNodeIndex) const
542 {
543         btAssert(m_useQuantization);
544         
545         int curIndex = startNodeIndex;
546         int walkIterations = 0;
547         int subTreeSize = endNodeIndex - startNodeIndex;
548         (void)subTreeSize;
549
550         const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
551         int escapeIndex;
552         
553         bool isLeafNode;
554         //PCK: unsigned instead of bool
555         unsigned boxBoxOverlap = 0;
556         unsigned rayBoxOverlap = 0;
557
558         btScalar lambda_max = 1.0;
559
560 #ifdef RAYAABB2
561         btVector3 rayDirection = (rayTarget-raySource);
562         rayDirection.normalize ();
563         lambda_max = rayDirection.dot(rayTarget-raySource);
564         ///guard against division by zero: if a component is zero, store BT_LARGE_FLOAT as its inverse instead
565         rayDirection[0] = rayDirection[0] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[0];
566         rayDirection[1] = rayDirection[1] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[1];
567         rayDirection[2] = rayDirection[2] == btScalar(0.0) ? btScalar(BT_LARGE_FLOAT) : btScalar(1.0) / rayDirection[2];
568         unsigned int sign[3] = { rayDirection[0] < 0.0, rayDirection[1] < 0.0, rayDirection[2] < 0.0};
569 #endif
570
571         /* Quick pruning by quantized box */
572         btVector3 rayAabbMin = raySource;
573         btVector3 rayAabbMax = raySource;
574         rayAabbMin.setMin(rayTarget);
575         rayAabbMax.setMax(rayTarget);
576
577         /* Add box cast extents to bounding box */
578         rayAabbMin += aabbMin;
579         rayAabbMax += aabbMax;
580
581         unsigned short int quantizedQueryAabbMin[3];
582         unsigned short int quantizedQueryAabbMax[3];
583         quantizeWithClamp(quantizedQueryAabbMin,rayAabbMin,0);
584         quantizeWithClamp(quantizedQueryAabbMax,rayAabbMax,1);
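        //the quantized ray AABB allows a cheap 16-bit overlap pre-test against each node before the more
        //expensive exact ray/AABB slab test below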
585
586         while (curIndex < endNodeIndex)
587         {
588
589 //#define VISUALLY_ANALYZE_BVH 1
590 #ifdef VISUALLY_ANALYZE_BVH
591                 //some code snippet to debugDraw aabb, to visually analyze bvh structure
592                 static int drawPatch = 0;
593                 //need some global access to a debugDrawer
594                 extern btIDebugDraw* debugDrawerPtr;
595                 if (curIndex==drawPatch)
596                 {
597                         btVector3 aabbMin,aabbMax;
598                         aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
599                         aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
600                         btVector3       color(1,0,0);
601                         debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
602                 }
603 #endif//VISUALLY_ANALYZE_BVH
604
605                 //catch bugs in tree data
606                 btAssert (walkIterations < subTreeSize);
607
608                 walkIterations++;
609                 //PCK: unsigned instead of bool
610                 // only interested if this is closer than any previous hit
611                 btScalar param = 1.0;
612                 rayBoxOverlap = 0;
613                 boxBoxOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
614                 isLeafNode = rootNode->isLeafNode();
615                 if (boxBoxOverlap)
616                 {
617                         btVector3 bounds[2];
618                         bounds[0] = unQuantize(rootNode->m_quantizedAabbMin);
619                         bounds[1] = unQuantize(rootNode->m_quantizedAabbMax);
620                         /* Add box cast extents */
621                         bounds[0] -= aabbMax;
622                         bounds[1] -= aabbMin;
623                         btVector3 normal;
624 #if 0
625                         bool ra2 = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0, lambda_max);
626                         bool ra = btRayAabb (raySource, rayTarget, bounds[0], bounds[1], param, normal);
627                         if (ra2 != ra)
628                         {
629                                 printf("functions don't match\n");
630                         }
631 #endif
632 #ifdef RAYAABB2
633                         ///careful with this check: need to check division by zero (above) and fix the unQuantize method
634                         ///thanks Joerg/hiker for the reproduction case!
635                         ///http://www.bulletphysics.com/Bullet/phpBB3/viewtopic.php?f=9&t=1858
636
637                         //BT_PROFILE("btRayAabb2");
638                         rayBoxOverlap = btRayAabb2 (raySource, rayDirection, sign, bounds, param, 0.0f, lambda_max);
639                         
640 #else
641                         rayBoxOverlap = true;//btRayAabb(raySource, rayTarget, bounds[0], bounds[1], param, normal);
642 #endif
643                 }
644                 
645                 if (isLeafNode && rayBoxOverlap)
646                 {
647                         nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
648                 }
649                 
650                 //PCK: unsigned instead of bool
651                 if ((rayBoxOverlap != 0) || isLeafNode)
652                 {
653                         rootNode++;
654                         curIndex++;
655                 } else
656                 {
657                         escapeIndex = rootNode->getEscapeIndex();
658                         rootNode += escapeIndex;
659                         curIndex += escapeIndex;
660                 }
661         }
662         if (maxIterations < walkIterations)
663                 maxIterations = walkIterations;
664
665 }
666
667 void    btQuantizedBvh::walkStacklessQuantizedTree(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax,int startNodeIndex,int endNodeIndex) const
668 {
669         btAssert(m_useQuantization);
670         
671         int curIndex = startNodeIndex;
672         int walkIterations = 0;
673         int subTreeSize = endNodeIndex - startNodeIndex;
674         (void)subTreeSize;
675
676         const btQuantizedBvhNode* rootNode = &m_quantizedContiguousNodes[startNodeIndex];
677         int escapeIndex;
678         
679         bool isLeafNode;
680         //PCK: unsigned instead of bool
681         unsigned aabbOverlap;
682
683         while (curIndex < endNodeIndex)
684         {
685
686 //#define VISUALLY_ANALYZE_BVH 1
687 #ifdef VISUALLY_ANALYZE_BVH
688                 //some code snippet to debugDraw aabb, to visually analyze bvh structure
689                 static int drawPatch = 0;
690                 //need some global access to a debugDrawer
691                 extern btIDebugDraw* debugDrawerPtr;
692                 if (curIndex==drawPatch)
693                 {
694                         btVector3 aabbMin,aabbMax;
695                         aabbMin = unQuantize(rootNode->m_quantizedAabbMin);
696                         aabbMax = unQuantize(rootNode->m_quantizedAabbMax);
697                         btVector3       color(1,0,0);
698                         debugDrawerPtr->drawAabb(aabbMin,aabbMax,color);
699                 }
700 #endif//VISUALLY_ANALYZE_BVH
701
702                 //catch bugs in tree data
703                 btAssert (walkIterations < subTreeSize);
704
705                 walkIterations++;
706                 //PCK: unsigned instead of bool
707                 aabbOverlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,rootNode->m_quantizedAabbMin,rootNode->m_quantizedAabbMax);
708                 isLeafNode = rootNode->isLeafNode();
709                 
710                 if (isLeafNode && aabbOverlap)
711                 {
712                         nodeCallback->processNode(rootNode->getPartId(),rootNode->getTriangleIndex());
713                 } 
714                 
715                 //PCK: unsigned instead of bool
716                 if ((aabbOverlap != 0) || isLeafNode)
717                 {
718                         rootNode++;
719                         curIndex++;
720                 } else
721                 {
722                         escapeIndex = rootNode->getEscapeIndex();
723                         rootNode += escapeIndex;
724                         curIndex += escapeIndex;
725                 }
726         }
727         if (maxIterations < walkIterations)
728                 maxIterations = walkIterations;
729
730 }
731
732 //This traversal can be called from the PlayStation 3 SPU
733 void    btQuantizedBvh::walkStacklessQuantizedTreeCacheFriendly(btNodeOverlapCallback* nodeCallback,unsigned short int* quantizedQueryAabbMin,unsigned short int* quantizedQueryAabbMax) const
734 {
735         btAssert(m_useQuantization);
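        //each subtree header stores a conservative AABB for a contiguous block of nodes no larger than
        //MAX_SUBTREE_SIZE_IN_BYTES, so whole blocks can be rejected before any of their nodes are touched;
        //this keeps the traversal working set small (cache and SPU local-store friendly)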
736
737         int i;
738
739
740         for (i=0;i<this->m_SubtreeHeaders.size();i++)
741         {
742                 const btBvhSubtreeInfo& subtree = m_SubtreeHeaders[i];
743
744                 //PCK: unsigned instead of bool
745                 unsigned overlap = testQuantizedAabbAgainstQuantizedAabb(quantizedQueryAabbMin,quantizedQueryAabbMax,subtree.m_quantizedAabbMin,subtree.m_quantizedAabbMax);
746                 if (overlap != 0)
747                 {
748                         walkStacklessQuantizedTree(nodeCallback,quantizedQueryAabbMin,quantizedQueryAabbMax,
749                                 subtree.m_rootNodeIndex,
750                                 subtree.m_rootNodeIndex+subtree.m_subtreeSize);
751                 }
752         }
753 }
754
755
756 void    btQuantizedBvh::reportRayOverlappingNodex (btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget) const
757 {
758         reportBoxCastOverlappingNodex(nodeCallback,raySource,rayTarget,btVector3(0,0,0),btVector3(0,0,0));
759 }
760
761
762 void    btQuantizedBvh::reportBoxCastOverlappingNodex(btNodeOverlapCallback* nodeCallback, const btVector3& raySource, const btVector3& rayTarget, const btVector3& aabbMin,const btVector3& aabbMax) const
763 {
764         //always use stackless
765
766         if (m_useQuantization)
767         {
768                 walkStacklessQuantizedTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
769         }
770         else
771         {
772                 walkStacklessTreeAgainstRay(nodeCallback, raySource, rayTarget, aabbMin, aabbMax, 0, m_curNodeIndex);
773         }
774         /*
775         {
776                 //recursive traversal
777                 btVector3 qaabbMin = raySource;
778                 btVector3 qaabbMax = raySource;
779                 qaabbMin.setMin(rayTarget);
780                 qaabbMax.setMax(rayTarget);
781                 qaabbMin += aabbMin;
782                 qaabbMax += aabbMax;
783                 reportAabbOverlappingNodex(nodeCallback,qaabbMin,qaabbMax);
784         }
785         */
786
787 }
788
789
790 void    btQuantizedBvh::swapLeafNodes(int i,int splitIndex)
791 {
792         if (m_useQuantization)
793         {
794                         btQuantizedBvhNode tmp = m_quantizedLeafNodes[i];
795                         m_quantizedLeafNodes[i] = m_quantizedLeafNodes[splitIndex];
796                         m_quantizedLeafNodes[splitIndex] = tmp;
797         } else
798         {
799                         btOptimizedBvhNode tmp = m_leafNodes[i];
800                         m_leafNodes[i] = m_leafNodes[splitIndex];
801                         m_leafNodes[splitIndex] = tmp;
802         }
803 }
804
805 void    btQuantizedBvh::assignInternalNodeFromLeafNode(int internalNode,int leafNodeIndex)
806 {
807         if (m_useQuantization)
808         {
809                 m_quantizedContiguousNodes[internalNode] = m_quantizedLeafNodes[leafNodeIndex];
810         } else
811         {
812                 m_contiguousNodes[internalNode] = m_leafNodes[leafNodeIndex];
813         }
814 }
815
816 //PCK: include
817 #include <new>
818
819 #if 0
820 //PCK: consts
821 static const unsigned BVH_ALIGNMENT = 16;
822 static const unsigned BVH_ALIGNMENT_MASK = BVH_ALIGNMENT-1;
823
824 static const unsigned BVH_ALIGNMENT_BLOCKS = 2;
825 #endif
826
827
828 unsigned int btQuantizedBvh::getAlignmentSerializationPadding()
829 {
830         // I changed this to 0 since the extra padding is not needed or used.
831         return 0;//BVH_ALIGNMENT_BLOCKS * BVH_ALIGNMENT;
832 }
833
834 unsigned btQuantizedBvh::calculateSerializeBufferSize() const
835 {
836         unsigned baseSize = sizeof(btQuantizedBvh) + getAlignmentSerializationPadding();
837         baseSize += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
838         if (m_useQuantization)
839         {
840                 return baseSize + m_curNodeIndex * sizeof(btQuantizedBvhNode);
841         }
842         return baseSize + m_curNodeIndex * sizeof(btOptimizedBvhNode);
843 }
844
845 bool btQuantizedBvh::serialize(void *o_alignedDataBuffer, unsigned /*i_dataBufferSize */, bool i_swapEndian) const
846 {
847         btAssert(m_subtreeHeaderCount == m_SubtreeHeaders.size());
848         m_subtreeHeaderCount = m_SubtreeHeaders.size();
849
850 /*      if (i_dataBufferSize < calculateSerializeBufferSize() || o_alignedDataBuffer == NULL || (((unsigned)o_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
851         {
852                 ///check alignment of the buffer?
853                 btAssert(0);
854                 return false;
855         }
856 */
857
858         btQuantizedBvh *targetBvh = (btQuantizedBvh *)o_alignedDataBuffer;
859
860         // construct the class so the virtual function table, etc will be set up
861         // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
862         new (targetBvh) btQuantizedBvh;
863
864         if (i_swapEndian)
865         {
866                 targetBvh->m_curNodeIndex = static_cast<int>(btSwapEndian(m_curNodeIndex));
867
868
869                 btSwapVector3Endian(m_bvhAabbMin,targetBvh->m_bvhAabbMin);
870                 btSwapVector3Endian(m_bvhAabbMax,targetBvh->m_bvhAabbMax);
871                 btSwapVector3Endian(m_bvhQuantization,targetBvh->m_bvhQuantization);
872
873                 targetBvh->m_traversalMode = (btTraversalMode)btSwapEndian(m_traversalMode);
874                 targetBvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(m_subtreeHeaderCount));
875         }
876         else
877         {
878                 targetBvh->m_curNodeIndex = m_curNodeIndex;
879                 targetBvh->m_bvhAabbMin = m_bvhAabbMin;
880                 targetBvh->m_bvhAabbMax = m_bvhAabbMax;
881                 targetBvh->m_bvhQuantization = m_bvhQuantization;
882                 targetBvh->m_traversalMode = m_traversalMode;
883                 targetBvh->m_subtreeHeaderCount = m_subtreeHeaderCount;
884         }
885
886         targetBvh->m_useQuantization = m_useQuantization;
887
888         unsigned char *nodeData = (unsigned char *)targetBvh;
889         nodeData += sizeof(btQuantizedBvh);
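        //serialized buffer layout: the btQuantizedBvh object itself, then the (quantized or unquantized)
        //node array, then the btBvhSubtreeInfo headers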
890         
891         unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
892         nodeData += sizeToAdd;
893         
894         int nodeCount = m_curNodeIndex;
895
896         if (m_useQuantization)
897         {
898                 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
899
900                 if (i_swapEndian)
901                 {
902                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
903                         {
904                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
905                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
906                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
907
908                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
909                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
910                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
911
912                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
913                         }
914                 }
915                 else
916                 {
917                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
918                         {
919         
920                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0];
921                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1];
922                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2];
923
924                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0];
925                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1];
926                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2];
927
928                                 targetBvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex;
929
930
931                         }
932                 }
933                 nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
934
935                 // this clears the pointer in the member variable; it doesn't really do anything to the data
936                 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
937                 // so the memory (which is not freed) is left alone
938                 targetBvh->m_quantizedContiguousNodes.initializeFromBuffer(NULL, 0, 0);
939         }
940         else
941         {
942                 targetBvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
943
944                 if (i_swapEndian)
945                 {
946                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
947                         {
948                                 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMinOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
949                                 btSwapVector3Endian(m_contiguousNodes[nodeIndex].m_aabbMaxOrg, targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
950
951                                 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_escapeIndex));
952                                 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_subPart));
953                                 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(m_contiguousNodes[nodeIndex].m_triangleIndex));
954                         }
955                 }
956                 else
957                 {
958                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
959                         {
960                                 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg = m_contiguousNodes[nodeIndex].m_aabbMinOrg;
961                                 targetBvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg = m_contiguousNodes[nodeIndex].m_aabbMaxOrg;
962
963                                 targetBvh->m_contiguousNodes[nodeIndex].m_escapeIndex = m_contiguousNodes[nodeIndex].m_escapeIndex;
964                                 targetBvh->m_contiguousNodes[nodeIndex].m_subPart = m_contiguousNodes[nodeIndex].m_subPart;
965                                 targetBvh->m_contiguousNodes[nodeIndex].m_triangleIndex = m_contiguousNodes[nodeIndex].m_triangleIndex;
966                         }
967                 }
968                 nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
969
970                 // this clears the pointer in the member variable; it doesn't really do anything to the data
971                 // it does call the destructor on the contained objects, but they are all classes with no destructor defined
972                 // so the memory (which is not freed) is left alone
973                 targetBvh->m_contiguousNodes.initializeFromBuffer(NULL, 0, 0);
974         }
975
976         sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
977         nodeData += sizeToAdd;
978
979         // Now serialize the subtree headers
980         targetBvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, m_subtreeHeaderCount, m_subtreeHeaderCount);
981         if (i_swapEndian)
982         {
983                 for (int i = 0; i < m_subtreeHeaderCount; i++)
984                 {
985                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
986                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
987                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
988
989                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
990                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
991                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
992
993                         targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_rootNodeIndex));
994                         targetBvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(m_SubtreeHeaders[i].m_subtreeSize));
995                 }
996         }
997         else
998         {
999                 for (int i = 0; i < m_subtreeHeaderCount; i++)
1000                 {
1001                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = (m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1002                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = (m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1003                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = (m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1004
1005                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = (m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1006                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = (m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1007                         targetBvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = (m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1008
1009                         targetBvh->m_SubtreeHeaders[i].m_rootNodeIndex = (m_SubtreeHeaders[i].m_rootNodeIndex);
1010                         targetBvh->m_SubtreeHeaders[i].m_subtreeSize = (m_SubtreeHeaders[i].m_subtreeSize);
1011
1012                         // need to clear padding in destination buffer
1013                         targetBvh->m_SubtreeHeaders[i].m_padding[0] = 0;
1014                         targetBvh->m_SubtreeHeaders[i].m_padding[1] = 0;
1015                         targetBvh->m_SubtreeHeaders[i].m_padding[2] = 0;
1016                 }
1017         }
1018         nodeData += sizeof(btBvhSubtreeInfo) * m_subtreeHeaderCount;
1019
1020         // this clears the pointer in the member variable; it doesn't really do anything to the data
1021         // it does call the destructor on the contained objects, but they are all classes with no destructor defined
1022         // so the memory (which is not freed) is left alone
1023         targetBvh->m_SubtreeHeaders.initializeFromBuffer(NULL, 0, 0);
1024
1025         // this wipes the virtual function table pointer at the start of the buffer for the class
1026         *((void**)o_alignedDataBuffer) = NULL;
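        //(deSerializeInPlace restores a valid virtual function table with placement new when the buffer is mapped back in)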
1027
1028         return true;
1029 }
1030
1031 btQuantizedBvh *btQuantizedBvh::deSerializeInPlace(void *i_alignedDataBuffer, unsigned int i_dataBufferSize, bool i_swapEndian)
1032 {
1033
1034         if (i_alignedDataBuffer == NULL)// || (((unsigned)i_alignedDataBuffer & BVH_ALIGNMENT_MASK) != 0))
1035         {
1036                 return NULL;
1037         }
1038         btQuantizedBvh *bvh = (btQuantizedBvh *)i_alignedDataBuffer;
1039
1040         if (i_swapEndian)
1041         {
1042                 bvh->m_curNodeIndex = static_cast<int>(btSwapEndian(bvh->m_curNodeIndex));
1043
1044                 btUnSwapVector3Endian(bvh->m_bvhAabbMin);
1045                 btUnSwapVector3Endian(bvh->m_bvhAabbMax);
1046                 btUnSwapVector3Endian(bvh->m_bvhQuantization);
1047
1048                 bvh->m_traversalMode = (btTraversalMode)btSwapEndian(bvh->m_traversalMode);
1049                 bvh->m_subtreeHeaderCount = static_cast<int>(btSwapEndian(bvh->m_subtreeHeaderCount));
1050         }
1051
1052         unsigned int calculatedBufSize = bvh->calculateSerializeBufferSize();
1053         btAssert(calculatedBufSize <= i_dataBufferSize);
1054
1055         if (calculatedBufSize > i_dataBufferSize)
1056         {
1057                 return NULL;
1058         }
1059
1060         unsigned char *nodeData = (unsigned char *)bvh;
1061         nodeData += sizeof(btQuantizedBvh);
1062         
1063         unsigned sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1064         nodeData += sizeToAdd;
1065         
1066         int nodeCount = bvh->m_curNodeIndex;
1067
1068         // Must call placement new to fill in virtual function table, etc, but we don't want to overwrite most data, so call a special version of the constructor
1069         // Also, m_leafNodes and m_quantizedLeafNodes will be initialized to default values by the constructor
1070         new (bvh) btQuantizedBvh(*bvh, false);
1071
1072         if (bvh->m_useQuantization)
1073         {
1074                 bvh->m_quantizedContiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1075
1076                 if (i_swapEndian)
1077                 {
1078                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1079                         {
1080                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[0]);
1081                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[1]);
1082                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMin[2]);
1083
1084                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[0]);
1085                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[1]);
1086                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_quantizedAabbMax[2]);
1087
1088                                 bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex = static_cast<int>(btSwapEndian(bvh->m_quantizedContiguousNodes[nodeIndex].m_escapeIndexOrTriangleIndex));
1089                         }
1090                 }
1091                 nodeData += sizeof(btQuantizedBvhNode) * nodeCount;
1092         }
1093         else
1094         {
1095                 bvh->m_contiguousNodes.initializeFromBuffer(nodeData, nodeCount, nodeCount);
1096
1097                 if (i_swapEndian)
1098                 {
1099                         for (int nodeIndex = 0; nodeIndex < nodeCount; nodeIndex++)
1100                         {
1101                                 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMinOrg);
1102                                 btUnSwapVector3Endian(bvh->m_contiguousNodes[nodeIndex].m_aabbMaxOrg);
1103                                 
1104                                 bvh->m_contiguousNodes[nodeIndex].m_escapeIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_escapeIndex));
1105                                 bvh->m_contiguousNodes[nodeIndex].m_subPart = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_subPart));
1106                                 bvh->m_contiguousNodes[nodeIndex].m_triangleIndex = static_cast<int>(btSwapEndian(bvh->m_contiguousNodes[nodeIndex].m_triangleIndex));
1107                         }
1108                 }
1109                 nodeData += sizeof(btOptimizedBvhNode) * nodeCount;
1110         }
1111
1112         sizeToAdd = 0;//(BVH_ALIGNMENT-((unsigned)nodeData & BVH_ALIGNMENT_MASK))&BVH_ALIGNMENT_MASK;
1113         nodeData += sizeToAdd;
1114
1115         // Now de-serialize the subtree headers
1116         bvh->m_SubtreeHeaders.initializeFromBuffer(nodeData, bvh->m_subtreeHeaderCount, bvh->m_subtreeHeaderCount);
1117         if (i_swapEndian)
1118         {
1119                 for (int i = 0; i < bvh->m_subtreeHeaderCount; i++)
1120                 {
1121                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[0]);
1122                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[1]);
1123                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMin[2]);
1124
1125                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[0]);
1126                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[1]);
1127                         bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2] = btSwapEndian(bvh->m_SubtreeHeaders[i].m_quantizedAabbMax[2]);
1128
1129                         bvh->m_SubtreeHeaders[i].m_rootNodeIndex = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_rootNodeIndex));
1130                         bvh->m_SubtreeHeaders[i].m_subtreeSize = static_cast<int>(btSwapEndian(bvh->m_SubtreeHeaders[i].m_subtreeSize));
1131                 }
1132         }
1133
1134         return bvh;
1135 }
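
///Illustrative usage sketch (added for documentation, kept inactive; not part of the original source):
///serializing a bvh into a caller-owned buffer and mapping it back in place. The helper function name,
///buffer handling and 16-byte alignment are example choices, not requirements imposed by this file.
#if 0
static void exampleSerializeRoundTrip(const btQuantizedBvh& sourceBvh)
{
        unsigned bufferSize = sourceBvh.calculateSerializeBufferSize();
        void* buffer = btAlignedAlloc(bufferSize, 16);

        bool swapEndian = false;
        if (sourceBvh.serialize(buffer, bufferSize, swapEndian))
        {
                //the buffer could now be written to disk and later mapped back without rebuilding the tree
                btQuantizedBvh* mappedBvh = btQuantizedBvh::deSerializeInPlace(buffer, bufferSize, swapEndian);
                (void)mappedBvh;
        }
        btAlignedFree(buffer);
}
#endif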
1136
1137 // Constructor that prevents btVector3's default constructor from being called
1138 btQuantizedBvh::btQuantizedBvh(btQuantizedBvh &self, bool /* ownsMemory */) :
1139 m_bvhAabbMin(self.m_bvhAabbMin),
1140 m_bvhAabbMax(self.m_bvhAabbMax),
1141 m_bvhQuantization(self.m_bvhQuantization),
1142 m_bulletVersion(BT_BULLET_VERSION)
1143 {
1144
1145 }
1146
1147 void btQuantizedBvh::deSerializeFloat(struct btQuantizedBvhFloatData& quantizedBvhFloatData)
1148 {
1149         m_bvhAabbMax.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMax);
1150         m_bvhAabbMin.deSerializeFloat(quantizedBvhFloatData.m_bvhAabbMin);
1151         m_bvhQuantization.deSerializeFloat(quantizedBvhFloatData.m_bvhQuantization);
1152
1153         m_curNodeIndex = quantizedBvhFloatData.m_curNodeIndex;
1154         m_useQuantization = quantizedBvhFloatData.m_useQuantization!=0;
1155         
1156         {
1157                 int numElem = quantizedBvhFloatData.m_numContiguousLeafNodes;
1158                 m_contiguousNodes.resize(numElem);
1159
1160                 if (numElem)
1161                 {
1162                         btOptimizedBvhNodeFloatData* memPtr = quantizedBvhFloatData.m_contiguousNodesPtr;
1163
1164                         for (int i=0;i<numElem;i++,memPtr++)
1165                         {
1166                                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeFloat(memPtr->m_aabbMaxOrg);
1167                                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeFloat(memPtr->m_aabbMinOrg);
1168                                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1169                                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1170                                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1171                         }
1172                 }
1173         }
1174
1175         {
1176                 int numElem = quantizedBvhFloatData.m_numQuantizedContiguousNodes;
1177                 m_quantizedContiguousNodes.resize(numElem);
1178                 
1179                 if (numElem)
1180                 {
1181                         btQuantizedBvhNodeData* memPtr = quantizedBvhFloatData.m_quantizedContiguousNodesPtr;
1182                         for (int i=0;i<numElem;i++,memPtr++)
1183                         {
1184                                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1185                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1186                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1187                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1188                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1189                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1190                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1191                         }
1192                 }
1193         }
1194
1195         m_traversalMode = btTraversalMode(quantizedBvhFloatData.m_traversalMode);
1196         
1197         {
1198                 int numElem = quantizedBvhFloatData.m_numSubtreeHeaders;
1199                 m_SubtreeHeaders.resize(numElem);
1200                 if (numElem)
1201                 {
1202                         btBvhSubtreeInfoData* memPtr = quantizedBvhFloatData.m_subTreeInfoPtr;
1203                         for (int i=0;i<numElem;i++,memPtr++)
1204                         {
1205                                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1206                                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1207                                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1208                                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1209                                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1210                                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1211                                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1212                                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1213                         }
1214                 }
1215         }
1216 }
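
// Hedged sketch of how the single-precision path is typically driven: a loader that already has
// a btQuantizedBvhFloatData chunk in memory (the 'floatData' pointer below is hypothetical and
// comes from whatever read the .bullet data) rebuilds a stand-alone tree like this.
//
//	btQuantizedBvhFloatData* floatData = /* chunk obtained from the loader */;
//	btQuantizedBvh* bvh = new btQuantizedBvh();
//	bvh->deSerializeFloat(*floatData);   // copies contiguous nodes, quantized nodes and subtree headers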
1217
1218 void btQuantizedBvh::deSerializeDouble(struct btQuantizedBvhDoubleData& quantizedBvhDoubleData)
1219 {
1220         m_bvhAabbMax.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMax);
1221         m_bvhAabbMin.deSerializeDouble(quantizedBvhDoubleData.m_bvhAabbMin);
1222         m_bvhQuantization.deSerializeDouble(quantizedBvhDoubleData.m_bvhQuantization);
1223
1224         m_curNodeIndex = quantizedBvhDoubleData.m_curNodeIndex;
1225         m_useQuantization = quantizedBvhDoubleData.m_useQuantization!=0;
1226         
1227         {
1228                 int numElem = quantizedBvhDoubleData.m_numContiguousLeafNodes;
1229                 m_contiguousNodes.resize(numElem);
1230
1231                 if (numElem)
1232                 {
1233                         btOptimizedBvhNodeDoubleData* memPtr = quantizedBvhDoubleData.m_contiguousNodesPtr;
1234
1235                         for (int i=0;i<numElem;i++,memPtr++)
1236                         {
1237                                 m_contiguousNodes[i].m_aabbMaxOrg.deSerializeDouble(memPtr->m_aabbMaxOrg);
1238                                 m_contiguousNodes[i].m_aabbMinOrg.deSerializeDouble(memPtr->m_aabbMinOrg);
1239                                 m_contiguousNodes[i].m_escapeIndex = memPtr->m_escapeIndex;
1240                                 m_contiguousNodes[i].m_subPart = memPtr->m_subPart;
1241                                 m_contiguousNodes[i].m_triangleIndex = memPtr->m_triangleIndex;
1242                         }
1243                 }
1244         }
1245
1246         {
1247                 int numElem = quantizedBvhDoubleData.m_numQuantizedContiguousNodes;
1248                 m_quantizedContiguousNodes.resize(numElem);
1249                 
1250                 if (numElem)
1251                 {
1252                         btQuantizedBvhNodeData* memPtr = quantizedBvhDoubleData.m_quantizedContiguousNodesPtr;
1253                         for (int i=0;i<numElem;i++,memPtr++)
1254                         {
1255                                 m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex = memPtr->m_escapeIndexOrTriangleIndex;
1256                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1257                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1258                                 m_quantizedContiguousNodes[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1259                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1260                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1261                                 m_quantizedContiguousNodes[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1262                         }
1263                 }
1264         }
1265
1266         m_traversalMode = btTraversalMode(quantizedBvhDoubleData.m_traversalMode);
1267         
1268         {
1269                 int numElem = quantizedBvhDoubleData.m_numSubtreeHeaders;
1270                 m_SubtreeHeaders.resize(numElem);
1271                 if (numElem)
1272                 {
1273                         btBvhSubtreeInfoData* memPtr = quantizedBvhDoubleData.m_subTreeInfoPtr;
1274                         for (int i=0;i<numElem;i++,memPtr++)
1275                         {
1276                                 m_SubtreeHeaders[i].m_quantizedAabbMax[0] = memPtr->m_quantizedAabbMax[0];
1277                                 m_SubtreeHeaders[i].m_quantizedAabbMax[1] = memPtr->m_quantizedAabbMax[1];
1278                                 m_SubtreeHeaders[i].m_quantizedAabbMax[2] = memPtr->m_quantizedAabbMax[2];
1279                                 m_SubtreeHeaders[i].m_quantizedAabbMin[0] = memPtr->m_quantizedAabbMin[0];
1280                                 m_SubtreeHeaders[i].m_quantizedAabbMin[1] = memPtr->m_quantizedAabbMin[1];
1281                                 m_SubtreeHeaders[i].m_quantizedAabbMin[2] = memPtr->m_quantizedAabbMin[2];
1282                                 m_SubtreeHeaders[i].m_rootNodeIndex = memPtr->m_rootNodeIndex;
1283                                 m_SubtreeHeaders[i].m_subtreeSize = memPtr->m_subtreeSize;
1284                         }
1285                 }
1286         }
1287
1288 }
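
// deSerializeDouble mirrors deSerializeFloat field-for-field; only the btVector3 members differ
// in precision. Which struct a writer emits is chosen at compile time: under the usual Bullet
// convention (defined in btQuantizedBvh.h, restated here only as an illustration) the
// btQuantizedBvhData and btQuantizedBvhDataName names used by serialize() below resolve as:
//
//	#ifdef BT_USE_DOUBLE_PRECISION
//	#define btQuantizedBvhData      btQuantizedBvhDoubleData
//	#define btQuantizedBvhDataName  "btQuantizedBvhDoubleData"
//	#else
//	#define btQuantizedBvhData      btQuantizedBvhFloatData
//	#define btQuantizedBvhDataName  "btQuantizedBvhFloatData"
//	#endif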
1289
1290
1291
1292 ///fills the dataBuffer and returns the struct name (or 0 on failure)
1293 const char*     btQuantizedBvh::serialize(void* dataBuffer, btSerializer* serializer) const
1294 {
1295         btQuantizedBvhData* quantizedData = (btQuantizedBvhData*)dataBuffer;
1296         
1297         m_bvhAabbMax.serialize(quantizedData->m_bvhAabbMax);
1298         m_bvhAabbMin.serialize(quantizedData->m_bvhAabbMin);
1299         m_bvhQuantization.serialize(quantizedData->m_bvhQuantization);
1300
1301         quantizedData->m_curNodeIndex = m_curNodeIndex;
1302         quantizedData->m_useQuantization = m_useQuantization;
1303         
1304         quantizedData->m_numContiguousLeafNodes = m_contiguousNodes.size();
1305         quantizedData->m_contiguousNodesPtr = (btOptimizedBvhNodeData*) (m_contiguousNodes.size() ? serializer->getUniquePointer((void*)&m_contiguousNodes[0]) : 0);
1306         if (quantizedData->m_contiguousNodesPtr)
1307         {
1308                 int sz = sizeof(btOptimizedBvhNodeData);
1309                 int numElem = m_contiguousNodes.size();
1310                 btChunk* chunk = serializer->allocate(sz,numElem);
1311                 btOptimizedBvhNodeData* memPtr = (btOptimizedBvhNodeData*)chunk->m_oldPtr;
1312                 for (int i=0;i<numElem;i++,memPtr++)
1313                 {
1314                         m_contiguousNodes[i].m_aabbMaxOrg.serialize(memPtr->m_aabbMaxOrg);
1315                         m_contiguousNodes[i].m_aabbMinOrg.serialize(memPtr->m_aabbMinOrg);
1316                         memPtr->m_escapeIndex = m_contiguousNodes[i].m_escapeIndex;
1317                         memPtr->m_subPart = m_contiguousNodes[i].m_subPart;
1318                         memPtr->m_triangleIndex = m_contiguousNodes[i].m_triangleIndex;
1319                 }
1320                 serializer->finalizeChunk(chunk,"btOptimizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_contiguousNodes[0]);
1321         }
1322
1323         quantizedData->m_numQuantizedContiguousNodes = m_quantizedContiguousNodes.size();
1324 //      printf("quantizedData->m_numQuantizedContiguousNodes=%d\n",quantizedData->m_numQuantizedContiguousNodes);
1325         quantizedData->m_quantizedContiguousNodesPtr =(btQuantizedBvhNodeData*) (m_quantizedContiguousNodes.size() ? serializer->getUniquePointer((void*)&m_quantizedContiguousNodes[0]) : 0);
1326         if (quantizedData->m_quantizedContiguousNodesPtr)
1327         {
1328                 int sz = sizeof(btQuantizedBvhNodeData);
1329                 int numElem = m_quantizedContiguousNodes.size();
1330                 btChunk* chunk = serializer->allocate(sz,numElem);
1331                 btQuantizedBvhNodeData* memPtr = (btQuantizedBvhNodeData*)chunk->m_oldPtr;
1332                 for (int i=0;i<numElem;i++,memPtr++)
1333                 {
1334                         memPtr->m_escapeIndexOrTriangleIndex = m_quantizedContiguousNodes[i].m_escapeIndexOrTriangleIndex;
1335                         memPtr->m_quantizedAabbMax[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[0];
1336                         memPtr->m_quantizedAabbMax[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[1];
1337                         memPtr->m_quantizedAabbMax[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMax[2];
1338                         memPtr->m_quantizedAabbMin[0] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[0];
1339                         memPtr->m_quantizedAabbMin[1] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[1];
1340                         memPtr->m_quantizedAabbMin[2] = m_quantizedContiguousNodes[i].m_quantizedAabbMin[2];
1341                 }
1342                 serializer->finalizeChunk(chunk,"btQuantizedBvhNodeData",BT_ARRAY_CODE,(void*)&m_quantizedContiguousNodes[0]);
1343         }
1344
1345         quantizedData->m_traversalMode = int(m_traversalMode);
1346         quantizedData->m_numSubtreeHeaders = m_SubtreeHeaders.size();
1347
1348         quantizedData->m_subTreeInfoPtr = (btBvhSubtreeInfoData*) (m_SubtreeHeaders.size() ? serializer->getUniquePointer((void*)&m_SubtreeHeaders[0]) : 0);
1349         if (quantizedData->m_subTreeInfoPtr)
1350         {
1351                 int sz = sizeof(btBvhSubtreeInfoData);
1352                 int numElem = m_SubtreeHeaders.size();
1353                 btChunk* chunk = serializer->allocate(sz,numElem);
1354                 btBvhSubtreeInfoData* memPtr = (btBvhSubtreeInfoData*)chunk->m_oldPtr;
1355                 for (int i=0;i<numElem;i++,memPtr++)
1356                 {
1357                         memPtr->m_quantizedAabbMax[0] = m_SubtreeHeaders[i].m_quantizedAabbMax[0];
1358                         memPtr->m_quantizedAabbMax[1] = m_SubtreeHeaders[i].m_quantizedAabbMax[1];
1359                         memPtr->m_quantizedAabbMax[2] = m_SubtreeHeaders[i].m_quantizedAabbMax[2];
1360                         memPtr->m_quantizedAabbMin[0] = m_SubtreeHeaders[i].m_quantizedAabbMin[0];
1361                         memPtr->m_quantizedAabbMin[1] = m_SubtreeHeaders[i].m_quantizedAabbMin[1];
1362                         memPtr->m_quantizedAabbMin[2] = m_SubtreeHeaders[i].m_quantizedAabbMin[2];
1363
1364                         memPtr->m_rootNodeIndex = m_SubtreeHeaders[i].m_rootNodeIndex;
1365                         memPtr->m_subtreeSize = m_SubtreeHeaders[i].m_subtreeSize;
1366                 }
1367                 serializer->finalizeChunk(chunk,"btBvhSubtreeInfoData",BT_ARRAY_CODE,(void*)&m_SubtreeHeaders[0]);
1368         }
1369         return btQuantizedBvhDataName;
1370 }
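
// Hedged example of how a caller usually drives this serialize() overload through a btSerializer;
// the pattern mirrors the sub-chunk handling above. calculateSerializeBufferSizeNew() and
// BT_QUANTIZED_BVH_CODE are declared elsewhere in Bullet -- treat the exact names as assumptions
// if building against a different version.
//
//	int len = bvh->calculateSerializeBufferSizeNew();
//	btChunk* chunk = serializer->allocate(len, 1);
//	const char* structType = bvh->serialize(chunk->m_oldPtr, serializer);
//	serializer->finalizeChunk(chunk, structType, BT_QUANTIZED_BVH_CODE, (void*)bvh);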
1371
1372
1373
1374
1375