swr: [rasterizer] Add SWR_ASSUME / SWR_ASSUME_ASSERT macros
author      Tim Rowley <timothy.o.rowley@intel.com>
            Mon, 2 May 2016 17:37:37 +0000 (11:37 -0600)
committer   Tim Rowley <timothy.o.rowley@intel.com>
            Thu, 5 May 2016 19:49:56 +0000 (14:49 -0500)
Fix static code analysis errors found by Coverity on Linux

Reviewed-by: Bruce Cherniak <bruce.cherniak@intel.com>
src/gallium/drivers/swr/rasterizer/common/swr_assert.h
src/gallium/drivers/swr/rasterizer/core/arena.h

src/gallium/drivers/swr/rasterizer/common/swr_assert.h
index a805acc..04d7681 100644
 #error swr_assert.h should not be included directly, please include "common/os.h" instead.
 #endif
 
+//=============================================================================
+//
+// MACROS defined in this file:
+//
+// - SWR_ASSUME(expression, ...):   Tell the compiler that the expression is true.
+//                                  Helps with static code analysis as well.
+//                                  DO NOT USE if code after this dynamically
+//                                  checks for errors and handles them.  The
+//                                  compiler may optimize out the error check.
+//
+// - SWR_ASSERT(expression, ...):   Inform the user if the expression is false.
+//                                  This check is only conditionally made,
+//                                  usually only in debug mode.
+//
+// - SWR_REL_ASSERT(expression, ...): Unconditionally enabled version of SWR_ASSERT
+//
+// - SWR_ASSUME_ASSERT(expression, ...): Conditionally enabled SWR_ASSERT.  Uses
+//                                       SWR_ASSUME if SWR_ASSERT is disabled.
+//                                       DO NOT USE in combination with actual
+//                                       error checking (see SWR_ASSUME)
+//
+// - SWR_REL_ASSUME_ASSERT(expression, ...): Same as SWR_REL_ASSERT; falls back
+//                                           to SWR_ASSUME if disabled.
+//
+//=============================================================================
+
+#if defined(_WIN32)
+#define SWR_ASSUME(e, ...) __assume(e)
+#elif defined(__clang__)
+#define SWR_ASSUME(e, ...) __builtin_assume(e)
+#elif defined(__GNUC__)
+#define SWR_ASSUME(e, ...) ((e) ? ((void)0) : __builtin_unreachable())
+#else
+#define SWR_ASSUME(e, ...) ASSUME(e)
+#endif
+
 #if !defined(SWR_ENABLE_ASSERTS)
 
 #if !defined(NDEBUG)
@@ -79,28 +114,33 @@ bool SwrAssert(
 }
 
 #if SWR_ENABLE_ASSERTS
-#define SWR_ASSERT(e, ...) _SWR_ASSERT(true, e, ##__VA_ARGS__)
+#define SWR_ASSERT(e, ...)              _SWR_ASSERT(true, e, ##__VA_ARGS__)
+#define SWR_ASSUME_ASSERT(e, ...)       SWR_ASSERT(e, ##__VA_ARGS__)
 
 #if defined(assert)
 #undef assert
 #endif
 #define assert(exp) SWR_ASSERT(exp)
 
-#endif
+#endif // SWR_ENABLE_ASSERTS
 
 #if SWR_ENABLE_REL_ASSERTS
-#define SWR_REL_ASSERT(e, ...) _SWR_ASSERT(false, e, ##__VA_ARGS__)
+#define SWR_REL_ASSERT(e, ...)          _SWR_ASSERT(false, e, ##__VA_ARGS__)
+#define SWR_REL_ASSUME_ASSERT(e, ...)   SWR_REL_ASSERT(e, ##__VA_ARGS__)
 #endif
+
 #endif // C++
 
 #endif // SWR_ENABLE_ASSERTS || SWR_ENABLE_REL_ASSERTS
 
 #if !SWR_ENABLE_ASSERTS
-#define SWR_ASSERT(e, ...) (void)(0)
+#define SWR_ASSERT(e, ...)              (void)(0)
+#define SWR_ASSUME_ASSERT(e, ...)       SWR_ASSUME(e, ##__VA_ARGS__)
 #endif
 
 #if !SWR_ENABLE_REL_ASSERTS
-#define SWR_REL_ASSERT(e, ...) (void)(0)
+#define SWR_REL_ASSERT(e, ...)          (void)(0)
+#define SWR_REL_ASSUME_ASSERT(e, ...)   SWR_ASSUME(e, ##__VA_ARGS__)
 #endif
 
 #define SWR_NOT_IMPL SWR_ASSERT(0, "%s not implemented", __FUNCTION__)
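
For context on how the new macros are meant to be used, below is a minimal
sketch (not part of the patch; ZeroRegion and its parameters are hypothetical)
of the distinction the comment block draws: SWR_ASSUME_ASSERT states an
invariant the caller guarantees, while a condition the code actually handles
at runtime must stay an ordinary branch, since SWR_ASSUME would permit the
compiler to delete that check.

    #include <cstddef>
    #include <cstring>
    #include "common/os.h"  // brings in swr_assert.h (see guard at top of this file)

    // Hypothetical helper, purely illustrative of the rules above.
    static void ZeroRegion(void* pMem, size_t size, size_t minSize)
    {
        // Caller-guaranteed invariant: a real assert in debug builds, a pure
        // compiler/analyzer hint via SWR_ASSUME when asserts are compiled out.
        SWR_ASSUME_ASSERT(size >= minSize);

        // Dynamically handled condition: must remain a real branch.  Wrapping
        // it in SWR_ASSUME could let the compiler drop the null check.
        if (pMem == nullptr)
        {
            return;
        }

        memset(pMem, 0, size);
    }

With asserts enabled the first line behaves like a normal assert; with asserts
disabled it expands to __assume / __builtin_assume / __builtin_unreachable per
the compiler-specific definitions above, so it costs nothing at runtime.
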
src/gallium/drivers/swr/rasterizer/core/arena.h
index b80f6f0..26785db 100644
@@ -52,7 +52,7 @@ class DefaultAllocator
 public:
     ArenaBlock* AllocateAligned(size_t size, size_t align)
     {
-        SWR_ASSERT(size >= sizeof(ArenaBlock));
+        SWR_ASSUME_ASSERT(size >= sizeof(ArenaBlock));
 
         ArenaBlock* p = new (_aligned_malloc(size, align)) ArenaBlock();
         p->blockSize = size;
@@ -63,7 +63,7 @@ public:
     {
         if (pMem)
         {
-            SWR_ASSERT(pMem->blockSize < size_t(0xdddddddd));
+            SWR_ASSUME_ASSERT(pMem->blockSize < size_t(0xdddddddd));
             _aligned_free(pMem);
         }
     }
@@ -75,8 +75,8 @@ struct CachingAllocatorT : DefaultAllocator
 {
     ArenaBlock* AllocateAligned(size_t size, size_t align)
     {
-        SWR_ASSERT(size >= sizeof(ArenaBlock));
-        SWR_ASSERT(size <= uint32_t(-1));
+        SWR_ASSUME_ASSERT(size >= sizeof(ArenaBlock));
+        SWR_ASSUME_ASSERT(size <= uint32_t(-1));
 
         uint32_t bucket = GetBucketId(size);
 
@@ -111,7 +111,7 @@ struct CachingAllocatorT : DefaultAllocator
 
             if (pBlock)
             {
-                SWR_ASSERT(pPrevBlock && pPrevBlock->pNext == pBlock);
+                SWR_ASSUME_ASSERT(pPrevBlock && pPrevBlock->pNext == pBlock);
                 pPrevBlock->pNext = pBlock->pNext;
                 pBlock->pNext = nullptr;
 
@@ -143,8 +143,6 @@ struct CachingAllocatorT : DefaultAllocator
     {
         if (pMem)
         {
-            SWR_ASSERT(pMem->blockSize >= 0);
-
             std::unique_lock<std::mutex> l(m_mutex);
             InsertCachedBlock(GetBucketId(pMem->blockSize), pMem);
         }
@@ -260,7 +258,7 @@ private:
     template <bool OldBlockT = false>
     void InsertCachedBlock(uint32_t bucketId, ArenaBlock* pNewBlock)
     {
-        SWR_ASSERT(bucketId < CACHE_NUM_BUCKETS);
+        SWR_ASSUME_ASSERT(bucketId < CACHE_NUM_BUCKETS);
 
         ArenaBlock* pPrevBlock = OldBlockT ? &m_oldCachedBlocks[bucketId] : &m_cachedBlocks[bucketId];
         ArenaBlock* pBlock = pPrevBlock->pNext;
@@ -277,7 +275,7 @@ private:
         }
 
         // Insert into list
-        SWR_ASSERT(pPrevBlock);
+        SWR_ASSUME_ASSERT(pPrevBlock);
         pPrevBlock->pNext = pNewBlock;
         pNewBlock->pNext = pBlock;
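
For the arena changes, the practical effect of switching these checks to
SWR_ASSUME_ASSERT is that release builds (where SWR_ASSERT compiles to
(void)(0)) still communicate the bounds to the compiler and to static
analysis; on GCC, for instance, SWR_ASSUME expands to
((e) ? ((void)0) : __builtin_unreachable()).  The dropped blockSize >= 0
assert is presumably one of the Coverity findings: blockSize appears to be an
unsigned size_t, so that comparison is always true.  A self-contained sketch
of the bounds-checking benefit, mirroring InsertCachedBlock (the bucket count
and names here are illustrative, not taken from the patch):

    #include <cstdint>
    #include "common/os.h"  // brings in swr_assert.h

    static const uint32_t CACHE_NUM_BUCKETS = 16;          // illustrative value
    static uint32_t g_bucketCounts[CACHE_NUM_BUCKETS];

    // Hypothetical helper: the assume-assert lets the optimizer and the
    // analyzer prove the index is in range even when asserts are compiled out.
    static void BumpBucket(uint32_t bucketId)
    {
        // False path becomes unreachable in release builds, so the array
        // access below cannot be flagged as a possible out-of-bounds write.
        SWR_ASSUME_ASSERT(bucketId < CACHE_NUM_BUCKETS);

        ++g_bucketCounts[bucketId];
    }
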