{
auto& graphics = mController.GetGraphics();
- for( auto& pool : mPoolSet[mBufferIndex] )
+ const auto bufferIndex = mController.GetCurrentBufferIndex();
+ for( auto& pool : mPoolSet[bufferIndex] )
{
if( !pool.vkDescriptorSetsToBeFreed.empty() )
{
// free unused descriptor sets
- graphics.GetDevice().freeDescriptorSets( pool.vkPool, pool.vkDescriptorSetsToBeFreed );
-
- // Update tracked descriptor sets
- std::sort( pool.vkDescriptorSetsToBeFreed.begin(), pool.vkDescriptorSetsToBeFreed.end() );
-
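+ // Build the list of descriptor sets that are both queued for freeing and still
+ // tracked by this pool; only these handles are released from the Vulkan pool below.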
+ std::vector<vk::DescriptorSet> existingDSToFree;
auto freeIt = pool.vkDescriptorSetsToBeFreed.begin();
std::vector<vk::DescriptorSet> newList{};
std::for_each( pool.vkDescriptorSets.begin(), pool.vkDescriptorSets.end(), [&]( auto& item )
{
- if( item != *freeIt )
+ if( freeIt != pool.vkDescriptorSetsToBeFreed.end() &&
+ static_cast<VkDescriptorSet>(item) <= static_cast<VkDescriptorSet>(*freeIt) )
{
- newList.emplace_back( item );
+ if( item != *freeIt )
+ {
+ newList.emplace_back( item );
+ }
+ else
+ {
+ existingDSToFree.emplace_back( *freeIt );
+ ++freeIt;
+ }
}
- else
+ else if( freeIt != pool.vkDescriptorSetsToBeFreed.end() )
{
++freeIt;
}
+ else
+ {
+ // the free list has been exhausted; keep tracking the remaining descriptor sets
+ newList.emplace_back( item );
+ }
});
+
+ if( !existingDSToFree.empty() )
+ {
+ graphics.GetDevice().freeDescriptorSets( pool.vkPool, existingDSToFree );
+ }
+
pool.vkDescriptorSets = std::move( newList );
// update the number of available descriptor sets
- pool.available += uint32_t( pool.vkDescriptorSetsToBeFreed.size() );
+ pool.available += uint32_t( existingDSToFree.size() );
// clear the free list
pool.vkDescriptorSetsToBeFreed.clear();
{
ResolveFreeDescriptorSets();
- auto& poolset = mPoolSet[mBufferIndex];
+ const auto bufferIndex = mController.GetCurrentBufferIndex();
+
+ // clean pools dirty flags
+ for( auto& pool : mPoolSet[bufferIndex] )
+ {
+ pool.dirty = false;
+ }
+
+ auto& poolset = mPoolSet[bufferIndex];
// For each signature decide whether the pool should be reallocated or not.
// A newly created pool always reallocates.
DescriptorSetList& descriptorSets )
{
// access correct pool
- auto& poolset = mPoolSet[mBufferIndex];
+ auto& poolset = mPoolSet[mController.GetCurrentBufferIndex()];
auto& retval = descriptorSets.descriptorSets;
(*it).available -= uint32_t( result.size() );
descriptorSets.reserved.reset( new DescriptorSetList::Internal() );
descriptorSets.reserved->pool = (*it).vkPool;
- descriptorSets.reserved->bufferIndex = mBufferIndex;
+ descriptorSets.reserved->bufferIndex = mController.GetCurrentBufferIndex();
descriptorSets.reserved->signature = (*it).signature;
descriptorSets.reserved->poolUID = (*it).uid;
auto bufferIndex = list.reserved->bufferIndex;
std::find_if( mPoolSet[bufferIndex].begin(), mPoolSet[bufferIndex].end(), [&]( Pool& pool )
{
- if( pool.uid == list.reserved->poolUID )
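+ // Queue the sets for deferred release only if both the pool UID and the vk handle still match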
+ if( pool.uid == list.reserved->poolUID && pool.vkPool == list.reserved->pool )
{
pool.vkDescriptorSetsToBeFreed.insert( pool.vkDescriptorSetsToBeFreed.end(), list.descriptorSets.begin(), list.descriptorSets.end() );
return true;
}
}
-void DescriptorSetAllocator::SwapBuffers()
-{
- // clean pools dirty flags
- for( auto& pools : mPoolSet )
- {
- for( auto& pool : pools )
- {
- pool.dirty = false;
- }
- }
- mBufferIndex = (mBufferIndex+1) & 1;
-}
-
void DescriptorSetAllocator::InvalidateAllDescriptorSets()
{
auto& graphics = mController.GetGraphics();
graphics.DeviceWaitIdle();
- for( auto& set : mPoolSet )
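+ // Destroy only the pools for the current buffer index, forcing them to be recreated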
+ auto& pools = mPoolSet[mController.GetCurrentBufferIndex()];
+ for( auto& pool : pools )
{
- for( auto& pool : set )
- {
- graphics.GetDevice().destroyDescriptorPool(
- pool.vkPool,
- &mController.GetGraphics().GetAllocator("DESCRIPTORPOOL") );
- }
- set.clear();
+ graphics.GetDevice().destroyDescriptorPool(
+ pool.vkPool,
+ &mController.GetGraphics().GetAllocator("DESCRIPTORPOOL") );
}
+ pools.clear();
}
} // Namespace Internal
*/
void FreeDescriptorSets( std::vector<DescriptorSetList>&& descriptorSets );
- /**
- * Swaps internal buffers
- */
- void SwapBuffers();
-
/**
* Invalidates and releases all descriptor sets, forcing the pools to be recreated. This may happen
* when DALi has been staying in an idle state.
using PoolSet = std::vector<Pool>;
std::array<PoolSet, 2u> mPoolSet;
- uint32_t mBufferIndex { 1u }; // Starts with 1 to match index of update thread in DALi
-
uint32_t mPoolUID { 0u };
};
mGraphics.GetSwapchainForFBID(0)->ResetAllCommandBuffers();
- mGraphics.CollectGarbage();
mGraphics.CollectGarbage();
mDescriptorSetAllocator->InvalidateAllDescriptorSets();
void SwapBuffers()
{
- mBufferIndex = (mBufferIndex+1)%1;
- mDescriptorSetAllocator->SwapBuffers();
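+ // The buffer index is now owned and advanced by Vulkan::Graphics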
+ mGraphics.SwapBuffers();
}
uint32_t GetSwapchainBufferCount()
return mImpl->GetSwapchainBufferCount();
}
+uint32_t Controller::GetCurrentBufferIndex()
+{
+ return mImpl->mGraphics.GetCurrentBufferIndex();
+}
} // namespace VulkanAPI
} // namespace Graphics
// VULKAN only
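+ /**
+ * Returns the buffer index currently in use by Vulkan::Graphics
+ */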
+ uint32_t GetCurrentBufferIndex();
+
public:
Vulkan::Graphics& GetGraphics() const;
// if not created yet
if( mSwapchainBuffers.empty() )
{
- const auto MAX_SWAPCHAIN_BUFFERS { mFramebuffers.size() };
-
- mSwapchainBuffers.resize( MAX_SWAPCHAIN_BUFFERS );
+ mSwapchainBuffers.resize( MAX_SWAPCHAIN_RESOURCE_BUFFERS );
for( auto& buffer : mSwapchainBuffers )
{
buffer.reset( new SwapchainBuffer( *mGraphics ) );
DALI_LOG_INFO( gVulkanFilter, Debug::General, "Swapchain Image Index ( BEFORE Acquire ) = %d", int(mSwapchainImageIndex) );
auto result = device.acquireNextImageKHR( mSwapchainKHR,
std::numeric_limits<uint64_t>::max(),
- mSwapchainBuffers[mBufferIndex]->acquireNextImageSemaphore,
+ mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->acquireNextImageSemaphore,
nullptr, &mSwapchainImageIndex );
DALI_LOG_INFO( gVulkanFilter, Debug::General, "Swapchain Image Index ( AFTER Acquire ) = %d", int(mSwapchainImageIndex) );
}
}
- auto& swapBuffer = mSwapchainBuffers[mBufferIndex];
+ auto& swapBuffer = mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()];
// First frames don't need waiting as they haven't been submitted
// yet. Note that waiting on the fence without resetting it may
// cause a stall ( nvidia, ubuntu )
if( mFrameCounter >= mSwapchainBuffers.size() )
{
- mGraphics->WaitForFence( mSwapchainBuffers[mBufferIndex]->endOfFrameFence );
- mGraphics->ExecuteActions();
+ mGraphics->WaitForFence( mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->endOfFrameFence );
mGraphics->CollectGarbage();
}
else
{
mGraphics->DeviceWaitIdle();
- mGraphics->ExecuteActions();
mGraphics->CollectGarbage();
}
return;
}
- auto& swapBuffer = mSwapchainBuffers[mBufferIndex];
+ auto& swapBuffer = mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()];
// End any render pass command buffers
size_t count = swapBuffer->commandBuffers.size();
}
mFrameCounter++;
- mBufferIndex = uint32_t( (mBufferIndex+1) % MAX_SWAPCHAIN_RESOURCE_BUFFERS );
}
void Swapchain::Present( std::vector< vk::Semaphore > waitSemaphores )
void Swapchain::AllocateCommandBuffers( size_t renderPassCount )
{
- size_t commandBuffersCount = mSwapchainBuffers[mBufferIndex]->commandBuffers.size();
+ size_t commandBuffersCount = mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->commandBuffers.size();
DALI_LOG_STREAM( gVulkanFilter, Debug::General, "AllocateCommandBuffers: cbCount:" << commandBuffersCount
<< " renderPassCount: " << renderPassCount );
// Create primary buffer for each render pass & begin recording
auto commandBuffer = mGraphics->CreateCommandBuffer(true);
commandBuffer->Begin( vk::CommandBufferUsageFlagBits::eOneTimeSubmit, nullptr );
- mSwapchainBuffers[mBufferIndex]->commandBuffers.emplace_back( commandBuffer );
+ mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->commandBuffers.emplace_back( commandBuffer );
}
}
}
RefCountedCommandBuffer Swapchain::GetLastCommandBuffer() const
{
- return mSwapchainBuffers[mBufferIndex]->commandBuffers.back();
+ return mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->commandBuffers.back();
}
std::vector<RefCountedCommandBuffer>& Swapchain::GetCommandBuffers() const
{
- return mSwapchainBuffers[mBufferIndex]->commandBuffers;
+ return mSwapchainBuffers[mGraphics->GetCurrentBufferIndex()]->commandBuffers;
}
uint32_t Swapchain::GetImageCount() const
RefCountedFence mBetweenRenderPassFence;
uint32_t mFrameCounter { 0u }; ///< Current frame number
- uint32_t mBufferIndex { 0u }; ///< Current buffer index number
bool mIsValid; // indicates whether the swapchain is still valid or needs to be recreated
};
// This call assumes that the cache only holds the last reference of every resource in the program. (As it should)
mResourceRegister->Clear();
- // Execute any outstanding actions...
- ExecuteActions();
- ExecuteActions();
-
// Kill pipeline cache
mDevice.destroyPipelineCache( mVulkanPipelineCache, mAllocator.get() );
// Collect the garbage ( for each buffer index ) and shut down gracefully...
CollectGarbage();
+ SwapBuffers();
CollectGarbage();
// We are done with all resources (technically... if not, we will get a ton of validation layer errors)
void Graphics::CollectGarbage()
{
std::lock_guard< std::mutex > lock{ mMutex };
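+ // Collect the discard queue of the other (non-current) buffer index;
+ // new discards keep going to the current index.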
+ auto bufferIndex = (mCurrentBufferIndex+1)&1;
+
DALI_LOG_STREAM( gVulkanFilter, Debug::General,
- "Beginning graphics garbage collection---------------------------------------" )
- DALI_LOG_INFO( gVulkanFilter, Debug::General, "Discard queue size: %ld\n", mDiscardQueue[mCurrentGarbageBufferIndex].size() )
+ "Beginning graphics garbage collection---------------------------------------" );
+ DALI_LOG_INFO( gVulkanFilter, Debug::General, "Discard queue size: %ld\n", mDiscardQueue[bufferIndex].size() );
- // swap buffer
- mCurrentGarbageBufferIndex = ((mCurrentGarbageBufferIndex+1)&1);
-
- if( mDiscardQueue[mCurrentGarbageBufferIndex].empty() )
+ if( mDiscardQueue[bufferIndex].empty() )
{
return;
}
- for( const auto& deleter : mDiscardQueue[mCurrentGarbageBufferIndex] )
+ for( const auto& deleter : mDiscardQueue[bufferIndex] )
{
deleter();
}
// the queue has been collected, clear it
- mDiscardQueue[mCurrentGarbageBufferIndex].clear();
+ mDiscardQueue[bufferIndex].clear();
DALI_LOG_STREAM( gVulkanFilter, Debug::General,
"Graphics garbage collection complete---------------------------------------" )
}
-void Graphics::ExecuteActions()
-{
- std::lock_guard< std::mutex > lock{ mMutex };
- DALI_LOG_STREAM( gVulkanFilter, Debug::General,
- "Beginning graphics action execution---------------------------------------" )
- DALI_LOG_INFO( gVulkanFilter, Debug::General, "Action queue size: %ld\n", mActionQueue.size() )
-
- mCurrentActionBufferIndex = ((mCurrentActionBufferIndex+1)&1);
-
- if( mActionQueue[mCurrentActionBufferIndex].empty() )
- {
- return;
- }
-
- // swap buffer
- for( const auto& action : mActionQueue[mCurrentActionBufferIndex] )
- {
- action();
- }
-
- mActionQueue[mCurrentActionBufferIndex].clear();
- DALI_LOG_STREAM( gVulkanFilter, Debug::General,
- "Graphics action execution complete---------------------------------------" )
-}
-
void Graphics::DiscardResource( std::function< void() > deleter )
{
std::lock_guard< std::mutex > lock( mMutex );
- mDiscardQueue[mCurrentGarbageBufferIndex].push_back( std::move( deleter ) );
-}
-
-void Graphics::EnqueueAction( std::function< void() > action )
-{
- std::lock_guard< std::mutex > lock( mMutex );
- mActionQueue[mCurrentActionBufferIndex].push_back( std::move( action ) );
+ mDiscardQueue[mCurrentBufferIndex].push_back( std::move( deleter ) );
}
const DiscardQueue& Graphics::GetDiscardQueue( uint32_t bufferIndex ) const
return mDiscardQueue[bufferIndex];
}
-// --------------------------------------------------------------------------------------------------------------
+uint32_t Graphics::SwapBuffers()
+{
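+ // Ping-pong between the two internal buffer indices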
+ mCurrentBufferIndex = (mCurrentBufferIndex+1)&1;
+ return mCurrentBufferIndex;
+}
+
+uint32_t Graphics::GetCurrentBufferIndex()
+{
+ return mCurrentBufferIndex;
+}
void Graphics::CreateInstance( const std::vector< const char* >& extensions,
const std::vector< const char* >& validationLayers )
void CollectGarbage();
- void ExecuteActions();
-
void DiscardResource( std::function< void() > deleter );
- void EnqueueAction( std::function< void() > action );
-
const DiscardQueue& GetDiscardQueue( uint32_t bufferIndex ) const;
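+ /**
+ * Advances the internal buffer index (0/1) and returns the new value
+ */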
+ uint32_t SwapBuffers();
+
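+ /**
+ * Returns the currently active buffer index
+ */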
+ uint32_t GetCurrentBufferIndex();
+
private: // Methods
void CreateInstance( const std::vector< const char* >& extensions,
// Command pool map using thread IDs as keys
CommandPoolMap mCommandPools;
- DiscardQueue mActionQueue[2u];
DiscardQueue mDiscardQueue[2u];
- uint32_t mCurrentGarbageBufferIndex { 0u };
- uint32_t mCurrentActionBufferIndex { 0u };
-
bool mHasDepth { false };
bool mHasStencil { false };
vk::PipelineCache mVulkanPipelineCache;
bool mSurfaceResized { false };
+
+ uint32_t mCurrentBufferIndex{ 0u };
};
} // namespace Vulkan
// pass shared UBO and offset, return new offset for next item to be used
// don't process bindings if there are no uniform buffers allocated
auto shader = renderer->GetShader().GetGfxObject();
- auto ubo = mUniformBuffer[mUniformBufferIndex].get();
+ auto ubo = mUniformBuffer[bufferIndex].get();
if( ubo && shader )
{
std::vector<Graphics::API::RenderCommand::UniformBufferBinding>* bindings{ nullptr };
bool usesDepth = false;
bool usesStencil = false;
- mUniformBufferIndex = bufferIndex;
-
PrepareRendererPipelines( controller, renderInstructions, usesDepth, usesStencil, bufferIndex );
// If state of depth/stencil has changed between frames then the pipelines must be
mGraphicsBufferManager.reset( new GraphicsBufferManager( &controller ) );
}
- auto pagedAllocation = ( ( mUniformBlockAllocationBytes / UBO_PAGE_SIZE + 1u ) ) * UBO_PAGE_SIZE;
-
controller.BeginFrame();
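+ // Compute the UBO allocation as a whole number of UBO_PAGE_SIZE pages covering the requirement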
+ auto pagedAllocation = ( ( mUniformBlockAllocationBytes / UBO_PAGE_SIZE + 1u ) ) * UBO_PAGE_SIZE;
+
// Allocate twice the memory required by the uniform buffers
// todo: memory usage backlog to use optimal allocation
- if( mUniformBlockAllocationBytes && !mUniformBuffer[mUniformBufferIndex] )
+ if( mUniformBlockAllocationBytes && !mUniformBuffer[bufferIndex] )
{
- mUniformBuffer[mUniformBufferIndex] = std::move( mGraphicsBufferManager->AllocateUniformBuffer( pagedAllocation ) );
+ mUniformBuffer[bufferIndex] = std::move( mGraphicsBufferManager->AllocateUniformBuffer( pagedAllocation ) );
}
else if( mUniformBlockAllocationBytes && (
- mUniformBuffer[mUniformBufferIndex]->GetSize() < pagedAllocation ||
- (pagedAllocation < uint32_t(float(mUniformBuffer[mUniformBufferIndex]->GetSize()) * UBO_SHRINK_THRESHOLD ))))
+ mUniformBuffer[bufferIndex]->GetSize() < pagedAllocation ||
+ (pagedAllocation < uint32_t(float(mUniformBuffer[bufferIndex]->GetSize()) * UBO_SHRINK_THRESHOLD ))))
{
- mUniformBuffer[mUniformBufferIndex]->Reserve( pagedAllocation, true );
+ mUniformBuffer[bufferIndex]->Reserve( pagedAllocation, true );
}
// Clear UBO
- mUniformBuffer[mUniformBufferIndex]->Fill( 0, 0u, 0u );
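+ // The UBO may not exist if no uniform blocks were required this frame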
+ if( mUniformBuffer[bufferIndex] )
+ {
+ mUniformBuffer[bufferIndex]->Fill( 0, 0u, 0u );
+ }
mUboOffset = 0u;
// Submit all render commands in one go
controller.SubmitCommands( std::move(commandList) );
- if( mUniformBlockAllocationBytes && mUniformBuffer[mUniformBufferIndex] )
+ if( mUniformBlockAllocationBytes && mUniformBuffer[bufferIndex] )
{
- mUniformBuffer[mUniformBufferIndex]->Flush();
+ mUniformBuffer[bufferIndex]->Flush();
}
controller.EndFrame();
using UniformBufferList = std::array<std::unique_ptr<GraphicsBuffer>, 2u>;
UniformBufferList mUniformBuffer;
- uint32_t mUniformBufferIndex{0u};
uint32_t mUniformBlockAllocationCount;
uint32_t mUniformBlockAllocationBytes;