[Vulkan] Pipelines created in the render algorithms
[platform/core/uifw/dali-core.git] / dali / graphics / vulkan / vulkan-graphics.cpp
1 /*
2  * Copyright (c) 2018 Samsung Electronics Co., Ltd.
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *
16  */
17
18 // INTERNAL INCLUDES
19 #include <dali/graphics/vulkan/vulkan-graphics.h>
20 #include <dali/graphics/vulkan/vulkan-command-pool.h>
21 #include <dali/graphics/vulkan/vulkan-command-buffer.h>
22 #include <dali/graphics/vulkan/vulkan-queue.h>
23 #include <dali/graphics/vulkan/vulkan-surface.h>
24 #include <dali/integration-api/graphics/vulkan/vk-surface-factory.h>
25 #include <dali/graphics/vulkan/gpu-memory/vulkan-gpu-memory-manager.h>
26
27 #include <dali/graphics/vulkan/vulkan-buffer.h>
28 #include <dali/graphics/vulkan/vulkan-image.h>
29 #include <dali/graphics/vulkan/vulkan-image-view.h>
30 #include <dali/graphics/vulkan/vulkan-pipeline.h>
31 #include <dali/graphics/vulkan/vulkan-shader.h>
32 #include <dali/graphics/vulkan/vulkan-descriptor-set.h>
33 #include <dali/graphics/vulkan/vulkan-framebuffer.h>
34 #include <dali/graphics/vulkan/api/vulkan-api-controller.h>
35 #include <dali/graphics/vulkan/vulkan-sampler.h>
36 #include <dali/graphics/vulkan/vulkan-resource-cache.h>
37 #include <dali/graphics/vulkan/vulkan-debug.h>
38 #include <dali/graphics/vulkan/vulkan-fence.h>
39 #include <dali/graphics/vulkan/gpu-memory/vulkan-gpu-memory-handle.h>
40
41 #include <dali/graphics-api/graphics-api-controller.h>
42
43 #ifndef VK_KHR_XLIB_SURFACE_EXTENSION_NAME
44 #define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"
45 #endif
46
47 #ifndef VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME
48 #define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
49 #endif
50
51 #ifndef VK_KHR_XCB_SURFACE_EXTENSION_NAME
52 #define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"
53 #endif
54
55 #include <iostream>
56 #include <utility>
57
58 namespace Dali
59 {
60 namespace Graphics
61 {
62 using VkSurfaceFactory = Dali::Integration::Graphics::Vulkan::VkSurfaceFactory;
63 namespace Vulkan
64 {
65
// Validation layers requested at instance creation. Only the layers that are
// actually installed on the system are enabled (see Graphics::Create()).
// Commented-out entries are optional tooling layers kept for quick enabling.
const auto VALIDATION_LAYERS = std::vector< const char* >{

  //"VK_LAYER_LUNARG_screenshot",           // screenshot
  //"VK_LAYER_RENDERDOC_Capture",
  "VK_LAYER_LUNARG_parameter_validation", // parameter
  //"VK_LAYER_LUNARG_vktrace",              // vktrace ( requires vktrace connection )
  //"VK_LAYER_LUNARG_monitor",             // monitor
  "VK_LAYER_LUNARG_swapchain",           // swapchain
  "VK_LAYER_GOOGLE_threading",           // threading
  //"VK_LAYER_LUNARG_api_dump",            // api
  "VK_LAYER_LUNARG_object_tracker",      // objects
  "VK_LAYER_LUNARG_core_validation",     // core
  "VK_LAYER_GOOGLE_unique_objects",      // unique objects
  "VK_LAYER_LUNARG_standard_validation", // standard
};
81
// Default construction only; real initialisation happens in Create()/CreateDevice().
Graphics::Graphics() = default;
83
// Tears down the whole Vulkan context. The destruction order below is
// deliberate: GPU idle -> controller -> surfaces -> cache -> memory manager
// -> garbage collection -> device -> instance.
Graphics::~Graphics()
{
  // Wait for everything to finish on the GPU
  DeviceWaitIdle();

  // We are shutting down. This flag is used to avoid cache manipulation by Handles' OnDestroy function calls.
  // The cache will do its own house keeping on teardown
  mShuttingDown = true;

  // Manually resetting unique pointer here because we need to control the order of destruction.
  // This defeats the purpose of unique pointers and we might as well use raw pointers. But a unique ptr
  // communicates ownership more clearly (e.g by not allowing copies).
  mGfxController.reset(nullptr);
  mSurfaceFBIDMap.clear();

#ifndef NDEBUG
  // Debug aid: dump reference counts of everything still alive in the cache.
  printf("DESTROYING GRAPHICS CONTEXT--------------------------------\n");
  size_t totalObjCount = 0;
  mResourceCache->PrintReferenceCountReport( &totalObjCount );
#endif

  // Clear the last references of resources in the cache.
  // This should ensure that all resources have been queued for garbage collection
  // This call assumes that the cache only holds the last reference of every resource in the program. (As it should)
  mResourceCache->Clear();

  mDeviceMemoryManager.reset(nullptr);

  // Collect the garbage! And shut down gracefully...
  CollectGarbage();

  // We are done with all resources (technically... . If not we will get a ton of validation layer errors)
  // Kill the Vulkan logical device
  mDevice.destroy(mAllocator.get());

  // Kill the Vulkan instance
  mInstance.destroy(mAllocator.get());

}
123
124 // Create methods -----------------------------------------------------------------------------------------------
125 void Graphics::Create()
126 {
127
128   auto extensions = PrepareDefaultInstanceExtensions();
129
130   auto layers = vk::enumerateInstanceLayerProperties();
131   std::vector< const char* > validationLayers;
132   for( auto&& reqLayer : VALIDATION_LAYERS )
133   {
134     for( auto&& prop : layers.value )
135     {
136       DALI_LOG_STREAM( gVulkanFilter, Debug::General, prop.layerName );
137       if( std::string(prop.layerName) == reqLayer )
138       {
139         validationLayers.push_back( reqLayer );
140       }
141     }
142   }
143
144   CreateInstance( extensions, validationLayers );
145   PreparePhysicalDevice();
146 }
147
// Second stage of initialisation: creates the vk::Device plus one Queue wrapper
// per (family, index) pair, and sets up the resource cache.
// Requires at least one surface to exist (see GetQueueCreateInfos()).
void Graphics::CreateDevice()
{
  auto queueInfos = GetQueueCreateInfos();
  {
    // GetQueueCreateInfos() leaves pQueuePriorities null (the array would have
    // gone out of scope there), so patch in a priority array here. One shared
    // array sized for the largest family is sufficient because every queue
    // gets the same priority (1.0).
    auto maxQueueCountPerFamily = 0u;
    for( auto&& info : queueInfos )
    {
      maxQueueCountPerFamily = std::max( info.queueCount, maxQueueCountPerFamily );
    }

    auto priorities = std::vector< float >( maxQueueCountPerFamily );
    std::fill( priorities.begin(), priorities.end(), 1.0f );

    for( auto& info : queueInfos )
    {
      info.setPQueuePriorities( priorities.data());
    }

    std::vector< const char* > extensions{ VK_KHR_SWAPCHAIN_EXTENSION_NAME };

    auto info = vk::DeviceCreateInfo{};
    info.setEnabledExtensionCount( U32( extensions.size()))
        .setPpEnabledExtensionNames( extensions.data())
        .setPEnabledFeatures( &( *mPhysicalDeviceFeatures ))
        .setPQueueCreateInfos( queueInfos.data())
        .setQueueCreateInfoCount( U32( queueInfos.size()));

    // NOTE: `priorities` only needs to live until createDevice() returns; it does.
    mDevice = VkAssert( mPhysicalDevice.createDevice( info, *mAllocator ));
  }

  // create Queue objects
  for( auto& queueInfo : queueInfos )
  {
    for( auto i = 0u; i < queueInfo.queueCount; ++i )
    {
      auto queue = mDevice.getQueue( queueInfo.queueFamilyIndex, i );

      // based on family push queue instance into right array
      // A queue may appear in more than one array if its family supports
      // multiple capabilities.
      auto flags = mQueueFamilyProperties[queueInfo.queueFamilyIndex].queueFlags;
      if( flags & vk::QueueFlagBits::eGraphics )
      {
        mGraphicsQueues.emplace_back(
                MakeUnique< Queue >( *this, queue, queueInfo.queueFamilyIndex, i, flags ));
      }
      if( flags & vk::QueueFlagBits::eTransfer )
      {
        mTransferQueues.emplace_back(
                MakeUnique< Queue >( *this, queue, queueInfo.queueFamilyIndex, i, flags ));
      }
      if( flags & vk::QueueFlagBits::eCompute )
      {
        mComputeQueues.emplace_back(
                MakeUnique< Queue >( *this, queue, queueInfo.queueFamilyIndex, i, flags ));
      }

      // todo: present queue
    }
  }

  mResourceCache = MakeUnique< ResourceCache >();
}
209
// Creates a Surface via the supplied factory and registers it under a fresh
// framebuffer id (FBID). The associated swapchain is created later via
// CreateSwapchainForSurface().
// Returns the new FBID on success.
// NOTE(review): returns -1 on failure — if FBID is an unsigned type this wraps
// to the maximum value rather than a negative sentinel; confirm callers check
// for this correctly.
FBID Graphics::CreateSurface( std::unique_ptr< SurfaceFactory > surfaceFactory )
{
  // create surface from the factory
  auto surfaceRef = Surface::New( *this, std::move( surfaceFactory ));

  if( surfaceRef->Create())
  {

    // map surface to FBID
    auto fbid = ++mBaseFBID;
    mSurfaceFBIDMap[fbid] = SwapchainSurfacePair{ RefCountedSwapchain{}, surfaceRef };
    return fbid;
  }
  return -1;
}
225
226 RefCountedSwapchain Graphics::CreateSwapchainForSurface( RefCountedSurface surface )
227 {
228   auto swapchain = Swapchain::New( *this,
229                                    GetGraphicsQueue( 0u ),
230                                    surface, 4, 0 );
231
232   // store swapchain in the correct pair
233   for( auto&& val : mSurfaceFBIDMap )
234   {
235     if( val.second
236            .surface == surface )
237     {
238       val.second
239          .swapchain = swapchain;
240       break;
241     }
242   }
243
244   return swapchain;
245 }
246
// Stub — shader creation is not implemented yet.
// NOTE(review): no return statement; relies on NotImplemented() not returning
// (presumably aborts/asserts) — TODO confirm.
RefCountedShader Graphics::CreateShader()
{
  NotImplemented()
}
251
// Stub — pipeline creation is not implemented yet (pipelines are currently
// created in the render algorithms).
RefCountedPipeline Graphics::CreatePipeline()
{
  NotImplemented()
}
256
// Creates a ref-counted Fence wrapper and the underlying vk::Fence.
// The vk::Fence handle is written directly into the wrapper via Ref().
RefCountedFence Graphics::CreateFence( const vk::FenceCreateInfo& fenceCreateInfo )
{
  auto refCountedFence = Fence::New( *this );

  VkAssert( mDevice.createFence( &fenceCreateInfo, mAllocator.get(), refCountedFence->Ref()));

  return refCountedFence;
}
265
266 RefCountedBuffer Graphics::CreateBuffer( size_t size, BufferType type )
267 {
268   auto usageFlags = vk::BufferUsageFlags{};
269
270   switch( type )
271   {
272     case BufferType::VERTEX:
273     {
274       usageFlags |= vk::BufferUsageFlagBits::eVertexBuffer;
275       break;
276     };
277     case BufferType::INDEX:
278     {
279       usageFlags |= vk::BufferUsageFlagBits::eIndexBuffer;
280       break;
281     };
282     case BufferType::UNIFORM:
283     {
284       usageFlags |= vk::BufferUsageFlagBits::eUniformBuffer;
285       break;
286     };
287     case BufferType::SHADER_STORAGE:
288     {
289       usageFlags |= vk::BufferUsageFlagBits::eStorageBuffer;
290       break;
291     };
292   }
293
294   auto info = vk::BufferCreateInfo{};
295   info.setSharingMode( vk::SharingMode::eExclusive );
296   info.setSize( size );
297   info.setUsage( usageFlags | vk::BufferUsageFlagBits::eTransferDst );
298
299   auto refCountedBuffer = Buffer::New( *this, info );
300
301   VkAssert( mDevice.createBuffer( &info, mAllocator.get(), refCountedBuffer->Ref()));
302
303   AddBuffer( refCountedBuffer );
304
305   return refCountedBuffer;
306 }
307
308 RefCountedBuffer Graphics::CreateBuffer( const vk::BufferCreateInfo& bufferCreateInfo )
309 {
310   auto refCountedBuffer = Buffer::New( *this, bufferCreateInfo );
311
312   VkAssert( mDevice.createBuffer( &bufferCreateInfo, mAllocator.get(), refCountedBuffer->Ref()));
313
314   AddBuffer( refCountedBuffer );
315
316   return refCountedBuffer;
317 }
318
// Stub — framebuffer creation is not implemented yet.
RefCountedFramebuffer Graphics::CreateFramebuffer()
{
  NotImplemented()
}
323
324 RefCountedImage Graphics::CreateImage( const vk::ImageCreateInfo& imageCreateInfo )
325 {
326   auto refCountedImage = Image::New(*this, imageCreateInfo);
327
328   VkAssert( mDevice.createImage( &imageCreateInfo, mAllocator.get(), refCountedImage->Ref() ) );
329
330   AddImage( refCountedImage );
331
332   return refCountedImage;
333 }
334
335 RefCountedImageView Graphics::CreateImageView( const vk::ImageViewCreateFlags& flags,
336                                                const RefCountedImage& image,
337                                                vk::ImageViewType viewType,
338                                                vk::Format format,
339                                                vk::ComponentMapping components,
340                                                vk::ImageSubresourceRange subresourceRange )
341 {
342   auto imageViewCreateInfo = vk::ImageViewCreateInfo{}
343           .setFlags( flags )
344           .setImage( image->GetVkHandle())
345           .setViewType( viewType )
346           .setFormat( format )
347           .setComponents( components )
348           .setSubresourceRange( std::move( subresourceRange ));
349
350   auto refCountedImageView = ImageView::New( *this, image, imageViewCreateInfo );
351
352   VkAssert( mDevice.createImageView( &imageViewCreateInfo, nullptr, refCountedImageView->Ref()));
353
354   AddImageView( refCountedImageView );
355
356   return refCountedImageView;
357 }
358
359 RefCountedImageView Graphics::CreateImageView( RefCountedImage image )
360 {
361   vk::ComponentMapping componentsMapping = { vk::ComponentSwizzle::eR, vk::ComponentSwizzle::eG,
362                                              vk::ComponentSwizzle::eB, vk::ComponentSwizzle::eA };
363   vk::ImageAspectFlags aspectFlags{};
364   if( image->GetUsageFlags() & vk::ImageUsageFlagBits::eColorAttachment )
365   {
366     aspectFlags |= vk::ImageAspectFlagBits::eColor;
367   }
368   if( image->GetUsageFlags() & vk::ImageUsageFlagBits::eDepthStencilAttachment )
369   {
370     aspectFlags |= ( vk::ImageAspectFlagBits::eDepth | vk::ImageAspectFlagBits::eStencil );
371   }
372   if( image->GetUsageFlags() & vk::ImageUsageFlagBits::eSampled )
373   {
374     aspectFlags |= vk::ImageAspectFlagBits::eColor;
375   }
376
377   auto subresourceRange = vk::ImageSubresourceRange{}
378           .setAspectMask( aspectFlags )
379           .setBaseArrayLayer( 0 )
380           .setBaseMipLevel( 0 )
381           .setLevelCount( image->GetMipLevelCount())
382           .setLayerCount( image->GetLayerCount());
383
384   auto refCountedImageView = CreateImageView( {},
385                                               image,
386                                               vk::ImageViewType::e2D,
387                                               image->GetFormat(),
388                                               componentsMapping,
389                                               subresourceRange );
390
391   AddImageView( refCountedImageView );
392
393   return refCountedImageView;
394 }
395
// Stub — descriptor pool creation is not implemented yet.
RefCountedDescriptorPool Graphics::CreateDescriptorPool()
{
  NotImplemented()
}
400
// Stub — GPU memory block creation is not implemented yet
// (allocation goes through the GpuMemoryManager instead).
RefCountedGpuMemoryBlock Graphics::CreateGpuMemoryBlock()
{
  NotImplemented()
}
405
// Stub — descriptor set creation is not implemented yet.
RefCountedDescriptorSet Graphics::CreateDescriptorSet()
{
  NotImplemented()
}
410
411 RefCountedSampler Graphics::CreateSampler( const vk::SamplerCreateInfo& samplerCreateInfo )
412 {
413   auto refCountedSampler = Sampler::New( *this, samplerCreateInfo );
414
415   VkAssert( mDevice.createSampler( &samplerCreateInfo, mAllocator.get(), refCountedSampler->Ref() ) );
416
417   AddSampler( refCountedSampler );
418
419   return refCountedSampler;
420
421 }
422
// Allocates a command buffer (primary or secondary) from the calling thread's
// command pool.
// NOTE(review): GetCommandPool's parameter is unnamed and the callee re-reads
// std::this_thread::get_id() itself, so passing it here is redundant — confirm
// and simplify the GetCommandPool signature.
RefCountedCommandBuffer Graphics::CreateCommandBuffer( bool primary )
{
  auto commandPool = GetCommandPool( std::this_thread::get_id());

  return commandPool->NewCommandBuffer( primary );
}
429 // --------------------------------------------------------------------------------------------------------------
430
431 // Actions ------------------------------------------------------------------------------------------------------
// Blocks until the single fence signals or the timeout expires.
// NOTE(review): the timeout parameter is uint32_t but vkWaitForFences takes a
// 64-bit nanosecond timeout — values like UINT64_MAX (wait forever) cannot be
// expressed through this API; confirm whether callers need the full range.
vk::Result Graphics::WaitForFence( RefCountedFence fence, uint32_t timeout )
{
  return mDevice.waitForFences( 1, *fence, VK_TRUE, timeout );
}
436
437 vk::Result Graphics::WaitForFences( const std::vector< RefCountedFence >& fences, bool waitAll, uint32_t timeout )
438 {
439   std::vector< vk::Fence > vkFenceHandles{};
440   std::transform( fences.begin(),
441                   fences.end(),
442                   std::back_inserter( vkFenceHandles ),
443                   []( RefCountedFence entry ) { return entry->GetVkHandle(); } );
444
445
446   return mDevice.waitForFences( vkFenceHandles, vk::Bool32( waitAll ), timeout );
447 }
448
// Returns the single fence to the unsignalled state.
vk::Result Graphics::ResetFence( RefCountedFence fence )
{
  return mDevice.resetFences( 1, *fence );
}
453
454 vk::Result Graphics::ResetFences( const std::vector< RefCountedFence >& fences )
455 {
456   std::vector< vk::Fence > vkFenceHandles{};
457   std::transform( fences.begin(),
458                   fences.end(),
459                   std::back_inserter( vkFenceHandles ),
460                   []( RefCountedFence entry ) { return entry->GetVkHandle(); } );
461
462   return mDevice.resetFences( vkFenceHandles );
463 }
464
// Binds a GPU memory block to an image at the given offset and records the
// memory handle on the Image wrapper so the allocation outlives the bind.
vk::Result Graphics::BindImageMemory( RefCountedImage image, RefCountedGpuMemoryBlock memory, uint32_t offset )
{
  auto result = VkAssert( mDevice.bindImageMemory( image->GetVkHandle(), *memory, offset ) );
  // Keep the memory block alive for as long as the image holds it.
  image->AssignMemory(memory);
  return result;
}
471
472 vk::Result Graphics::Submit( Queue& queue, const std::vector< SubmissionData >& submissionData, RefCountedFence fence )
473 {
474   std::vector< vk::SubmitInfo > submitInfos;
475   std::vector< vk::CommandBuffer > commandBufferHandles;
476
477   // Transform SubmissionData to vk::SubmitInfo
478   std::transform(submissionData.begin(),
479                  submissionData.end(),
480                  std::back_inserter( submitInfos ),
481                  [&]( SubmissionData subData )
482                  {
483
484
485                    //Extract the command buffer handles
486                    std::transform(subData.commandBuffers.begin(),
487                                   subData.commandBuffers.end(),
488                                   std::back_inserter(commandBufferHandles),
489                                   [&]( RefCountedCommandBuffer& entry )
490                                   {
491                                     return entry->GetVkHandle();
492                                   });
493
494                    return vk::SubmitInfo().setWaitSemaphoreCount( U32( subData.waitSemaphores.size() ) )
495                                           .setPWaitSemaphores( subData.waitSemaphores.data() )
496                                           .setPWaitDstStageMask( &subData.waitDestinationStageMask )
497                                           .setCommandBufferCount( U32( subData.commandBuffers.size() )  )
498                                           .setPCommandBuffers( commandBufferHandles.data() )
499                                           .setSignalSemaphoreCount( U32( subData.signalSemaphores.size() ) )
500                                           .setPSignalSemaphores( subData.signalSemaphores.data() );
501                  });
502
503   return VkAssert( queue.GetVkHandle().submit( submitInfos, fence ? fence->GetVkHandle() : nullptr ) );
504 }
505
// Presents the prepared swapchain image(s) described by presentInfo on the queue.
vk::Result Graphics::Present( Queue& queue, vk::PresentInfoKHR presentInfo )
{
  return queue.GetVkHandle().presentKHR(presentInfo);
}

// Blocks the CPU until the given queue has finished all submitted work.
vk::Result Graphics::QueueWaitIdle( Queue& queue )
{
  return queue.GetVkHandle().waitIdle();
}

// Blocks the CPU until the whole device is idle (all queues drained).
vk::Result Graphics::DeviceWaitIdle()
{
  return mDevice.waitIdle();
}
520 // --------------------------------------------------------------------------------------------------------------
521
522 // Getters ------------------------------------------------------------------------------------------------------
// Returns the surface registered under the given framebuffer id; id 0 is
// treated as "the default" and maps to the first registered surface.
// NOTE(review): dereferencing begin() is UB if no surface was ever created,
// and operator[] default-inserts an empty entry for an unknown id — both
// assume callers only pass valid ids. TODO confirm.
RefCountedSurface Graphics::GetSurface( FBID surfaceId )
{
  // TODO: FBID == 0 means default framebuffer, but there should be no
  // such thing as default framebuffer.
  if( surfaceId == 0 )
  {
    return mSurfaceFBIDMap.begin()
                          ->second
                          .surface;
  }
  return mSurfaceFBIDMap[surfaceId].surface;
}
535
536 RefCountedSwapchain Graphics::GetSwapchainForSurface( RefCountedSurface surface )
537 {
538   for( auto&& val : mSurfaceFBIDMap )
539   {
540     if( val.second
541            .surface == surface )
542     {
543       return val.second
544                 .swapchain;
545     }
546   }
547   return RefCountedSwapchain();
548 }
549
// Returns the swapchain registered under the given framebuffer id; id 0 maps
// to the first registered surface's swapchain (see GetSurface()).
// NOTE(review): same caveats as GetSurface() — begin() on an empty map and
// operator[] default-insertion for unknown ids.
RefCountedSwapchain Graphics::GetSwapchainForFBID( FBID surfaceId )
{
  if( surfaceId == 0 )
  {
    return mSurfaceFBIDMap.begin()
                          ->second
                          .swapchain;
  }
  return mSurfaceFBIDMap[surfaceId].swapchain;
}
560
// Trivial accessors for the core Vulkan objects and cached device properties.

// The logical device. Valid after CreateDevice().
vk::Device Graphics::GetDevice() const
{
  return mDevice;
}

// The selected physical device. Valid after Create()/PreparePhysicalDevice().
vk::PhysicalDevice Graphics::GetPhysicalDevice() const
{
  return mPhysicalDevice;
}

// The Vulkan instance. Valid after Create().
vk::Instance Graphics::GetInstance() const
{
  return mInstance;
}

// Host allocation callbacks used for every Vulkan object created here.
const vk::AllocationCallbacks& Graphics::GetAllocator() const
{
  return *mAllocator;
}

// Device memory manager. Valid after PreparePhysicalDevice().
GpuMemoryManager& Graphics::GetDeviceMemoryManager() const
{
  return *mDeviceMemoryManager;
}

// Cached memory properties of the physical device.
const vk::PhysicalDeviceMemoryProperties& Graphics::GetMemoryProperties() const
{
  return *mPhysicalDeviceMemoryProperties;
}
590
// Returns the graphics queue. Only index 0 is currently supported.
Queue& Graphics::GetGraphicsQueue( uint32_t index ) const
{
  // todo: at the moment each type of queue may use only one, indices greater than 0 are invalid
  // this will change in the future
  assert( index == 0u && "Each type of queue may use only one, indices greater than 0 are invalid!" );

  return *mGraphicsQueues[0]; // will be mGraphicsQueues[index]
}
599
// Returns the transfer queue. Only index 0 is currently supported.
Queue& Graphics::GetTransferQueue( uint32_t index ) const
{
  // todo: at the moment each type of queue may use only one, indices greater than 0 are invalid
  // this will change in the future
  assert( index == 0u && "Each type of queue may use only one, indices greater than 0 are invalid!" );

  return *mTransferQueues[0]; // will be mTransferQueues[index]
}
608
// Returns the compute queue. Only index 0 is currently supported.
Queue& Graphics::GetComputeQueue( uint32_t index ) const
{
  // todo: at the moment each type of queue may use only one, indices greater than 0 are invalid
  // this will change in the future
  assert( index == 0u && "Each type of queue may use only one, indices greater than 0 are invalid!" );

  return *mComputeQueues[0]; // will be mComputeQueues[index]
}
617
// Returns the queue used for presentation. Currently aliases the graphics
// queue rather than a dedicated present queue.
Queue& Graphics::GetPresentQueue() const
{
  // fixme: should be a dedicated presentation queue
  return GetGraphicsQueue( 0 );
}
623
624 Platform Graphics::GetDefaultPlatform() const
625 {
626 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
627   mPlatform = Platform::WAYLAND;
628 #elif VK_USE_PLATFORM_XCB_KHR
629   mPlatform = Platform::XCB;
630 #elif VK_USE_PLATFORM_XLIB_KHR
631   mPlatform =  Platform::XLIB;
632 #else
633   return mPlatform;
634 #endif
635 }
636
637 Dali::Graphics::API::Controller& Graphics::GetController()
638 {
639   if( !mGfxController )
640   {
641     mGfxController = Dali::Graphics::VulkanAPI::Controller::New( *this );
642   }
643
644   return *mGfxController;
645 }
646
// True once the destructor has begun; Handles use this to skip cache
// manipulation during teardown (see ~Graphics()).
bool Graphics::IsShuttingDown()
{
  return mShuttingDown;
}
651
652 // --------------------------------------------------------------------------------------------------------------
653
654 // Cache manipulation methods -----------------------------------------------------------------------------------
// Cache registration methods. Each takes the cache mutex and forwards the
// handle to the resource cache, which tracks the resource's lifetime.

void Graphics::AddBuffer( Handle< Buffer > buffer )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddBuffer( std::move( buffer ));
}

void Graphics::AddImage( Handle< Image > image )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddImage( std::move( image ));
}

void Graphics::AddImageView( RefCountedImageView imageView )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddImageView( std::move( imageView ));
}

void Graphics::AddShader( Handle< Shader > shader )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddShader( std::move( shader ));
}

// Command pools are registered per thread (keyed by the calling thread's id).
void Graphics::AddCommandPool( Handle< CommandPool > pool )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddCommandPool( std::this_thread::get_id(), std::move( pool ));
}

void Graphics::AddDescriptorPool( Handle< DescriptorPool > pool )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddDescriptorPool( std::move( pool ));
}

void Graphics::AddFramebuffer( Handle< Framebuffer > framebuffer )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddFramebuffer( std::move( framebuffer ));
}

void Graphics::AddSampler( RefCountedSampler sampler )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->AddSampler( std::move( sampler ) );
}
702
// Cache lookups from a raw Vulkan handle back to the ref-counted wrapper.

void_t_placeholder:
RefCountedShader Graphics::FindShader( vk::ShaderModule shaderModule )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  return mResourceCache->FindShader( shaderModule );
}

RefCountedImage Graphics::FindImage( vk::Image image )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  return mResourceCache->FindImage( image );
}
714
// Cache removal methods. Each takes the cache mutex and removes the resource
// from the resource cache (typically invoked from a Handle's OnDestroy).

void Graphics::RemoveBuffer( Buffer& buffer )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveBuffer( buffer );
}

void Graphics::RemoveImage( Image& image )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveImage( image );
}

void Graphics::RemoveImageView( ImageView& imageView )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveImageView( imageView );
}

void Graphics::RemoveShader( Shader& shader )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveShader( shader );
}

void Graphics::RemoveCommandPool( CommandPool& commandPool )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveCommandPool( commandPool );
}

void Graphics::RemoveDescriptorPool( DescriptorPool& pool )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveDescriptorPool( pool );
}

void Graphics::RemoveFramebuffer( Framebuffer& framebuffer )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveFramebuffer( framebuffer );
}

void Graphics::RemoveSampler( Sampler& sampler )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->RemoveSampler( sampler );
}
762
// Executes all queued discard operations, destroying the underlying Vulkan
// objects of resources whose last reference was dropped.
void Graphics::CollectGarbage()
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->CollectGarbage();
}

// Queues a deleter to run during the next CollectGarbage(); used to defer
// Vulkan object destruction until the GPU is done with the resource.
void Graphics::DiscardResource( std::function< void() > deleter )
{
  std::lock_guard< std::mutex > lock{ mMutex };
  mResourceCache->EnqueueDiscardOperation( std::move( deleter ));
}
774 // --------------------------------------------------------------------------------------------------------------
775
776
// Creates the vk::Instance with the given extensions and validation layers.
// Validation layers are only kept in debug builds AND when the LOG_VULKAN
// environment variable is set; otherwise the layer count is forced to zero.
void
Graphics::CreateInstance( const std::vector< const char* >& extensions,
                          const std::vector< const char* >& validationLayers )
{
  auto info = vk::InstanceCreateInfo{};

  info.setEnabledExtensionCount(U32(extensions.size()))
      .setPpEnabledExtensionNames(extensions.data())
      .setEnabledLayerCount(U32(validationLayers.size()))
      .setPpEnabledLayerNames(validationLayers.data());

#if defined(DEBUG_ENABLED)
  // Debug build: opt in to validation with LOG_VULKAN=<anything>.
  if( ! getenv("LOG_VULKAN") )
  {
    info.setEnabledLayerCount(0);
  }
#else
  // Release build: validation layers are always disabled.
  info.setEnabledLayerCount(0);
#endif

  mInstance = VkAssert(vk::createInstance(info, *mAllocator));
}
799
800 void Graphics::DestroyInstance()
801 {
802   if( mInstance )
803   {
804     mInstance.destroy( *mAllocator );
805     mInstance = nullptr;
806   }
807 }
808
809
// Selects a physical device, caches its properties and queue family info, and
// creates the GPU memory manager. Aborts (assert) when no suitable GPU exists.
void Graphics::PreparePhysicalDevice()
{
  auto devices = VkAssert( mInstance.enumeratePhysicalDevices());
  assert( !devices.empty() && "No Vulkan supported device found!" );

  // if only one, pick first
  mPhysicalDevice = nullptr;
  if( devices.size() == 1 )
  {
    mPhysicalDevice = devices[0];
  }else // otherwise look for one which is a graphics device
  {
    // Prefer the first discrete or integrated GPU in enumeration order.
    for( auto& device : devices )
    {
      auto properties = device.getProperties();
      if( properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu ||
          properties.deviceType == vk::PhysicalDeviceType::eIntegratedGpu )
      {
        mPhysicalDevice = device;
        break;
      }
    }
  }

  assert( mPhysicalDevice && "No suitable Physical Device found!" );

  GetPhysicalDeviceProperties();

  GetQueueFamilyProperties();

  mDeviceMemoryManager = GpuMemoryManager::New( *this );
}
842
// Caches the selected physical device's properties, memory properties and
// feature set for later queries (e.g. GetMemoryProperties()).
void Graphics::GetPhysicalDeviceProperties()
{
  // store data on heap to keep object smaller
  mPhysicalDeviceProperties =
          MakeUnique< vk::PhysicalDeviceProperties >( mPhysicalDevice.getProperties());
  mPhysicalDeviceMemoryProperties =
          MakeUnique< vk::PhysicalDeviceMemoryProperties >( mPhysicalDevice.getMemoryProperties());
  mPhysicalDeviceFeatures =
          MakeUnique< vk::PhysicalDeviceFeatures >( mPhysicalDevice.getFeatures());
}

// Caches the queue family properties used by CreateDevice()/GetQueueCreateInfos().
void Graphics::GetQueueFamilyProperties()
{
  mQueueFamilyProperties = mPhysicalDevice.getQueueFamilyProperties();
}
858
// Builds one vk::DeviceQueueCreateInfo per distinct queue family needed for
// graphics, transfer and present work. pQueuePriorities is deliberately left
// null here (a local array would dangle) and is filled in by CreateDevice().
std::vector< vk::DeviceQueueCreateInfo > Graphics::GetQueueCreateInfos()
{
  // surface is needed in order to find a family that supports presentation to this surface
  // fixme: assuming all surfaces will be compatible with the queue family
  assert( !mSurfaceFBIDMap.empty() &&
          "At least one surface has to be created before creating VkDevice!" );

  std::vector< vk::DeviceQueueCreateInfo > queueInfos{};

  constexpr uint8_t MAX_QUEUE_TYPES = 3;
  // find suitable family for each type of queue; -1u marks "not found yet"
  uint32_t familyIndexType[MAX_QUEUE_TYPES];
  std::fill( &familyIndexType[0], &familyIndexType[MAX_QUEUE_TYPES], -1u );

  // Graphics
  auto& graphicsFamily = familyIndexType[0];

  // Transfer
  auto& transferFamily = familyIndexType[1];

  // Present (checked against the first registered surface)
  auto& presentFamily = familyIndexType[2];

  // Pick the FIRST family supporting each capability.
  auto queueFamilyIndex = 0u;
  for( auto& prop : mQueueFamilyProperties )
  {
    if(( prop.queueFlags & vk::QueueFlagBits::eGraphics ) && graphicsFamily == -1u )
    {
      graphicsFamily = queueFamilyIndex;
    }
    if(( prop.queueFlags & vk::QueueFlagBits::eTransfer ) && transferFamily == -1u )
    {
      transferFamily = queueFamilyIndex;
    }
    if( mPhysicalDevice.getSurfaceSupportKHR( queueFamilyIndex, mSurfaceFBIDMap.begin()->second.
            surface->GetSurfaceKHR()).value && presentFamily == -1u )
    {
      presentFamily = queueFamilyIndex;
    }
    ++queueFamilyIndex;
  }

  assert( graphicsFamily != -1u && "No queue family that supports graphics operations!" );
  assert( transferFamily != -1u && "No queue family that supports transfer operations!" );
  assert( presentFamily != -1u && "No queue family that supports present operations!" );

  // todo: we may require that the family must be same for all types of operations, it makes
  // easier to handle synchronisation related issues.

  // sort queues so duplicate family indices become adjacent and can be skipped
  // (note: graphicsFamily/transferFamily/presentFamily aliases are meaningless
  // after this sort and are not used again)
  std::sort( &familyIndexType[0], &familyIndexType[MAX_QUEUE_TYPES] );

  // allocate all queues from graphics family
  uint32_t prevQueueFamilyIndex = -1u;

  for( auto i = 0u; i < MAX_QUEUE_TYPES; ++i )
  {
    auto& familyIndex = familyIndexType[i];
    if( prevQueueFamilyIndex == familyIndex )
    {
      continue; // same family as previous entry — already requested
    }

    auto& queueCount = mQueueFamilyProperties[familyIndex].queueCount;

    // fill queue create info for the family.
    // note the priorities are not being set as local pointer will out of scope, this
    // will be fixed by the caller function
    auto info = vk::DeviceQueueCreateInfo{}
            .setPQueuePriorities( nullptr )
            .setQueueCount( queueCount )
            .setQueueFamilyIndex( familyIndex );
    queueInfos.push_back( info );
    prevQueueFamilyIndex = familyIndex;
  }

  return queueInfos;
}
937
// Builds the list of instance extensions to request: one window-system surface
// extension matching the platform (detected from availability when no platform
// was compiled in — this also sets mPlatform), plus the generic surface and
// debug-report extensions.
std::vector< const char* > Graphics::PrepareDefaultInstanceExtensions()
{
  auto extensions = vk::enumerateInstanceExtensionProperties();

  std::string extensionName;

  bool xlibAvailable{ false };
  bool xcbAvailable{ false };
  bool waylandAvailable{ false };

  // Discover which window-system surface extensions the driver exposes.
  for( auto&& ext : extensions.value )
  {
    extensionName = ext.extensionName;
    if( extensionName == VK_KHR_XCB_SURFACE_EXTENSION_NAME )
    {
      xcbAvailable = true;
    }
    else if( extensionName == VK_KHR_XLIB_SURFACE_EXTENSION_NAME )
    {
      xlibAvailable = true;
    }
    else if( extensionName == VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME )
    {
      waylandAvailable = true;
    }
  }

  std::vector< const char* > retval{};

  // depending on the platform validate extensions
  auto platform = GetDefaultPlatform();

  if( platform != Platform::UNDEFINED )
  {
    // A platform was compiled in; request its extension only if available.
    if (platform == Platform::XCB && xcbAvailable)
    {
      retval.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
    }
    else if (platform == Platform::XLIB && xlibAvailable)
    {
      retval.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
    }
    else if (platform == Platform::WAYLAND && waylandAvailable)
    {
      retval.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
    }
  }
  else // try to determine the platform based on available extensions
  {
    // Preference order: XCB, then XLIB, then Wayland.
    if (xcbAvailable)
    {
      mPlatform = Platform::XCB;
      retval.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
    }
    else if (xlibAvailable)
    {
      mPlatform = Platform::XLIB;
      retval.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
    }
    else if (waylandAvailable)
    {
      mPlatform = Platform::WAYLAND;
      retval.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
    }
    else
    {
      // can't determine the platform!
      mPlatform = Platform::UNDEFINED;
    }
  }

  // other essential extensions
  retval.push_back( VK_KHR_SURFACE_EXTENSION_NAME );
  retval.push_back( VK_EXT_DEBUG_REPORT_EXTENSION_NAME );

  return retval;
}
1015
// Returns the calling thread's command pool, creating one on first use.
// NOTE(review): the thread-id parameter is unnamed and ignored — the lookup
// always uses std::this_thread::get_id().
// NOTE(review): a freshly created pool is not visibly added to the cache here;
// presumably CommandPool::New registers itself (e.g. via AddCommandPool) —
// TODO confirm, otherwise every call on this thread creates a new pool.
RefCountedCommandPool Graphics::GetCommandPool( std::thread::id )
{
  RefCountedCommandPool commandPool;
  {
    // Hold the cache mutex only for the lookup.
    std::lock_guard< std::mutex > lock{ mMutex };
    commandPool = mResourceCache->FindCommandPool( std::this_thread::get_id() );
  }

  if( !commandPool )
  {
    // Allow individual command buffer reset rather than whole-pool reset.
    auto&& createInfo = vk::CommandPoolCreateInfo{}.setFlags( vk::CommandPoolCreateFlagBits::eResetCommandBuffer );
    commandPool = CommandPool::New( *this,  createInfo);
  }

  return commandPool;
}
1032
1033 } // namespace Vulkan
1034 } // namespace Graphics
1035 } // namespace Dali