radv: add helpers for destroying various pipeline types
author     Samuel Pitoiset <samuel.pitoiset@gmail.com>
           Tue, 14 Mar 2023 09:47:26 +0000 (10:47 +0100)
committer  Marge Bot <emma+marge@anholt.net>
           Mon, 20 Mar 2023 13:56:32 +0000 (13:56 +0000)
Much cleaner than having a single function handle teardown for every pipeline type.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21894>
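
For quick review, a condensed sketch of what radv_pipeline_destroy() looks like with
this patch applied (the non-graphics cases are abbreviated; the common teardown after
the switch is taken from the context lines of the first hunk, anything outside the
hunk is elided):

    void
    radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline,
                          const VkAllocationCallbacks *allocator)
    {
       /* Per-type teardown is delegated to a dedicated helper. */
       switch (pipeline->type) {
       case RADV_PIPELINE_GRAPHICS:
          radv_destroy_graphics_pipeline(device, radv_pipeline_to_graphics(pipeline));
          break;
       /* ... GRAPHICS_LIB, COMPUTE, RAY_TRACING_LIB and RAY_TRACING follow the
        * same pattern, see the first hunk below ... */
       default:
          unreachable("invalid pipeline type");
       }

       /* Teardown common to all pipeline types stays here. */
       if (pipeline->cs.buf)
          free(pipeline->cs.buf);

       /* ... remaining common teardown is outside the hunk and not shown ... */
    }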

src/amd/vulkan/radv_pipeline.c
src/amd/vulkan/radv_pipeline_rt.c
src/amd/vulkan/radv_private.h

diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index ace8db6..c19f029 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -126,41 +126,26 @@ void
 radv_pipeline_destroy(struct radv_device *device, struct radv_pipeline *pipeline,
                       const VkAllocationCallbacks *allocator)
 {
-   if (pipeline->type == RADV_PIPELINE_GRAPHICS) {
-      struct radv_graphics_pipeline *graphics_pipeline = radv_pipeline_to_graphics(pipeline);
-
-      if (graphics_pipeline->ps_epilog)
-         radv_shader_part_unref(device, graphics_pipeline->ps_epilog);
-
-      vk_free(&device->vk.alloc, graphics_pipeline->state_data);
-   } else if (pipeline->type == RADV_PIPELINE_RAY_TRACING_LIB) {
-      struct radv_ray_tracing_lib_pipeline *library_pipeline =
-         radv_pipeline_to_ray_tracing_lib(pipeline);
-
-      ralloc_free(library_pipeline->ctx);
-   } else if (pipeline->type == RADV_PIPELINE_GRAPHICS_LIB) {
-      struct radv_graphics_lib_pipeline *gfx_pipeline_lib =
-         radv_pipeline_to_graphics_lib(pipeline);
-
-      radv_pipeline_layout_finish(device, &gfx_pipeline_lib->layout);
-
-      for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
-         ralloc_free(gfx_pipeline_lib->base.retained_shaders[i].nir);
-      }
-
-      if (gfx_pipeline_lib->base.ps_epilog)
-         radv_shader_part_unref(device, gfx_pipeline_lib->base.ps_epilog);
-
-      vk_free(&device->vk.alloc, gfx_pipeline_lib->base.state_data);
+   switch (pipeline->type) {
+   case RADV_PIPELINE_GRAPHICS:
+      radv_destroy_graphics_pipeline(device, radv_pipeline_to_graphics(pipeline));
+      break;
+   case RADV_PIPELINE_GRAPHICS_LIB:
+      radv_destroy_graphics_lib_pipeline(device, radv_pipeline_to_graphics_lib(pipeline));
+      break;
+   case RADV_PIPELINE_COMPUTE:
+      radv_destroy_compute_pipeline(device, radv_pipeline_to_compute(pipeline));
+      break;
+   case RADV_PIPELINE_RAY_TRACING_LIB:
+      radv_destroy_ray_tracing_lib_pipeline(device, radv_pipeline_to_ray_tracing_lib(pipeline));
+      break;
+   case RADV_PIPELINE_RAY_TRACING:
+      radv_destroy_ray_tracing_pipeline(device, radv_pipeline_to_ray_tracing(pipeline));
+      break;
+   default:
+      unreachable("invalid pipeline type");
    }
 
-   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i)
-      if (pipeline->shaders[i])
-         radv_shader_unref(device, pipeline->shaders[i]);
-
-   if (pipeline->gs_copy_shader)
-      radv_shader_unref(device, pipeline->gs_copy_shader);
-
    if (pipeline->cs.buf)
       free(pipeline->cs.buf);
 
@@ -5052,6 +5037,22 @@ radv_graphics_pipeline_create(VkDevice _device, VkPipelineCache _cache,
    return VK_SUCCESS;
 }
 
+void
+radv_destroy_graphics_pipeline(struct radv_device *device, struct radv_graphics_pipeline *pipeline)
+{
+   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
+      if (pipeline->base.shaders[i])
+         radv_shader_unref(device, pipeline->base.shaders[i]);
+   }
+
+   if (pipeline->base.gs_copy_shader)
+      radv_shader_unref(device, pipeline->base.gs_copy_shader);
+   if (pipeline->ps_epilog)
+      radv_shader_part_unref(device, pipeline->ps_epilog);
+
+   vk_free(&device->vk.alloc, pipeline->state_data);
+}
+
 static VkResult
 radv_graphics_lib_pipeline_init(struct radv_graphics_lib_pipeline *pipeline,
                                 struct radv_device *device, struct radv_pipeline_cache *cache,
@@ -5149,6 +5150,19 @@ radv_graphics_lib_pipeline_create(VkDevice _device, VkPipelineCache _cache,
    return VK_SUCCESS;
 }
 
+void
+radv_destroy_graphics_lib_pipeline(struct radv_device *device,
+                                   struct radv_graphics_lib_pipeline *pipeline)
+{
+   radv_pipeline_layout_finish(device, &pipeline->layout);
+
+   for (unsigned i = 0; i < MESA_VULKAN_SHADER_STAGES; ++i) {
+      ralloc_free(pipeline->base.retained_shaders[i].nir);
+   }
+
+   radv_destroy_graphics_pipeline(device, &pipeline->base);
+}
+
 VKAPI_ATTR VkResult VKAPI_CALL
 radv_CreateGraphicsPipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
                              const VkGraphicsPipelineCreateInfo *pCreateInfos,
@@ -5477,6 +5491,13 @@ radv_create_compute_pipelines(VkDevice _device, VkPipelineCache pipelineCache, u
    return result;
 }
 
+void
+radv_destroy_compute_pipeline(struct radv_device *device, struct radv_compute_pipeline *pipeline)
+{
+   if (pipeline->base.shaders[MESA_SHADER_COMPUTE])
+      radv_shader_unref(device, pipeline->base.shaders[MESA_SHADER_COMPUTE]);
+}
+
 VKAPI_ATTR VkResult VKAPI_CALL
 radv_CreateComputePipelines(VkDevice _device, VkPipelineCache pipelineCache, uint32_t count,
                             const VkComputePipelineCreateInfo *pCreateInfos,
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index 62a4bb9..977d241 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -476,6 +476,13 @@ fail:
    return VK_ERROR_OUT_OF_HOST_MEMORY;
 }
 
+void
+radv_destroy_ray_tracing_lib_pipeline(struct radv_device *device,
+                                      struct radv_ray_tracing_lib_pipeline *pipeline)
+{
+   ralloc_free(pipeline->ctx);
+}
+
 static bool
 radv_rt_pipeline_has_dynamic_stack_size(const VkRayTracingPipelineCreateInfoKHR *pCreateInfo)
 {
@@ -689,6 +696,16 @@ fail:
    return result;
 }
 
+void
+radv_destroy_ray_tracing_pipeline(struct radv_device *device,
+                                  struct radv_ray_tracing_pipeline *pipeline)
+{
+   if (pipeline->base.base.shaders[MESA_SHADER_COMPUTE])
+      radv_shader_unref(device, pipeline->base.base.shaders[MESA_SHADER_COMPUTE]);
+   if (pipeline->base.base.shaders[MESA_SHADER_RAYGEN])
+      radv_shader_unref(device, pipeline->base.base.shaders[MESA_SHADER_RAYGEN]);
+}
+
 VKAPI_ATTR VkResult VKAPI_CALL
 radv_CreateRayTracingPipelinesKHR(VkDevice _device, VkDeferredOperationKHR deferredOperation,
                                   VkPipelineCache pipelineCache, uint32_t count,
diff --git a/src/amd/vulkan/radv_private.h b/src/amd/vulkan/radv_private.h
index 7b100ee..eb47fa7 100644
--- a/src/amd/vulkan/radv_private.h
+++ b/src/amd/vulkan/radv_private.h
@@ -3612,6 +3612,17 @@ bool radv_spm_init(struct radv_device *device);
 void radv_spm_finish(struct radv_device *device);
 void radv_emit_spm_setup(struct radv_device *device, struct radeon_cmdbuf *cs);
 
+void radv_destroy_graphics_pipeline(struct radv_device *device,
+                                    struct radv_graphics_pipeline *pipeline);
+void radv_destroy_graphics_lib_pipeline(struct radv_device *device,
+                                        struct radv_graphics_lib_pipeline *pipeline);
+void radv_destroy_compute_pipeline(struct radv_device *device,
+                                   struct radv_compute_pipeline *pipeline);
+void radv_destroy_ray_tracing_lib_pipeline(struct radv_device *device,
+                                           struct radv_ray_tracing_lib_pipeline *pipeline);
+void radv_destroy_ray_tracing_pipeline(struct radv_device *device,
+                                       struct radv_ray_tracing_pipeline *pipeline);
+
 #define RADV_FROM_HANDLE(__radv_type, __name, __handle) \
    VK_FROM_HANDLE(__radv_type, __name, __handle)
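
As a usage note (a sketch, not verbatim driver code): the vkDestroyPipeline entry
point keeps funnelling every pipeline through radv_pipeline_destroy(), which now fans
out to the per-type helpers declared above. Roughly:

    VKAPI_ATTR void VKAPI_CALL
    radv_DestroyPipeline(VkDevice _device, VkPipeline _pipeline,
                         const VkAllocationCallbacks *pAllocator)
    {
       RADV_FROM_HANDLE(radv_device, device, _device);
       RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

       /* Nothing to do for VK_NULL_HANDLE. */
       if (!_pipeline)
          return;

       /* Dispatches to radv_destroy_*_pipeline() based on pipeline->type. */
       radv_pipeline_destroy(device, pipeline, pAllocator);
    }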