/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/vk/GrVkResourceProvider.h"

#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrSamplerState.h"
#include "src/gpu/ganesh/GrStencilSettings.h"
#include "src/gpu/ganesh/vk/GrVkCommandBuffer.h"
#include "src/gpu/ganesh/vk/GrVkCommandPool.h"
#include "src/gpu/ganesh/vk/GrVkGpu.h"
#include "src/gpu/ganesh/vk/GrVkPipeline.h"
#include "src/gpu/ganesh/vk/GrVkRenderTarget.h"
#include "src/gpu/ganesh/vk/GrVkUtil.h"

GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
    : fGpu(gpu)
    , fPipelineCache(VK_NULL_HANDLE) {
    fPipelineStateCache = sk_make_sp<PipelineStateCache>(gpu);
}

GrVkResourceProvider::~GrVkResourceProvider() {
    SkASSERT(0 == fRenderPassArray.count());
    SkASSERT(0 == fExternalRenderPasses.count());
    SkASSERT(0 == fMSAALoadPipelines.count());
    SkASSERT(VK_NULL_HANDLE == fPipelineCache);
}
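
// Lazily creates the VkPipelineCache on first use. If the client supplied a persistent
// cache, the stored blob only seeds the Vulkan cache once its header (version,
// vendorID/deviceID, and pipelineCacheUUID) matches the current physical device.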
VkPipelineCache GrVkResourceProvider::pipelineCache() {
    if (fPipelineCache == VK_NULL_HANDLE) {
        VkPipelineCacheCreateInfo createInfo;
        memset(&createInfo, 0, sizeof(VkPipelineCacheCreateInfo));
        createInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
        createInfo.pNext = nullptr;
        createInfo.flags = 0;

        auto persistentCache = fGpu->getContext()->priv().getPersistentCache();
        sk_sp<SkData> cached;
        if (persistentCache) {
            uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
            sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));
            cached = persistentCache->load(*keyData);
        }
        bool usedCached = false;
        if (cached) {
            uint32_t* cacheHeader = (uint32_t*)cached->data();
            if (cacheHeader[1] == VK_PIPELINE_CACHE_HEADER_VERSION_ONE) {
                // For version one of the header, the total header size is 16 bytes plus
                // VK_UUID_SIZE bytes. See Section 9.6 (Pipeline Cache) in the Vulkan spec for
                // the breakdown of these bytes.
                SkASSERT(cacheHeader[0] == 16 + VK_UUID_SIZE);
                const VkPhysicalDeviceProperties& devProps = fGpu->physicalDeviceProperties();
                const uint8_t* supportedPipelineCacheUUID = devProps.pipelineCacheUUID;
                if (cacheHeader[2] == devProps.vendorID && cacheHeader[3] == devProps.deviceID &&
                    !memcmp(&cacheHeader[4], supportedPipelineCacheUUID, VK_UUID_SIZE)) {
                    createInfo.initialDataSize = cached->size();
                    createInfo.pInitialData = cached->data();
                    usedCached = true;
                }
            }
        }
        if (!usedCached) {
            createInfo.initialDataSize = 0;
            createInfo.pInitialData = nullptr;
        }

        VkResult result;
        GR_VK_CALL_RESULT(fGpu, result, CreatePipelineCache(fGpu->device(), &createInfo, nullptr,
                                                            &fPipelineCache));
        if (VK_SUCCESS != result) {
            fPipelineCache = VK_NULL_HANDLE;
        }
    }
    return fPipelineCache;
}
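
// Creation order below is load-bearing: the uniform descriptor set manager must land at
// index 0 and the input attachment manager at index 1, since the corresponding handles
// are constructed from those fixed indices (see the asserts).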
void GrVkResourceProvider::init() {
    // Init uniform descriptor objects
    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateUniformManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(1 == fDescriptorSetManagers.count());
    fUniformDSHandle = GrVkDescriptorSetManager::Handle(0);
    dsm = GrVkDescriptorSetManager::CreateInputManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    SkASSERT(2 == fDescriptorSetManagers.count());
    fInputDSHandle = GrVkDescriptorSetManager::Handle(1);
}

sk_sp<const GrVkPipeline> GrVkResourceProvider::makePipeline(
        const GrProgramInfo& programInfo,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        int shaderStageCount,
        VkRenderPass compatibleRenderPass,
        VkPipelineLayout layout,
        uint32_t subpass) {
    return GrVkPipeline::Make(fGpu, programInfo, shaderStageInfo, shaderStageCount,
                              compatibleRenderPass, layout, this->pipelineCache(), subpass);
}

// To create framebuffers, we first need to create a simple RenderPass that is
// only used for framebuffer creation. When we actually render we will create
// RenderPasses as needed that are compatible with the framebuffer.
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderTarget* target,
                                               CompatibleRPHandle* compatibleHandle,
                                               bool withResolve,
                                               bool withStencil,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve) {
    // Get attachment information from the render target. This includes which attachments the
    // render target has (color, stencil) and the attachments' format and sample count.
    GrVkRenderPass::AttachmentFlags attachmentFlags;
    GrVkRenderPass::AttachmentsDescriptor attachmentsDesc;
    target->getAttachmentsDescriptor(&attachmentsDesc, &attachmentFlags, withResolve, withStencil);

    return this->findCompatibleRenderPass(&attachmentsDesc, attachmentFlags, selfDepFlags,
                                          loadFromResolve, compatibleHandle);
}
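
// Looks for an existing compatible render pass set before creating and caching a new
// "simple" render pass. When the caller passes a compatibleHandle, it receives the
// index of the matching set for cheaper future lookups.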
const GrVkRenderPass*
GrVkResourceProvider::findCompatibleRenderPass(GrVkRenderPass::AttachmentsDescriptor* desc,
                                               GrVkRenderPass::AttachmentFlags attachmentFlags,
                                               SelfDependencyFlags selfDepFlags,
                                               LoadFromResolve loadFromResolve,
                                               CompatibleRPHandle* compatibleHandle) {
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        if (fRenderPassArray[i].isCompatible(*desc, attachmentFlags, selfDepFlags,
                                             loadFromResolve)) {
            const GrVkRenderPass* renderPass = fRenderPassArray[i].getCompatibleRenderPass();
            renderPass->ref();
            if (compatibleHandle) {
                *compatibleHandle = CompatibleRPHandle(i);
            }
            return renderPass;
        }
    }

    GrVkRenderPass* renderPass = GrVkRenderPass::CreateSimple(fGpu, desc, attachmentFlags,
                                                              selfDepFlags, loadFromResolve);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPassArray.emplace_back(renderPass);

    if (compatibleHandle) {
        *compatibleHandle = CompatibleRPHandle(fRenderPassArray.count() - 1);
    }
    return renderPass;
}
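
// Wraps a render pass supplied by the client (e.g. for drawing into an externally
// created VkRenderPass). Compatible wrappers are cached and handed back with an
// extra ref for the caller.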
const GrVkRenderPass* GrVkResourceProvider::findCompatibleExternalRenderPass(
        VkRenderPass renderPass, uint32_t colorAttachmentIndex) {
    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        if (fExternalRenderPasses[i]->isCompatibleExternalRP(renderPass)) {
            fExternalRenderPasses[i]->ref();
#ifdef SK_DEBUG
            uint32_t cachedColorIndex;
            SkASSERT(fExternalRenderPasses[i]->colorAttachmentIndex(&cachedColorIndex));
            SkASSERT(cachedColorIndex == colorAttachmentIndex);
#endif
            return fExternalRenderPasses[i];
        }
    }

    const GrVkRenderPass* newRenderPass = new GrVkRenderPass(fGpu, renderPass,
                                                             colorAttachmentIndex);
    fExternalRenderPasses.push_back(newRenderPass);
    newRenderPass->ref();
    return newRenderPass;
}
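
// Returns (creating if needed) a full render pass with the requested load/store ops,
// keyed off the target's compatible-render-pass handle.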
const GrVkRenderPass* GrVkResourceProvider::findRenderPass(
        GrVkRenderTarget* target,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps,
        CompatibleRPHandle* compatibleHandle,
        bool withResolve,
        bool withStencil,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) {
    GrVkResourceProvider::CompatibleRPHandle tempRPHandle;
    GrVkResourceProvider::CompatibleRPHandle* pRPHandle = compatibleHandle ? compatibleHandle
                                                                           : &tempRPHandle;
    *pRPHandle = target->compatibleRenderPassHandle(withResolve, withStencil, selfDepFlags,
                                                    loadFromResolve);
    if (!pRPHandle->isValid()) {
        return nullptr;
    }

    return this->findRenderPass(*pRPHandle, colorOps, resolveOps, stencilOps);
}

const GrVkRenderPass*
GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
                                     const GrVkRenderPass::LoadStoreOps& colorOps,
                                     const GrVkRenderPass::LoadStoreOps& resolveOps,
                                     const GrVkRenderPass::LoadStoreOps& stencilOps) {
    SkASSERT(compatibleHandle.isValid() && compatibleHandle.toIndex() < fRenderPassArray.count());
    CompatibleRenderPassSet& compatibleSet = fRenderPassArray[compatibleHandle.toIndex()];
    const GrVkRenderPass* renderPass = compatibleSet.getRenderPass(fGpu,
                                                                   colorOps,
                                                                   resolveOps,
                                                                   stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    renderPass->ref();
    return renderPass;
}
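
// Note: descriptor pools are not recycled here; each call creates a fresh pool for the
// given descriptor type and count.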
GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
        VkDescriptorType type, uint32_t count) {
    return GrVkDescriptorPool::Create(fGpu, type, count);
}
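
// Samplers are cached by a key derived from the sampler state plus any Ycbcr conversion
// info; a cache hit returns the existing sampler with an extra ref for the caller.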
GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
        GrSamplerState params, const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSampler* sampler = fSamplers.find(GrVkSampler::GenerateKey(params, ycbcrInfo));
    if (!sampler) {
        sampler = GrVkSampler::Create(fGpu, params, ycbcrInfo);
        if (!sampler) {
            return nullptr;
        }
        fSamplers.add(sampler);
    }
    SkASSERT(sampler);
    sampler->ref();
    return sampler;
}

GrVkSamplerYcbcrConversion* GrVkResourceProvider::findOrCreateCompatibleSamplerYcbcrConversion(
        const GrVkYcbcrConversionInfo& ycbcrInfo) {
    GrVkSamplerYcbcrConversion* ycbcrConversion =
            fYcbcrConversions.find(GrVkSamplerYcbcrConversion::GenerateKey(ycbcrInfo));
    if (!ycbcrConversion) {
        ycbcrConversion = GrVkSamplerYcbcrConversion::Create(fGpu, ycbcrInfo);
        if (!ycbcrConversion) {
            return nullptr;
        }
        fYcbcrConversions.add(ycbcrConversion);
    }
    SkASSERT(ycbcrConversion);
    ycbcrConversion->ref();
    return ycbcrConversion;
}
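
// Both pipeline-state lookups forward to the thread-safe PipelineStateCache; the
// GrProgramDesc-keyed variant below also updates precompilation statistics.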
GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        GrRenderTarget* renderTarget,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        bool overrideSubpassForResolveLoad) {
    return fPipelineStateCache->findOrCreatePipelineState(renderTarget, programInfo,
                                                          compatibleRenderPass,
                                                          overrideSubpassForResolveLoad);
}

GrVkPipelineState* GrVkResourceProvider::findOrCreateCompatiblePipelineState(
        const GrProgramDesc& desc,
        const GrProgramInfo& programInfo,
        VkRenderPass compatibleRenderPass,
        GrThreadSafePipelineBuilder::Stats::ProgramCacheResult* stat) {
    auto tmp = fPipelineStateCache->findOrCreatePipelineState(desc, programInfo,
                                                              compatibleRenderPass, stat);
    if (!tmp) {
        fPipelineStateCache->stats()->incNumPreCompilationFailures();
    } else {
        fPipelineStateCache->stats()->incNumPreProgramCacheResult(*stat);
    }

    return tmp;
}
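
// MSAA-load pipelines are keyed only on render pass compatibility, so the (small)
// fMSAALoadPipelines array is scanned linearly before a new pipeline is built.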
sk_sp<const GrVkPipeline> GrVkResourceProvider::findOrCreateMSAALoadPipeline(
        const GrVkRenderPass& renderPass,
        int numSamples,
        VkPipelineShaderStageCreateInfo* shaderStageInfo,
        VkPipelineLayout pipelineLayout) {
    // Find or create a compatible pipeline
    sk_sp<const GrVkPipeline> pipeline;
    for (int i = 0; i < fMSAALoadPipelines.count() && !pipeline; ++i) {
        if (fMSAALoadPipelines[i].fRenderPass->isCompatible(renderPass)) {
            pipeline = fMSAALoadPipelines[i].fPipeline;
        }
    }
    if (!pipeline) {
        pipeline = GrVkPipeline::Make(
                fGpu,
                /*vertexAttribs=*/GrGeometryProcessor::AttributeSet(),
                /*instanceAttribs=*/GrGeometryProcessor::AttributeSet(),
                GrPrimitiveType::kTriangleStrip,
                kTopLeft_GrSurfaceOrigin,
                GrStencilSettings(),
                numSamples,
                /*isHWantialiasState=*/false,
                GrXferProcessor::BlendInfo(),
                /*isWireframe=*/false,
                /*useConservativeRaster=*/false,
                /*subpass=*/0,
                shaderStageInfo,
                /*shaderStageCount=*/2,
                renderPass.vkRenderPass(),
                pipelineLayout,
                /*ownsLayout=*/false,
                this->pipelineCache());
        if (!pipeline) {
            return nullptr;
        }
        fMSAALoadPipelines.push_back({pipeline, &renderPass});
    }
    SkASSERT(pipeline);
    return pipeline;
}
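
// The handle getters below return stable indices into fDescriptorSetManagers, re-using
// an existing manager when a compatible one is found.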
void GrVkResourceProvider::getZeroSamplerDescriptorSetHandle(
        GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isZeroSampler()) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm =
            GrVkDescriptorSetManager::CreateZeroSamplerManager(fGpu);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

void GrVkResourceProvider::getSamplerDescriptorSetHandle(VkDescriptorType type,
                                                         const GrVkUniformHandler& uniformHandler,
                                                         GrVkDescriptorSetManager::Handle* handle) {
    SkASSERT(handle);
    SkASSERT(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
             VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type);
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        if (fDescriptorSetManagers[i]->isCompatible(type, &uniformHandler)) {
            *handle = GrVkDescriptorSetManager::Handle(i);
            return;
        }
    }

    GrVkDescriptorSetManager* dsm = GrVkDescriptorSetManager::CreateSamplerManager(fGpu, type,
                                                                                   uniformHandler);
    fDescriptorSetManagers.emplace_back(dsm);
    *handle = GrVkDescriptorSetManager::Handle(fDescriptorSetManagers.count() - 1);
}

VkDescriptorSetLayout GrVkResourceProvider::getUniformDSLayout() const {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getInputDSLayout() const {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->layout();
}

VkDescriptorSetLayout GrVkResourceProvider::getSamplerDSLayout(
        const GrVkDescriptorSetManager::Handle& handle) const {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->layout();
}

const GrVkDescriptorSet* GrVkResourceProvider::getUniformDescriptorSet() {
    SkASSERT(fUniformDSHandle.isValid());
    return fDescriptorSetManagers[fUniformDSHandle.toIndex()]->getDescriptorSet(fGpu,
                                                                                fUniformDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getInputDescriptorSet() {
    SkASSERT(fInputDSHandle.isValid());
    return fDescriptorSetManagers[fInputDSHandle.toIndex()]->getDescriptorSet(fGpu, fInputDSHandle);
}

const GrVkDescriptorSet* GrVkResourceProvider::getSamplerDescriptorSet(
        const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(handle.isValid());
    return fDescriptorSetManagers[handle.toIndex()]->getDescriptorSet(fGpu, handle);
}

void GrVkResourceProvider::recycleDescriptorSet(const GrVkDescriptorSet* descSet,
                                                const GrVkDescriptorSetManager::Handle& handle) {
    SkASSERT(descSet);
    SkASSERT(handle.isValid());
    int managerIdx = handle.toIndex();
    SkASSERT(managerIdx < fDescriptorSetManagers.count());
    fDescriptorSetManagers[managerIdx]->recycleDescriptorSet(descSet);
}
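
// Command pools are recycled: this prefers a pool from fAvailableCommandPools, and
// checkCommandBuffers() returns finished pools to that list.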
GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
    GrVkCommandPool* result;
    if (fAvailableCommandPools.count()) {
        result = fAvailableCommandPools.back();
        fAvailableCommandPools.pop_back();
    } else {
        result = GrVkCommandPool::Create(fGpu);
        if (!result) {
            return nullptr;
        }
    }
    SkASSERT(result->unique());
    SkDEBUGCODE(
        for (const GrVkCommandPool* pool : fActiveCommandPools) {
            SkASSERT(pool != result);
        }
        for (const GrVkCommandPool* pool : fAvailableCommandPools) {
            SkASSERT(pool != result);
        }
    )
    fActiveCommandPools.push_back(result);

    return result;
}

void GrVkResourceProvider::checkCommandBuffers() {
    // When resetting a command buffer it can trigger client provided procs (e.g. release or
    // finished) to be called. During these calls the client could trigger us to abandon the vk
    // context, e.g. if we are in a DEVICE_LOST state. When we abandon the vk context we will
    // unref all the fActiveCommandPools and reset the array. Since this can happen in the middle
    // of the loop here, we need to additionally check that fActiveCommandPools still has pools on
    // each iteration of our loop.
    //
    // TODO: We really need to have a more robust way to protect us from client proc calls that
    // happen in the middle of us doing work. This may be just one of many potential pitfalls that
    // could happen from the client triggering GrDirectContext changes during a proc call.
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            if (buffer->finished(fGpu)) {
                fActiveCommandPools.removeShuffle(i);
                SkASSERT(pool->unique());
                pool->reset(fGpu);
                // After resetting the pool (specifically releasing the pool's resources) we may
                // have called a client callback proc which may have disconnected the GrVkGpu. In
                // that case we do not want to push the pool back onto the cache, but instead just
                // drop the pool.
                if (fGpu->disconnected()) {
                    pool->unref();
                    return;
                }
                fAvailableCommandPools.push_back(pool);
            }
        }
    }
}

void GrVkResourceProvider::forceSyncAllCommandBuffers() {
    for (int i = fActiveCommandPools.count() - 1; fActiveCommandPools.count() && i >= 0; --i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        if (!pool->isOpen()) {
            GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
            buffer->forceSync(fGpu);
        }
    }
}

void GrVkResourceProvider::addFinishedProcToActiveCommandBuffers(
        sk_sp<skgpu::RefCntedCallback> finishedCallback) {
    for (int i = 0; i < fActiveCommandPools.count(); ++i) {
        GrVkCommandPool* pool = fActiveCommandPools[i];
        GrVkPrimaryCommandBuffer* buffer = pool->getPrimaryCommandBuffer();
        buffer->addFinishedProc(finishedCallback);
    }
}
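
// Tears down everything the provider owns. Ordering matters here; in particular,
// command buffers and pipeline states must be released before the descriptor set
// managers they take sets from (see the comment near the bottom of this function).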
void GrVkResourceProvider::destroyResources() {
    SkTaskGroup* taskGroup = fGpu->getContext()->priv().getTaskGroup();
    if (taskGroup) {
        taskGroup->wait();
    }

    // Release all msaa load pipelines
    fMSAALoadPipelines.reset();

    // Loop over all render pass sets to make sure we destroy all the internal VkRenderPasses.
    for (int i = 0; i < fRenderPassArray.count(); ++i) {
        fRenderPassArray[i].releaseResources();
    }
    fRenderPassArray.reset();

    for (int i = 0; i < fExternalRenderPasses.count(); ++i) {
        fExternalRenderPasses[i]->unref();
    }
    fExternalRenderPasses.reset();

    // Iterate through all stored GrVkSamplers and unref them before resetting the hash table.
    fSamplers.foreach([&](auto* elt) { elt->unref(); });
    fSamplers.reset();

    fYcbcrConversions.foreach([&](auto* elt) { elt->unref(); });
    fYcbcrConversions.reset();

    fPipelineStateCache->release();

    GR_VK_CALL(fGpu->vkInterface(), DestroyPipelineCache(fGpu->device(), fPipelineCache, nullptr));
    fPipelineCache = VK_NULL_HANDLE;

    for (GrVkCommandPool* pool : fActiveCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fActiveCommandPools.reset();

    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.reset();

    // We must release/destroy all command buffers and pipeline states before releasing the
    // GrVkDescriptorSetManagers. Additionally, we must release all uniform buffers since they hold
    // refs to GrVkDescriptorSets.
    for (int i = 0; i < fDescriptorSetManagers.count(); ++i) {
        fDescriptorSetManagers[i]->release(fGpu);
    }
    fDescriptorSetManagers.reset();
}

void GrVkResourceProvider::releaseUnlockedBackendObjects() {
    for (GrVkCommandPool* pool : fAvailableCommandPools) {
        SkASSERT(pool->unique());
        pool->unref();
    }
    fAvailableCommandPools.reset();
}
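
// Serializes the VkPipelineCache into the client's persistent cache, using the usual
// Vulkan two-call pattern: query the data size, then fetch the data.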
void GrVkResourceProvider::storePipelineCacheData() {
    if (this->pipelineCache() == VK_NULL_HANDLE) {
        return;
    }
    size_t dataSize = 0;
    VkResult result;
    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, nullptr));
    if (result != VK_SUCCESS) {
        return;
    }

    std::unique_ptr<uint8_t[]> data(new uint8_t[dataSize]);

    GR_VK_CALL_RESULT(fGpu, result, GetPipelineCacheData(fGpu->device(), this->pipelineCache(),
                                                         &dataSize, (void*)data.get()));
    if (result != VK_SUCCESS) {
        return;
    }

    uint32_t key = GrVkGpu::kPipelineCache_PersistentCacheKeyType;
    sk_sp<SkData> keyData = SkData::MakeWithoutCopy(&key, sizeof(uint32_t));

    fGpu->getContext()->priv().getPersistentCache()->store(
            *keyData, *SkData::MakeWithoutCopy(data.get(), dataSize), SkString("VkPipelineCache"));
}

////////////////////////////////////////////////////////////////////////////////

GrVkResourceProvider::CompatibleRenderPassSet::CompatibleRenderPassSet(GrVkRenderPass* renderPass)
    : fLastReturnedIndex(0) {
    renderPass->ref();
    fRenderPasses.push_back(renderPass);
}

bool GrVkResourceProvider::CompatibleRenderPassSet::isCompatible(
        const GrVkRenderPass::AttachmentsDescriptor& attachmentsDescriptor,
        GrVkRenderPass::AttachmentFlags attachmentFlags,
        SelfDependencyFlags selfDepFlags,
        LoadFromResolve loadFromResolve) const {
    // The first GrVkRenderPass should always exist since we create the basic load/store
    // render pass on creation.
    SkASSERT(fRenderPasses[0]);
    return fRenderPasses[0]->isCompatible(attachmentsDescriptor, attachmentFlags, selfDepFlags,
                                          loadFromResolve);
}
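
// getRenderPass() starts its scan at fLastReturnedIndex, presumably because successive
// lookups tend to ask for the same load/store ops.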
GrVkRenderPass* GrVkResourceProvider::CompatibleRenderPassSet::getRenderPass(
        GrVkGpu* gpu,
        const GrVkRenderPass::LoadStoreOps& colorOps,
        const GrVkRenderPass::LoadStoreOps& resolveOps,
        const GrVkRenderPass::LoadStoreOps& stencilOps) {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        int idx = (i + fLastReturnedIndex) % fRenderPasses.count();
        if (fRenderPasses[idx]->equalLoadStoreOps(colorOps, resolveOps, stencilOps)) {
            fLastReturnedIndex = idx;
            return fRenderPasses[idx];
        }
    }
    GrVkRenderPass* renderPass = GrVkRenderPass::Create(gpu, *this->getCompatibleRenderPass(),
                                                        colorOps, resolveOps, stencilOps);
    if (!renderPass) {
        return nullptr;
    }
    fRenderPasses.push_back(renderPass);
    fLastReturnedIndex = fRenderPasses.count() - 1;
    return renderPass;
}

void GrVkResourceProvider::CompatibleRenderPassSet::releaseResources() {
    for (int i = 0; i < fRenderPasses.count(); ++i) {
        if (fRenderPasses[i]) {
            fRenderPasses[i]->unref();
            fRenderPasses[i] = nullptr;
        }
    }
}