platform/core/uifw/rive-tizen.git: submodule/skia/src/gpu/ganesh/mtl/GrMtlOpsRenderPass.mm
/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/mtl/GrMtlOpsRenderPass.h"

#include "src/gpu/ganesh/GrBackendUtils.h"
#include "src/gpu/ganesh/GrColor.h"
#include "src/gpu/ganesh/GrRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineState.h"
#include "src/gpu/ganesh/mtl/GrMtlPipelineStateBuilder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderCommandEncoder.h"
#include "src/gpu/ganesh/mtl/GrMtlRenderTarget.h"
#include "src/gpu/ganesh/mtl/GrMtlTexture.h"

#if !__has_feature(objc_arc)
#error This file must be compiled with ARC. Use the -fobjc-arc flag.
#endif

GR_NORETAIN_BEGIN

GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt,
                                       sk_sp<GrMtlFramebuffer> framebuffer, GrSurfaceOrigin origin,
                                       const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
                                       const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
        , fFramebuffer(std::move(framebuffer)) {
    this->setupRenderPass(colorInfo, stencilInfo);
}

GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
}

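// Finalizes the pass: rounds the accumulated draw bounds out to whole pixels, asks the GPU to
// submit the command buffer work recorded for this render target, and drops the encoder
// reference.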
void GrMtlOpsRenderPass::submit() {
    if (!fFramebuffer) {
        return;
    }
    SkIRect iBounds;
    fBounds.roundOut(&iBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
    fActiveRenderCmdEncoder = nil;
}

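// Maps a GrPrimitiveType to the corresponding MTLPrimitiveType with a simple lookup table.
// The static_asserts below pin the GrPrimitiveType enum values the table ordering relies on.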
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
        MTLPrimitiveTypeLine,
        MTLPrimitiveTypeLineStrip
    };
    static_assert((int)GrPrimitiveType::kTriangles == 0);
    static_assert((int)GrPrimitiveType::kTriangleStrip == 1);
    static_assert((int)GrPrimitiveType::kPoints == 2);
    static_assert((int)GrPrimitiveType::kLines == 3);
    static_assert((int)GrPrimitiveType::kLineStrip == 4);

    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

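// Binds the pipeline for subsequent draws: builds a program descriptor, finds or creates a
// compatible GrMtlPipelineState, uploads its uniform data, lazily creates the render command
// encoder for this pass if one is not active yet, and then sets the Metal pipeline state,
// draw state, fill mode, and (when the scissor test is disabled) a full-surface scissor.
// The primitive type and draw bounds are recorded for the draw calls that follow.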
bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
                                        const SkRect& drawBounds) {
    const GrMtlCaps& caps = fGpu->mtlCaps();
    GrProgramDesc programDesc = caps.makeDesc(fRenderTarget, programInfo,
                                              GrCaps::ProgramDescOverrideFlags::kNone);
    if (!programDesc.isValid()) {
        return false;
    }

    fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
            programDesc, programInfo);
    if (!fActivePipelineState) {
        return false;
    }

    fActivePipelineState->setData(fFramebuffer.get(), programInfo);
    fCurrentVertexStride = programInfo.geomProc().vertexStride();

    if (!fActiveRenderCmdEncoder) {
        fActiveRenderCmdEncoder =
                fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc,
                                                               fActivePipelineState, this);
        if (!fActiveRenderCmdEncoder) {
            return false;
        }
        fGpu->commandBuffer()->addGrSurface(
                sk_ref_sp<GrMtlAttachment>(fFramebuffer->colorAttachment()));
    }

    fActiveRenderCmdEncoder->setRenderPipelineState(
            fActivePipelineState->pipeline()->mtlPipelineState());
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
                                       programInfo.pipeline().writeSwizzle(),
                                       programInfo.pipeline().getXferProcessor());
    if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
        fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeLines);
    } else {
        fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeFill);
    }

    if (!programInfo.pipeline().isScissorTestEnabled()) {
        // "Disable" scissor by setting it to the full pipeline bounds.
        SkISize dimensions = fFramebuffer->colorAttachment()->dimensions();
        GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                       dimensions, fOrigin,
                                                       SkIRect::MakeWH(dimensions.width(),
                                                                       dimensions.height()));
    }

    fActivePrimitiveType = gr_to_mtl_primitive(programInfo.primitiveType());
    fBounds.join(drawBounds);
    return true;
}

void GrMtlOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
    GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
                                                   fFramebuffer->colorAttachment()->dimensions(),
                                                   fOrigin, scissor);
}

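// Gathers the textures required by the geometry processor and pipeline and binds them on the
// active encoder via the pipeline state.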
bool GrMtlOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
                                        const GrSurfaceProxy* const geomProcTextures[],
                                        const GrPipeline& pipeline) {
    SkASSERT(fActivePipelineState);
    SkASSERT(fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    fActivePipelineState->setTextures(geomProc, pipeline, geomProcTextures);
    fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
    return true;
}

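// Full-target color clear. Scissored (partial) clears are not supported here; the clear is
// expressed by restarting the render command encoder with a Clear load action and the
// requested clear color.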
void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    // Ideally we should never end up here since all clears should either be done as draws or
    // load ops in Metal. However, if a client inserts a wait op we need to handle it.
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    colorAttachment.clearColor = MTLClearColorMake(color[0], color[1], color[2], color[3]);
    colorAttachment.loadAction = MTLLoadActionClear;
    fActiveRenderCmdEncoder =
            fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
}

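// Clears the stencil clip. The whole stencil attachment is cleared (partial clears are not
// supported): to the topmost stencil bit when clearing inside the stencil mask, or to zero
// otherwise, again by restarting the encoder with a Clear load action.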
void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
    // Partial clears are not supported
    SkASSERT(!scissor.enabled());

    GrAttachment* sb = fFramebuffer->stencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
    auto stencilAttachment = fRenderPassDesc.stencilAttachment;
    if (insideStencilMask) {
        stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        stencilAttachment.clearStencil = 0;
    }

    stencilAttachment.loadAction = MTLLoadActionClear;
    fActiveRenderCmdEncoder = this->setupResolve();

    if (!fActiveRenderCmdEncoder) {
        fActiveRenderCmdEncoder =
                fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    }
}

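// Executes a deferred texture upload in the middle of the pass. The upload uses a blit
// command encoder, so the render pass has to be restarted afterwards: the load actions are
// switched to Load so previously rendered content is preserved, and setupResolve() is rerun
// in case MSAA contents must be reloaded from the resolve attachment.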
void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
    // TODO: could this be more efficient?
    state->doUpload(upload);
    // doUpload() creates a blitCommandEncoder, so if we had a previous render we need to
    // adjust the renderPassDescriptor to load from it.
    if (fActiveRenderCmdEncoder) {
        auto colorAttachment = fRenderPassDesc.colorAttachments[0];
        colorAttachment.loadAction = MTLLoadActionLoad;
        auto mtlStencil = fRenderPassDesc.stencilAttachment;
        mtlStencil.loadAction = MTLLoadActionLoad;
    }
    // If the previous renderCommandEncoder did a resolve without an MSAA store
    // (e.g., if the color attachment is memoryless) we need to copy the contents of
    // the resolve attachment to the MSAA attachment at this point.
    fActiveRenderCmdEncoder = this->setupResolve();

    if (!fActiveRenderCmdEncoder) {
        // If setting up for the resolve didn't create an encoder, it's probably reasonable to
        // create a new encoder at this point, though maybe not necessary.
        fActiveRenderCmdEncoder =
                fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
    }
}

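// Sets the baseline state on a freshly created render command encoder (presumably invoked by
// the command buffer whenever it hands out a new encoder for this pass): front-face winding
// and a viewport covering the full color attachment.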
void GrMtlOpsRenderPass::initRenderState(GrMtlRenderCommandEncoder* encoder) {
    if (!encoder) {
        return;
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    encoder->pushDebugGroup(@"initRenderState");
#endif
    encoder->setFrontFacingWinding(MTLWindingCounterClockwise);
    SkISize colorAttachmentDimensions = fFramebuffer->colorAttachment()->dimensions();
    // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
    // the drawable used to generate the renderCommandEncoder -- but just in case.
    MTLViewport viewport = { 0.0, 0.0,
                             (double) colorAttachmentDimensions.width(),
                             (double) colorAttachmentDimensions.height(),
                             0.0, 1.0 };
    encoder->setViewport(viewport);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    encoder->popDebugGroup();
#endif
}

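// Builds the MTLRenderPassDescriptor for this pass: translates the Ganesh load/store ops to
// Metal load/store actions for the color and stencil attachments, hooks up the resolve
// attachment if present, and creates an initial encoder only when a clear load op has to be
// applied up front. Otherwise encoder creation is deferred until the first draw.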
void GrMtlOpsRenderPass::setupRenderPass(
        const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
        const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    static_assert((int)GrLoadOp::kLoad == 0);
    static_assert((int)GrLoadOp::kClear == 1);
    static_assert((int)GrLoadOp::kDiscard == 2);
    SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
    SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    static_assert((int)GrStoreOp::kStore == 0);
    static_assert((int)GrStoreOp::kDiscard == 1);
    SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
    SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);

    fRenderPassDesc = [MTLRenderPassDescriptor new];
    auto colorAttachment = fRenderPassDesc.colorAttachments[0];
    auto color = fFramebuffer->colorAttachment();
    colorAttachment.texture = color->mtlTexture();
    const std::array<float, 4>& clearColor = colorInfo.fClearColor;
    colorAttachment.clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
    colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];

    auto stencil = fFramebuffer->stencilAttachment();
    auto mtlStencil = fRenderPassDesc.stencilAttachment;
    if (stencil) {
        mtlStencil.texture = stencil->mtlTexture();
    }
    mtlStencil.clearStencil = 0;
    mtlStencil.loadAction = mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
    mtlStencil.storeAction = mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];

    fActiveRenderCmdEncoder = this->setupResolve();

    if (!fActiveRenderCmdEncoder) {
        // Manage initial clears
        if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
            fBounds = SkRect::MakeWH(color->dimensions().width(),
                                     color->dimensions().height());
            fActiveRenderCmdEncoder =
                    fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
        } else {
            fBounds.setEmpty();
            // For now, we lazily create the renderCommandEncoder because we may have no draws,
            // and an empty renderCommandEncoder can still produce output. This can cause issues
            // when we've cleared a texture upon creation -- we'll subsequently discard the contents.
            // This can be removed when that ordering is fixed.
        }
    }
}

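// If the framebuffer has a resolve attachment, points the color attachment's resolve texture
// at it and requests a multisample resolve on store. When the color load op is Load, the MSAA
// attachment's contents first have to be restored from the resolve texture; that is delegated
// to fGpu->loadMSAAFromResolve(), and the encoder it returns is handed back so the pass can
// continue on it. Returns nullptr when no resolve-load encoder was created.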
GrMtlRenderCommandEncoder* GrMtlOpsRenderPass::setupResolve() {
    auto resolve = fFramebuffer->resolveAttachment();
    if (resolve) {
        auto colorAttachment = fRenderPassDesc.colorAttachments[0];
        colorAttachment.resolveTexture = resolve->mtlTexture();
        // TODO: For framebufferOnly attachments we should do StoreAndMultisampleResolve if
        // the storeAction is Store. But for the moment they don't take this path.
        colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
        if (colorAttachment.loadAction == MTLLoadActionLoad) {
            auto color = fFramebuffer->colorAttachment();
            auto dimensions = color->dimensions();
            // for now use the full bounds
            auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
                    fOrigin, dimensions.height(), SkIRect::MakeSize(dimensions));
            return fGpu->loadMSAAFromResolve(color, resolve, nativeBounds,
                                             fRenderPassDesc.stencilAttachment);
        }
    }

    return nullptr;
}

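// Records the vertex, instance, and index buffers for the upcoming draws. The instance buffer
// is bound to the encoder immediately at the next free input slot; the vertex buffer is bound
// lazily at draw time (so a base-vertex offset can be applied) and the index buffer is only
// stashed, since Metal takes it as an argument to the indexed draw call itself.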
void GrMtlOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
                                       sk_sp<const GrBuffer> instanceBuffer,
                                       sk_sp<const GrBuffer> vertexBuffer,
                                       GrPrimitiveRestart primRestart) {
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    SkASSERT(GrPrimitiveRestart::kNo == primRestart);
    int inputBufferIndex = 0;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer.get())->isMapped());
        fActiveVertexBuffer = std::move(vertexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveVertexBuffer);
        ++inputBufferIndex;
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer.get())->isMapped());
        this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer.get(), 0, inputBufferIndex++);
        fActiveInstanceBuffer = std::move(instanceBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveInstanceBuffer);
    }
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer.get())->isMapped());
        fActiveIndexBuffer = std::move(indexBuffer);
        fGpu->commandBuffer()->addGrBuffer(fActiveIndexBuffer);
    }
}

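// The onDraw* overrides below issue the actual Metal draw calls. Each one binds the stashed
// vertex buffer (applying a base-vertex offset where needed), calls the matching draw on the
// active render command encoder, bumps the draw-count stats, and in debug builds closes the
// "bindAndDraw" debug group opened during binding. Instanced and indirect variants guard the
// newer Metal entry points with @available checks.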
void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount);
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
                                       uint16_t maxIndexValue, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
                          fCurrentVertexStride * baseVertex, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    size_t indexOffset = sizeof(uint16_t) * baseIndex;
    id<MTLBuffer> indexBuffer = mtlIndexBuffer->mtlBuffer();
    fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
                                                   MTLIndexTypeUInt16, indexBuffer, indexOffset);
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
                                         int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    if (@available(macOS 10.11, iOS 9.0, *)) {
        fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount,
                                                instanceCount, baseInstance);
    } else {
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

void GrMtlOpsRenderPass::onDrawIndexedInstanced(
        int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    size_t indexOffset = sizeof(uint16_t) * baseIndex;
    if (@available(macOS 10.11, iOS 9.0, *)) {
        fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
                                                       MTLIndexTypeUInt16,
                                                       mtlIndexBuffer->mtlBuffer(), indexOffset,
                                                       instanceCount, baseVertex, baseInstance);
    } else {
        SkASSERT(false);
    }
    fGpu->stats()->incNumDraws();
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

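// Metal has no multi-draw-indirect, so the two indirect variants below loop over drawCount,
// issuing one indirect draw per GrDrawIndirectCommand / GrDrawIndexedIndirectCommand record
// and advancing bufferOffset by the command stride each iteration.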
void GrMtlOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer,
                                        size_t bufferOffset,
                                        int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    const size_t stride = sizeof(GrDrawIndirectCommand);
    while (drawCount >= 1) {
        if (@available(macOS 10.11, iOS 9.0, *)) {
            fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType,
                                                    mtlIndirectBuffer->mtlBuffer(), bufferOffset);
        } else {
            SkASSERT(false);
        }
        drawCount--;
        bufferOffset += stride;
        fGpu->stats()->incNumDraws();
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

void GrMtlOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer,
                                               size_t bufferOffset,
                                               int drawCount) {
    SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
    SkASSERT(fActivePipelineState);
    SkASSERT(nil != fActiveRenderCmdEncoder);
    SkASSERT(fActiveIndexBuffer);
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    if (!fDebugGroupActive) {
        fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
        fDebugGroupActive = true;
    }
#endif
    this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);

    auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
    auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
    size_t indexOffset = 0;

    const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
    while (drawCount >= 1) {
        if (@available(macOS 10.11, iOS 9.0, *)) {
            fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType,
                                                           MTLIndexTypeUInt16,
                                                           mtlIndexBuffer->mtlBuffer(),
                                                           indexOffset,
                                                           mtlIndirectBuffer->mtlBuffer(),
                                                           bufferOffset);
        } else {
            SkASSERT(false);
        }
        drawCount--;
        bufferOffset += stride;
        fGpu->stats()->incNumDraws();
    }
#ifdef SK_ENABLE_MTL_DEBUG_INFO
    SkASSERT(fDebugGroupActive);
    fActiveRenderCmdEncoder->popDebugGroup();
    fDebugGroupActive = false;
#endif
}

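// Binds a vertex or instance buffer on the encoder. Ganesh input-buffer indices are remapped
// into the Metal buffer-argument table after the uniform buffer bindings
// (GrMtlUniformHandler::kLastUniformBinding + 1 onwards), and the given byte offset is applied.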
void GrMtlOpsRenderPass::setVertexBuffer(GrMtlRenderCommandEncoder* encoder,
                                         const GrBuffer* buffer,
                                         size_t vertexOffset,
                                         size_t inputBufferIndex) {
    if (!buffer) {
        return;
    }

    constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
    int index = inputBufferIndex + kFirstBufferBindingIdx;
    SkASSERT(index < 4);
    auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
    id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
    SkASSERT(mtlVertexBuffer);
    size_t offset = vertexOffset;
    encoder->setVertexBuffer(mtlVertexBuffer, offset, index);
}

GR_NORETAIN_END