2 * Copyright 2018 Google Inc.
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
8 #include "src/gpu/ganesh/mtl/GrMtlOpsRenderPass.h"
10 #include "src/gpu/ganesh/GrBackendUtils.h"
11 #include "src/gpu/ganesh/GrColor.h"
12 #include "src/gpu/ganesh/GrRenderTarget.h"
13 #include "src/gpu/ganesh/mtl/GrMtlCommandBuffer.h"
14 #include "src/gpu/ganesh/mtl/GrMtlPipelineState.h"
15 #include "src/gpu/ganesh/mtl/GrMtlPipelineStateBuilder.h"
16 #include "src/gpu/ganesh/mtl/GrMtlRenderCommandEncoder.h"
17 #include "src/gpu/ganesh/mtl/GrMtlRenderTarget.h"
18 #include "src/gpu/ganesh/mtl/GrMtlTexture.h"
20 #if !__has_feature(objc_arc)
21 #error This file must be compiled with Arc. Use -fobjc-arc flag
// Records a Ganesh ops render pass for the Metal backend. Translates the
// color/stencil load-and-store ops into a MTLRenderPassDescriptor immediately
// (via setupRenderPass) so later binds/draws can lazily create an encoder.
26 GrMtlOpsRenderPass::GrMtlOpsRenderPass(GrMtlGpu* gpu, GrRenderTarget* rt,
27 sk_sp<GrMtlFramebuffer> framebuffer, GrSurfaceOrigin origin,
28 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
29 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo)
30 : INHERITED(rt, origin)
// NOTE(review): an initializer (presumably fGpu(gpu)) appears elided between
// these two initializer-list lines in this view -- confirm against upstream.
32 , fFramebuffer(std::move(framebuffer)) {
// Build the render pass descriptor (and possibly an eager encoder) up front.
33 this->setupRenderPass(colorInfo, stencilInfo);
// Destructor. Body not visible in this view.
36 GrMtlOpsRenderPass::~GrMtlOpsRenderPass() {
// Submits the recorded pass: hands the accumulated draw bounds to the GPU
// and releases the reference to the active encoder.
39 void GrMtlOpsRenderPass::submit() {
// Round the union of draw bounds out to whole pixels before submission.
44 fBounds.roundOut(&iBounds);
45 fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
// Drop our reference; any further encoding must create a fresh encoder.
46 fActiveRenderCmdEncoder = nil;
// Maps a GrPrimitiveType to the corresponding MTLPrimitiveType via a lookup
// table indexed by the enum's integer value.
49 static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
50 const static MTLPrimitiveType mtlPrimitiveType[] {
51 MTLPrimitiveTypeTriangle,
52 MTLPrimitiveTypeTriangleStrip,
53 MTLPrimitiveTypePoint,
// NOTE(review): the kLines entry (MTLPrimitiveTypeLine) appears elided from
// this view; the static_asserts below imply a 5-entry table.
55 MTLPrimitiveTypeLineStrip
// Pin the enum ordering that the table layout depends on.
57 static_assert((int)GrPrimitiveType::kTriangles == 0);
58 static_assert((int)GrPrimitiveType::kTriangleStrip == 1);
59 static_assert((int)GrPrimitiveType::kPoints == 2);
60 static_assert((int)GrPrimitiveType::kLines == 3);
61 static_assert((int)GrPrimitiveType::kLineStrip == 4);
63 SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
64 return mtlPrimitiveType[static_cast<int>(primitiveType)];
// Finds or builds a cached GrMtlPipelineState for programInfo, lazily creates
// the render command encoder, and applies per-pipeline dynamic state
// (fill mode, scissor, primitive type). Returns false on failure (elided
// early-return bodies).
67 bool GrMtlOpsRenderPass::onBindPipeline(const GrProgramInfo& programInfo,
68 const SkRect& drawBounds) {
69 const GrMtlCaps& caps = fGpu->mtlCaps();
70 GrProgramDesc programDesc = caps.makeDesc(fRenderTarget, programInfo,
71 GrCaps::ProgramDescOverrideFlags::kNone);
72 if (!programDesc.isValid()) {
// Look up (or compile) a pipeline state compatible with this program desc.
76 fActivePipelineState = fGpu->resourceProvider().findOrCreateCompatiblePipelineState(
77 programDesc, programInfo);
78 if (!fActivePipelineState) {
82 fActivePipelineState->setData(fFramebuffer.get(), programInfo);
// Remember the vertex stride; onDrawIndexed uses it to offset by baseVertex.
83 fCurrentVertexStride = programInfo.geomProc().vertexStride();
// The encoder is created lazily so a pass with no draws emits no commands.
85 if (!fActiveRenderCmdEncoder) {
86 fActiveRenderCmdEncoder =
87 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc,
88 fActivePipelineState, this);
89 if (!fActiveRenderCmdEncoder) {
// Keep the color attachment alive for the duration of the command buffer.
92 fGpu->commandBuffer()->addGrSurface(
93 sk_ref_sp<GrMtlAttachment>(fFramebuffer->colorAttachment()));
96 fActiveRenderCmdEncoder->setRenderPipelineState(
97 fActivePipelineState->pipeline()->mtlPipelineState());
// Open a debug group covering this bind and the draw that follows; the
// matching popDebugGroup happens at the end of each onDraw* call.
98 #ifdef SK_ENABLE_MTL_DEBUG_INFO
99 if (!fDebugGroupActive) {
100 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
101 fDebugGroupActive = true;
104 fActivePipelineState->setDrawState(fActiveRenderCmdEncoder,
105 programInfo.pipeline().writeSwizzle(),
106 programInfo.pipeline().getXferProcessor());
// Wireframe can be forced globally (caps) or requested per-pipeline.
107 if (this->gpu()->caps()->wireframeMode() || programInfo.pipeline().isWireframe()) {
108 fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeLines);
110 fActiveRenderCmdEncoder->setTriangleFillMode(MTLTriangleFillModeFill);
113 if (!programInfo.pipeline().isScissorTestEnabled()) {
114 // "Disable" scissor by setting it to the full pipeline bounds.
115 SkISize dimensions = fFramebuffer->colorAttachment()->dimensions();
116 GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
118 SkIRect::MakeWH(dimensions.width(),
119 dimensions.height()));
122 fActivePrimitiveType = gr_to_mtl_primitive(programInfo.primitiveType());
// Accumulate draw bounds; submit() reports the union to the GPU.
123 fBounds.join(drawBounds);
// Applies a caller-provided scissor rect. Requires that onBindPipeline has
// already established a pipeline state and encoder.
127 void GrMtlOpsRenderPass::onSetScissorRect(const SkIRect& scissor) {
128 SkASSERT(fActivePipelineState);
129 SkASSERT(fActiveRenderCmdEncoder);
130 GrMtlPipelineState::SetDynamicScissorRectState(fActiveRenderCmdEncoder,
131 fFramebuffer->colorAttachment()->dimensions(),
// Binds the geometry-processor and pipeline textures/samplers on the active
// encoder via the current pipeline state.
135 bool GrMtlOpsRenderPass::onBindTextures(const GrGeometryProcessor& geomProc,
136 const GrSurfaceProxy* const geomProcTextures[],
137 const GrPipeline& pipeline) {
138 SkASSERT(fActivePipelineState);
139 SkASSERT(fActiveRenderCmdEncoder);
// Ensure the bindAndDraw debug group is open (popped after the draw call).
140 #ifdef SK_ENABLE_MTL_DEBUG_INFO
141 if (!fDebugGroupActive) {
142 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
143 fDebugGroupActive = true;
146 fActivePipelineState->setTextures(geomProc, pipeline, geomProcTextures);
147 fActivePipelineState->bindTextures(fActiveRenderCmdEncoder);
// Clears the full color attachment by rewriting the pass's load action to
// Clear and starting a fresh encoder. Partial (scissored) clears are not
// supported on this path.
151 void GrMtlOpsRenderPass::onClear(const GrScissorState& scissor, std::array<float, 4> color) {
152 // Partial clears are not supported
153 SkASSERT(!scissor.enabled());
155 // Ideally we should never end up here since all clears should either be done as draws or
156 // load ops in metal. However, if a client inserts a wait op we need to handle it.
157 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
158 colorAttachment.clearColor = MTLClearColorMake(color[0], color[1], color[2], color[3]);
159 colorAttachment.loadAction = MTLLoadActionClear;
// A new encoder is required for the modified load action to take effect.
160 fActiveRenderCmdEncoder =
161 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
// Clears the entire stencil attachment for clip purposes by rewriting the
// stencil load action and starting a fresh encoder.
164 void GrMtlOpsRenderPass::onClearStencilClip(const GrScissorState& scissor, bool insideStencilMask) {
165 // Partial clears are not supported
166 SkASSERT(!scissor.enabled());
168 GrAttachment* sb = fFramebuffer->stencilAttachment();
169 // this should only be called internally when we know we have a
172 int stencilBitCount = GrBackendFormatStencilBits(sb->backendFormat());
174 // The contract with the callers does not guarantee that we preserve all bits in the stencil
175 // during this clear. Thus we will clear the entire stencil to the desired value.
176 auto stencilAttachment = fRenderPassDesc.stencilAttachment;
177 if (insideStencilMask) {
// "Inside the mask" is represented by the top stencil bit being set.
178 stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
180 stencilAttachment.clearStencil = 0;
183 stencilAttachment.loadAction = MTLLoadActionClear;
// setupResolve may return an encoder that reloads MSAA from the resolve
// texture; otherwise create a plain encoder for the clear.
184 fActiveRenderCmdEncoder = this->setupResolve();
186 if (!fActiveRenderCmdEncoder) {
187 fActiveRenderCmdEncoder =
188 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
// Performs a texture upload in the middle of the pass. The upload encodes on
// a blit encoder, ending any active render encoding, so the pass must be
// reconfigured to Load its previous contents afterwards.
192 void GrMtlOpsRenderPass::inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) {
193 // TODO: could this be more efficient?
194 state->doUpload(upload);
195 // doUpload() creates a blitCommandEncoder, so if we had a previous render we need to
196 // adjust the renderPassDescriptor to load from it.
197 if (fActiveRenderCmdEncoder) {
198 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
199 colorAttachment.loadAction = MTLLoadActionLoad;
200 auto mtlStencil = fRenderPassDesc.stencilAttachment;
201 mtlStencil.loadAction = MTLLoadActionLoad;
203 // If the previous renderCommandEncoder did a resolve without an MSAA store
204 // (e.g., if the color attachment is memoryless) we need to copy the contents of
205 // the resolve attachment to the MSAA attachment at this point.
206 fActiveRenderCmdEncoder = this->setupResolve();
208 if (!fActiveRenderCmdEncoder) {
209 // If setting up for the resolve didn't create an encoder, it's probably reasonable to
210 // create a new encoder at this point, though maybe not necessary.
211 fActiveRenderCmdEncoder =
212 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
// One-time state applied to every new encoder for this pass: front-face
// winding and a viewport covering the full color attachment.
216 void GrMtlOpsRenderPass::initRenderState(GrMtlRenderCommandEncoder* encoder) {
220 #ifdef SK_ENABLE_MTL_DEBUG_INFO
221 encoder->pushDebugGroup(@"initRenderState");
// Skia's convention: counter-clockwise triangles are front-facing.
223 encoder->setFrontFacingWinding(MTLWindingCounterClockwise);
224 SkISize colorAttachmentDimensions = fFramebuffer->colorAttachment()->dimensions();
225 // Strictly speaking we shouldn't have to set this, as the default viewport is the size of
226 // the drawable used to generate the renderCommandEncoder -- but just in case.
227 MTLViewport viewport = { 0.0, 0.0,
228 (double) colorAttachmentDimensions.width(),
229 (double) colorAttachmentDimensions.height(),
231 encoder->setViewport(viewport);
232 #ifdef SK_ENABLE_MTL_DEBUG_INFO
233 encoder->popDebugGroup();
// Builds fRenderPassDesc from the pass's load/store ops: configures the
// color and stencil attachments, wires up MSAA resolve, and eagerly creates
// an encoder only when an initial clear must be flushed.
237 void GrMtlOpsRenderPass::setupRenderPass(
238 const GrOpsRenderPass::LoadAndStoreInfo& colorInfo,
239 const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo) {
// Lookup tables indexed by GrLoadOp / GrStoreOp; the static_asserts below
// pin the enum ordering. NOTE(review): the Load/Clear (and Store) entries
// appear elided from this view -- confirm against upstream.
240 const static MTLLoadAction mtlLoadAction[] {
243 MTLLoadActionDontCare
245 static_assert((int)GrLoadOp::kLoad == 0);
246 static_assert((int)GrLoadOp::kClear == 1);
247 static_assert((int)GrLoadOp::kDiscard == 2);
248 SkASSERT(colorInfo.fLoadOp <= GrLoadOp::kDiscard);
249 SkASSERT(stencilInfo.fLoadOp <= GrLoadOp::kDiscard);
251 const static MTLStoreAction mtlStoreAction[] {
253 MTLStoreActionDontCare
255 static_assert((int)GrStoreOp::kStore == 0);
256 static_assert((int)GrStoreOp::kDiscard == 1);
257 SkASSERT(colorInfo.fStoreOp <= GrStoreOp::kDiscard);
258 SkASSERT(stencilInfo.fStoreOp <= GrStoreOp::kDiscard);
260 fRenderPassDesc = [MTLRenderPassDescriptor new];
261 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
262 auto color = fFramebuffer->colorAttachment();
263 colorAttachment.texture = color->mtlTexture();
264 const std::array<float, 4>& clearColor = colorInfo.fClearColor;
265 colorAttachment.clearColor =
266 MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
267 colorAttachment.loadAction = mtlLoadAction[static_cast<int>(colorInfo.fLoadOp)];
268 colorAttachment.storeAction = mtlStoreAction[static_cast<int>(colorInfo.fStoreOp)];
270 auto stencil = fFramebuffer->stencilAttachment();
271 auto mtlStencil = fRenderPassDesc.stencilAttachment;
273 mtlStencil.texture = stencil->mtlTexture();
275 mtlStencil.clearStencil = 0;
276 mtlStencil.loadAction = mtlLoadAction[static_cast<int>(stencilInfo.fLoadOp)];
277 mtlStencil.storeAction = mtlStoreAction[static_cast<int>(stencilInfo.fStoreOp)];
// May return an encoder that reloads MSAA contents from the resolve texture.
279 fActiveRenderCmdEncoder = this->setupResolve();
281 if (!fActiveRenderCmdEncoder) {
282 // Manage initial clears
283 if (colorInfo.fLoadOp == GrLoadOp::kClear || stencilInfo.fLoadOp == GrLoadOp::kClear) {
// A clear touches the whole surface, so the pass bounds are the full size.
284 fBounds = SkRect::MakeWH(color->dimensions().width(),
285 color->dimensions().height());
286 fActiveRenderCmdEncoder =
287 fGpu->commandBuffer()->getRenderCommandEncoder(fRenderPassDesc, nullptr, this);
290 // For now, we lazily create the renderCommandEncoder because we may have no draws,
291 // and an empty renderCommandEncoder can still produce output. This can cause issues
292 // when we've cleared a texture upon creation -- we'll subsequently discard the contents.
293 // This can be removed when that ordering is fixed.
// If the framebuffer has a resolve attachment, configures the pass to resolve
// MSAA into it. When the color load action is Load, returns an encoder that
// first copies the resolve texture's contents back into the MSAA attachment.
298 GrMtlRenderCommandEncoder* GrMtlOpsRenderPass::setupResolve() {
299 auto resolve = fFramebuffer->resolveAttachment();
301 auto colorAttachment = fRenderPassDesc.colorAttachments[0];
302 colorAttachment.resolveTexture = resolve->mtlTexture();
303 // TODO: For framebufferOnly attachments we should do StoreAndMultisampleResolve if
304 // the storeAction is Store. But for the moment they don't take this path.
305 colorAttachment.storeAction = MTLStoreActionMultisampleResolve;
306 if (colorAttachment.loadAction == MTLLoadActionLoad) {
307 auto color = fFramebuffer->colorAttachment();
308 auto dimensions = color->dimensions();
309 // for now use the full bounds
310 auto nativeBounds = GrNativeRect::MakeIRectRelativeTo(
311 fOrigin, dimensions.height(), SkIRect::MakeSize(dimensions));
// Encodes a draw that loads the resolve contents into the MSAA color buffer.
312 return fGpu->loadMSAAFromResolve(color, resolve, nativeBounds,
313 fRenderPassDesc.stencilAttachment);
// NOTE(review): the fall-through path (no resolve attachment / no Load,
// presumably returning nil) is not visible in this view.
// Stashes the vertex/instance/index buffers for subsequent draws. The
// instance buffer is bound on the encoder immediately; vertex and index
// buffers are bound per draw (vertex offset depends on baseVertex).
320 void GrMtlOpsRenderPass::onBindBuffers(sk_sp<const GrBuffer> indexBuffer,
321 sk_sp<const GrBuffer> instanceBuffer,
322 sk_sp<const GrBuffer> vertexBuffer,
323 GrPrimitiveRestart primRestart) {
// Ensure the bindAndDraw debug group is open (popped after the draw call).
324 #ifdef SK_ENABLE_MTL_DEBUG_INFO
325 if (!fDebugGroupActive) {
326 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
327 fDebugGroupActive = true;
// Primitive restart is not supported by this backend.
330 SkASSERT(GrPrimitiveRestart::kNo == primRestart);
331 int inputBufferIndex = 0;
332 SkASSERT(!vertexBuffer->isCpuBuffer());
334 SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer.get())->isMapped());
335 fActiveVertexBuffer = std::move(vertexBuffer);
// Register the buffer so the command buffer keeps it alive until completion.
336 fGpu->commandBuffer()->addGrBuffer(fActiveVertexBuffer);
339 if (instanceBuffer) {
340 SkASSERT(!instanceBuffer->isCpuBuffer());
341 SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer.get())->isMapped());
342 this->setVertexBuffer(fActiveRenderCmdEncoder, instanceBuffer.get(), 0, inputBufferIndex++);
343 fActiveInstanceBuffer = std::move(instanceBuffer);
344 fGpu->commandBuffer()->addGrBuffer(fActiveInstanceBuffer);
347 SkASSERT(!indexBuffer->isCpuBuffer());
348 SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer.get())->isMapped());
349 fActiveIndexBuffer = std::move(indexBuffer);
350 fGpu->commandBuffer()->addGrBuffer(fActiveIndexBuffer);
// Issues a non-indexed draw with the currently bound pipeline and buffers.
354 void GrMtlOpsRenderPass::onDraw(int vertexCount, int baseVertex) {
355 SkASSERT(fActivePipelineState);
356 SkASSERT(nil != fActiveRenderCmdEncoder);
357 #ifdef SK_ENABLE_MTL_DEBUG_INFO
358 if (!fDebugGroupActive) {
359 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
360 fDebugGroupActive = true;
// The vertex buffer is (re)bound per draw at input slot 0.
363 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
365 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount);
366 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
367 #ifdef SK_ENABLE_MTL_DEBUG_INFO
368 SkASSERT(fDebugGroupActive);
369 fActiveRenderCmdEncoder->popDebugGroup();
370 fDebugGroupActive = false;
// Issues an indexed draw using 16-bit indices from the bound index buffer.
// minIndexValue/maxIndexValue are unused here (Metal doesn't need them).
374 void GrMtlOpsRenderPass::onDrawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue,
375 uint16_t maxIndexValue, int baseVertex) {
376 SkASSERT(fActivePipelineState);
377 SkASSERT(nil != fActiveRenderCmdEncoder);
378 SkASSERT(fActiveIndexBuffer);
379 #ifdef SK_ENABLE_MTL_DEBUG_INFO
380 if (!fDebugGroupActive) {
381 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
382 fDebugGroupActive = true;
// baseVertex is applied as a byte offset on the vertex buffer binding
// (stride * baseVertex) rather than passed to the draw call.
385 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(),
386 fCurrentVertexStride * baseVertex, 0);
388 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
// Indices are uint16_t, so the byte offset is 2 * baseIndex.
389 size_t indexOffset = sizeof(uint16_t) * baseIndex;
390 id<MTLBuffer> indexBuffer = mtlIndexBuffer->mtlBuffer();
391 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
392 MTLIndexTypeUInt16, indexBuffer, indexOffset);
393 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
394 #ifdef SK_ENABLE_MTL_DEBUG_INFO
395 SkASSERT(fDebugGroupActive);
396 fActiveRenderCmdEncoder->popDebugGroup();
397 fDebugGroupActive = false;
// Issues an instanced, non-indexed draw. Uses the instanced drawPrimitives
// overload behind an @available guard (the fallback branch is elided here).
401 void GrMtlOpsRenderPass::onDrawInstanced(int instanceCount, int baseInstance, int vertexCount,
403 SkASSERT(fActivePipelineState);
404 SkASSERT(nil != fActiveRenderCmdEncoder);
405 #ifdef SK_ENABLE_MTL_DEBUG_INFO
406 if (!fDebugGroupActive) {
407 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
408 fDebugGroupActive = true;
411 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
413 if (@available(macOS 10.11, iOS 9.0, *)) {
414 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType, baseVertex, vertexCount,
415 instanceCount, baseInstance);
419 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
420 #ifdef SK_ENABLE_MTL_DEBUG_INFO
421 SkASSERT(fDebugGroupActive);
422 fActiveRenderCmdEncoder->popDebugGroup();
423 fDebugGroupActive = false;
// Issues an instanced, indexed draw with 16-bit indices. Here baseVertex is
// passed to the draw call itself (unlike onDrawIndexed, which offsets the
// vertex buffer binding).
427 void GrMtlOpsRenderPass::onDrawIndexedInstanced(
428 int indexCount, int baseIndex, int instanceCount, int baseInstance, int baseVertex) {
429 SkASSERT(fActivePipelineState);
430 SkASSERT(nil != fActiveRenderCmdEncoder);
431 SkASSERT(fActiveIndexBuffer);
432 #ifdef SK_ENABLE_MTL_DEBUG_INFO
433 if (!fDebugGroupActive) {
434 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
435 fDebugGroupActive = true;
438 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
440 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
// Indices are uint16_t, so the byte offset is 2 * baseIndex.
441 size_t indexOffset = sizeof(uint16_t) * baseIndex;
442 if (@available(macOS 10.11, iOS 9.0, *)) {
443 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType, indexCount,
445 mtlIndexBuffer->mtlBuffer(), indexOffset,
446 instanceCount, baseVertex, baseInstance);
450 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
451 #ifdef SK_ENABLE_MTL_DEBUG_INFO
452 SkASSERT(fDebugGroupActive);
453 fActiveRenderCmdEncoder->popDebugGroup();
454 fDebugGroupActive = false;
// Issues one indirect draw per GrDrawIndirectCommand packed in
// drawIndirectBuffer, advancing bufferOffset by the command stride each
// iteration.
458 void GrMtlOpsRenderPass::onDrawIndirect(const GrBuffer* drawIndirectBuffer,
461 SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
462 SkASSERT(fActivePipelineState);
463 SkASSERT(nil != fActiveRenderCmdEncoder);
464 #ifdef SK_ENABLE_MTL_DEBUG_INFO
465 if (!fDebugGroupActive) {
466 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
467 fDebugGroupActive = true;
470 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
472 auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
473 const size_t stride = sizeof(GrDrawIndirectCommand);
// NOTE(review): the drawCount decrement (and the non-@available branch) is
// not visible in this view -- confirm the loop terminates upstream.
474 while (drawCount >= 1) {
475 if (@available(macOS 10.11, iOS 9.0, *)) {
476 fActiveRenderCmdEncoder->drawPrimitives(fActivePrimitiveType,
477 mtlIndirectBuffer->mtlBuffer(), bufferOffset);
482 bufferOffset += stride;
483 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
485 #ifdef SK_ENABLE_MTL_DEBUG_INFO
486 SkASSERT(fDebugGroupActive);
487 fActiveRenderCmdEncoder->popDebugGroup();
488 fDebugGroupActive = false;
// Issues one indexed indirect draw per GrDrawIndexedIndirectCommand packed
// in drawIndirectBuffer, advancing bufferOffset by the command stride each
// iteration.
492 void GrMtlOpsRenderPass::onDrawIndexedIndirect(const GrBuffer* drawIndirectBuffer,
495 SkASSERT(fGpu->caps()->nativeDrawIndirectSupport());
496 SkASSERT(fActivePipelineState);
497 SkASSERT(nil != fActiveRenderCmdEncoder);
498 SkASSERT(fActiveIndexBuffer);
499 #ifdef SK_ENABLE_MTL_DEBUG_INFO
500 if (!fDebugGroupActive) {
501 fActiveRenderCmdEncoder->pushDebugGroup(@"bindAndDraw");
502 fDebugGroupActive = true;
505 this->setVertexBuffer(fActiveRenderCmdEncoder, fActiveVertexBuffer.get(), 0, 0);
507 auto mtlIndexBuffer = static_cast<const GrMtlBuffer*>(fActiveIndexBuffer.get());
508 auto mtlIndirectBuffer = static_cast<const GrMtlBuffer*>(drawIndirectBuffer);
509 size_t indexOffset = 0;
511 const size_t stride = sizeof(GrDrawIndexedIndirectCommand);
// NOTE(review): the drawCount decrement (and the non-@available branch) is
// not visible in this view -- confirm the loop terminates upstream.
512 while (drawCount >= 1) {
513 if (@available(macOS 10.11, iOS 9.0, *)) {
514 fActiveRenderCmdEncoder->drawIndexedPrimitives(fActivePrimitiveType,
516 mtlIndexBuffer->mtlBuffer(),
518 mtlIndirectBuffer->mtlBuffer(),
524 bufferOffset += stride;
525 fGpu->stats()->incNumDraws();
// Close the debug group opened during the preceding bind calls.
527 #ifdef SK_ENABLE_MTL_DEBUG_INFO
528 SkASSERT(fDebugGroupActive);
529 fActiveRenderCmdEncoder->popDebugGroup();
530 fDebugGroupActive = false;
// Binds `buffer` as a vertex buffer on `encoder` at a slot past the uniform
// bindings, with the given byte offset. NOTE(review): the vertexOffset
// parameter declaration appears elided from this view.
534 void GrMtlOpsRenderPass::setVertexBuffer(GrMtlRenderCommandEncoder* encoder,
535 const GrBuffer* buffer,
537 size_t inputBufferIndex) {
// Vertex/instance buffers are bound after the last uniform-buffer slot so
// they never collide with uniform bindings.
542 constexpr static int kFirstBufferBindingIdx = GrMtlUniformHandler::kLastUniformBinding + 1;
543 int index = inputBufferIndex + kFirstBufferBindingIdx;
545 auto mtlBuffer = static_cast<const GrMtlBuffer*>(buffer);
546 id<MTLBuffer> mtlVertexBuffer = mtlBuffer->mtlBuffer();
547 SkASSERT(mtlVertexBuffer);
548 size_t offset = vertexOffset;
549 encoder->setVertexBuffer(mtlVertexBuffer, offset, index);