/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
8 #ifndef GrTargetCommands_DEFINED
9 #define GrTargetCommands_DEFINED
12 #include "GrBatchTarget.h"
13 #include "GrDrawTarget.h"
16 #include "GrPendingProgramElement.h"
17 #include "GrRenderTarget.h"
18 #include "GrTRecorder.h"
22 class GrInOrderDrawBuffer;
23 class GrVertexBufferAllocPool;
24 class GrIndexBufferAllocPool;
26 class GrTargetCommands : ::SkNoncopyable {
30 GrTargetCommands(GrGpu* gpu,
31 GrVertexBufferAllocPool* vertexPool,
32 GrIndexBufferAllocPool* indexPool)
33 : fCmdBuffer(kCmdBufferInitialSizeInBytes)
35 , fBatchTarget(gpu, vertexPool, indexPool)
39 class Cmd : ::SkNoncopyable {
43 kStencilPath_CmdType = 2,
44 kSetState_CmdType = 3,
46 kCopySurface_CmdType = 5,
47 kDrawPath_CmdType = 6,
48 kDrawPaths_CmdType = 7,
49 kDrawBatch_CmdType = 8,
52 Cmd(CmdType type) : fMarkerID(-1), fType(type) {}
55 virtual void execute(GrGpu*, const SetState*) = 0;
57 CmdType type() const { return fType; }
60 bool isTraced() const { return -1 != fMarkerID; }
61 void setMarkerID(int markerID) { SkASSERT(-1 == fMarkerID); fMarkerID = markerID; }
62 int markerID() const { return fMarkerID; }
70 void flush(GrInOrderDrawBuffer*);
72 Cmd* recordClearStencilClip(GrInOrderDrawBuffer*,
75 GrRenderTarget* renderTarget);
77 Cmd* recordDiscard(GrInOrderDrawBuffer*, GrRenderTarget*);
79 Cmd* recordDraw(GrInOrderDrawBuffer*,
80 const GrGeometryProcessor*,
81 const GrDrawTarget::DrawInfo&,
82 const GrDrawTarget::PipelineInfo&);
83 Cmd* recordDrawBatch(GrInOrderDrawBuffer*,
85 const GrDrawTarget::PipelineInfo&);
86 void recordDrawRect(GrInOrderDrawBuffer*,
89 const SkMatrix& viewMatrix,
91 const SkRect* localRect,
92 const SkMatrix* localMatrix);
93 Cmd* recordStencilPath(GrInOrderDrawBuffer*,
94 const GrPipelineBuilder&,
95 const GrPathProcessor*,
97 const GrScissorState&,
98 const GrStencilSettings&);
99 Cmd* recordDrawPath(GrInOrderDrawBuffer*,
100 const GrPathProcessor*,
102 const GrStencilSettings&,
103 const GrDrawTarget::PipelineInfo&);
104 Cmd* recordDrawPaths(GrInOrderDrawBuffer*,
105 const GrPathProcessor*,
108 GrDrawTarget::PathIndexType,
109 const float transformValues[],
110 GrDrawTarget::PathTransformType ,
112 const GrStencilSettings&,
113 const GrDrawTarget::PipelineInfo&);
114 Cmd* recordClear(GrInOrderDrawBuffer*,
119 Cmd* recordCopySurface(GrInOrderDrawBuffer*,
122 const SkIRect& srcRect,
123 const SkIPoint& dstPoint);
126 void willReserveVertexAndIndexSpace(int vertexCount,
131 friend class GrInOrderDrawBuffer;
133 typedef GrGpu::DrawArgs DrawArgs;
135 // Attempts to concat instances from info onto the previous draw. info must represent an
136 // instanced draw. The caller must have already recorded a new draw state and clip if necessary.
137 int concatInstancedDraw(GrInOrderDrawBuffer*, const GrDrawTarget::DrawInfo&);
139 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
140 const GrPrimitiveProcessor*,
141 const GrDrawTarget::PipelineInfo&);
142 bool SK_WARN_UNUSED_RESULT setupPipelineAndShouldDraw(GrInOrderDrawBuffer*,
144 const GrDrawTarget::PipelineInfo&);
146 struct Draw : public Cmd {
147 Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_CmdType), fInfo(info) {}
149 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
151 GrDrawTarget::DrawInfo fInfo;
154 struct StencilPath : public Cmd {
155 StencilPath(const GrPath* path, GrRenderTarget* rt)
156 : Cmd(kStencilPath_CmdType)
160 const GrPath* path() const { return fPath.get(); }
162 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
164 SkMatrix fViewMatrix;
166 GrStencilSettings fStencil;
167 GrScissorState fScissor;
169 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
170 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
173 struct DrawPath : public Cmd {
174 DrawPath(const GrPath* path) : Cmd(kDrawPath_CmdType), fPath(path) {}
176 const GrPath* path() const { return fPath.get(); }
178 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
180 GrStencilSettings fStencilSettings;
183 GrPendingIOResource<const GrPath, kRead_GrIOType> fPath;
186 struct DrawPaths : public Cmd {
187 DrawPaths(const GrPathRange* pathRange) : Cmd(kDrawPaths_CmdType), fPathRange(pathRange) {}
189 const GrPathRange* pathRange() const { return fPathRange.get(); }
191 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
194 GrDrawTarget::PathIndexType fIndexType;
196 GrDrawTarget::PathTransformType fTransformType;
198 GrStencilSettings fStencilSettings;
201 GrPendingIOResource<const GrPathRange, kRead_GrIOType> fPathRange;
204 // This is also used to record a discard by setting the color to GrColor_ILLEGAL
205 struct Clear : public Cmd {
206 Clear(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {}
208 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
210 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
217 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
220 // This command is ONLY used by the clip mask manager to clear the stencil clip bits
221 struct ClearStencilClip : public Cmd {
222 ClearStencilClip(GrRenderTarget* rt) : Cmd(kClear_CmdType), fRenderTarget(rt) {}
224 GrRenderTarget* renderTarget() const { return fRenderTarget.get(); }
226 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
232 GrPendingIOResource<GrRenderTarget, kWrite_GrIOType> fRenderTarget;
235 struct CopySurface : public Cmd {
236 CopySurface(GrSurface* dst, GrSurface* src)
237 : Cmd(kCopySurface_CmdType)
242 GrSurface* dst() const { return fDst.get(); }
243 GrSurface* src() const { return fSrc.get(); }
245 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
251 GrPendingIOResource<GrSurface, kWrite_GrIOType> fDst;
252 GrPendingIOResource<GrSurface, kRead_GrIOType> fSrc;
255 // TODO: rename to SetPipeline once pp, batch tracker, and desc are removed
256 struct SetState : public Cmd {
257 // TODO get rid of the prim proc parameter when we use batch everywhere
258 SetState(const GrPrimitiveProcessor* primProc = NULL)
259 : Cmd(kSetState_CmdType)
260 , fPrimitiveProcessor(primProc) {}
262 ~SetState() { reinterpret_cast<GrPipeline*>(fPipeline.get())->~GrPipeline(); }
// This function is only for getting the location in memory where we will create our GrPipeline.
266 GrPipeline* pipelineLocation() { return reinterpret_cast<GrPipeline*>(fPipeline.get()); }
268 const GrPipeline* getPipeline() const {
269 return reinterpret_cast<const GrPipeline*>(fPipeline.get());
272 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
274 typedef GrPendingProgramElement<const GrPrimitiveProcessor> ProgramPrimitiveProcessor;
275 ProgramPrimitiveProcessor fPrimitiveProcessor;
276 SkAlignedSStorage<sizeof(GrPipeline)> fPipeline;
278 GrBatchTracker fBatchTracker;
281 struct DrawBatch : public Cmd {
282 DrawBatch(GrBatch* batch, GrBatchTarget* batchTarget)
283 : Cmd(kDrawBatch_CmdType)
284 , fBatch(SkRef(batch))
285 , fBatchTarget(batchTarget) {
286 SkASSERT(!batch->isUsed());
289 void execute(GrGpu*, const SetState*) SK_OVERRIDE;
291 // TODO it wouldn't be too hard to let batches allocate in the cmd buffer
292 SkAutoTUnref<GrBatch> fBatch;
295 GrBatchTarget* fBatchTarget;
298 static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
300 typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
301 typedef GrTRecorder<Cmd, TCmdAlign> CmdBuffer;
303 CmdBuffer fCmdBuffer;
304 SetState* fPrevState;
305 GrBatchTarget fBatchTarget;
306 // TODO hack until batch is everywhere
307 GrTargetCommands::DrawBatch* fDrawBatch;
309 // This will go away when everything uses batch. However, in the short term anything which
310 // might be put into the GrInOrderDrawBuffer needs to make sure it closes the last batch