Update rive-cpp to 2.0 version
[platform/core/uifw/rive-tizen.git] / submodule / skia / tests / BulkRectTest.cpp
1 /*
2  * Copyright 2019 Google LLC
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7
#include "include/core/SkColorSpace.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkBlendModePriv.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrOpsTypes.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrResourceProvider.h"
#include "src/gpu/ganesh/ops/FillRectOp.h"
#include "src/gpu/ganesh/ops/TextureOp.h"
#include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"
#include "tests/Test.h"

#include <memory>
19
20 static std::unique_ptr<skgpu::v1::SurfaceDrawContext> new_SDC(GrRecordingContext* rContext) {
21     return skgpu::v1::SurfaceDrawContext::Make(
22             rContext, GrColorType::kRGBA_8888, nullptr, SkBackingFit::kExact, {128, 128},
23             SkSurfaceProps());
24 }
25
26 static sk_sp<GrSurfaceProxy> create_proxy(GrRecordingContext* rContext) {
27     static constexpr SkISize kDimensions = {128, 128};
28
29     const GrBackendFormat format = rContext->priv().caps()->getDefaultBackendFormat(
30                                                                            GrColorType::kRGBA_8888,
31                                                                            GrRenderable::kYes);
32     return rContext->priv().proxyProvider()->createProxy(
33             format, kDimensions, GrRenderable::kYes, 1, GrMipmapped::kNo, SkBackingFit::kExact,
34             SkBudgeted::kNo, GrProtected::kNo, GrInternalSurfaceFlags::kNone);
35 }
36
37 typedef GrQuadAAFlags (*PerQuadAAFunc)(int i);
38
39 typedef void (*BulkRectTest)(skiatest::Reporter*,
40                              GrDirectContext*,
41                              PerQuadAAFunc,
42                              GrAAType overallAA,
43                              SkBlendMode,
44                              bool addOneByOne,
45                              bool allUniqueProxies,
46                              int requestedTotNumQuads,
47                              int expectedNumOps);
48
49 //-------------------------------------------------------------------------------------------------
50 static void fillrectop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
51                                      PerQuadAAFunc perQuadAA, GrAAType overallAA,
52                                      SkBlendMode blendMode, bool addOneByOne,
53                                      bool allUniqueProxies,
54                                      int requestedTotNumQuads, int expectedNumOps) {
55
56     if (addOneByOne || allUniqueProxies) {
57         return;
58     }
59
60     std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);
61
62     auto quads = new GrQuadSetEntry[requestedTotNumQuads];
63
64     for (int i = 0; i < requestedTotNumQuads; ++i) {
65         quads[i].fRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
66         quads[i].fColor = SK_PMColor4fWHITE;
67         quads[i].fLocalMatrix = SkMatrix::I();
68         quads[i].fAAFlags = perQuadAA(i);
69     }
70
71     GrPaint paint;
72     paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode));
73
74     skgpu::v1::FillRectOp::AddFillRectOps(sdc.get(), nullptr, dContext, std::move(paint), overallAA,
75                                           SkMatrix::I(), quads, requestedTotNumQuads);
76
77     auto opsTask = sdc->testingOnly_PeekLastOpsTask();
78     int actualNumOps = opsTask->numOpChains();
79
80     int actualTotNumQuads = 0;
81
82     for (int i = 0; i < actualNumOps; ++i) {
83         const GrOp* tmp = opsTask->getChain(i);
84         REPORTER_ASSERT(reporter, tmp->classID() == skgpu::v1::FillRectOp::ClassID());
85         REPORTER_ASSERT(reporter, tmp->isChainTail());
86         actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
87     }
88
89     REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
90     REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
91
92     dContext->flushAndSubmit();
93
94     delete[] quads;
95 }
96
97 //-------------------------------------------------------------------------------------------------
98 static void textureop_creation_test(skiatest::Reporter* reporter, GrDirectContext* dContext,
99                                     PerQuadAAFunc perQuadAA, GrAAType overallAA,
100                                     SkBlendMode blendMode, bool addOneByOne,
101                                     bool allUniqueProxies,
102                                     int requestedTotNumQuads, int expectedNumOps) {
103
104     std::unique_ptr<skgpu::v1::SurfaceDrawContext> sdc = new_SDC(dContext);
105
106     GrSurfaceProxyView proxyViewA, proxyViewB;
107
108     if (!allUniqueProxies) {
109         sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
110         sk_sp<GrSurfaceProxy> proxyB = create_proxy(dContext);
111         proxyViewA = GrSurfaceProxyView(std::move(proxyA),
112                                         kTopLeft_GrSurfaceOrigin,
113                                         skgpu::Swizzle::RGBA());
114         proxyViewB = GrSurfaceProxyView(std::move(proxyB),
115                                         kTopLeft_GrSurfaceOrigin,
116                                         skgpu::Swizzle::RGBA());
117     }
118
119     auto set = new GrTextureSetEntry[requestedTotNumQuads];
120
121     for (int i = 0; i < requestedTotNumQuads; ++i) {
122         if (!allUniqueProxies) {
123             // Alternate between two proxies to prevent op merging if the batch API was forced to
124             // submit one op at a time (to work, this does require that all fDstRects overlap).
125             set[i].fProxyView = i % 2 == 0 ? proxyViewA : proxyViewB;
126         } else {
127             // Each op gets its own proxy to force chaining only
128             sk_sp<GrSurfaceProxy> proxyA = create_proxy(dContext);
129             set[i].fProxyView = GrSurfaceProxyView(std::move(proxyA),
130                                                    kTopLeft_GrSurfaceOrigin,
131                                                    skgpu::Swizzle::RGBA());
132         }
133
134         set[i].fSrcAlphaType = kPremul_SkAlphaType;
135         set[i].fSrcRect = SkRect::MakeWH(100.0f, 100.0f);
136         set[i].fDstRect = SkRect::MakeWH(100.5f, 100.5f); // prevent the int non-AA optimization
137         set[i].fDstClipQuad = nullptr;
138         set[i].fPreViewMatrix = nullptr;
139         set[i].fColor = {1.f, 1.f, 1.f, 1.f};
140         set[i].fAAFlags = perQuadAA(i);
141     }
142
143     if (addOneByOne) {
144         for (int i = 0; i < requestedTotNumQuads; ++i) {
145             DrawQuad quad;
146
147             quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect,  SkMatrix::I());
148             quad.fLocal = GrQuad(set[i].fSrcRect);
149             quad.fEdgeFlags = set[i].fAAFlags;
150
151             GrOp::Owner op = skgpu::v1::TextureOp::Make(dContext,
152                                                         set[i].fProxyView,
153                                                         set[i].fSrcAlphaType,
154                                                         nullptr,
155                                                         GrSamplerState::Filter::kNearest,
156                                                         GrSamplerState::MipmapMode::kNone,
157                                                         set[i].fColor,
158                                                         skgpu::v1::TextureOp::Saturate::kYes,
159                                                         blendMode,
160                                                         overallAA,
161                                                         &quad,
162                                                         nullptr);
163             sdc->addDrawOp(nullptr, std::move(op));
164         }
165     } else {
166         skgpu::v1::TextureOp::AddTextureSetOps(sdc.get(),
167                                                nullptr,
168                                                dContext,
169                                                set,
170                                                requestedTotNumQuads,
171                                                requestedTotNumQuads,  // We alternate so proxyCnt == cnt
172                                                GrSamplerState::Filter::kNearest,
173                                                GrSamplerState::MipmapMode::kNone,
174                                                skgpu::v1::TextureOp::Saturate::kYes,
175                                                blendMode,
176                                                overallAA,
177                                                SkCanvas::kStrict_SrcRectConstraint,
178                                                SkMatrix::I(),
179                                                nullptr);
180     }
181
182     auto opsTask = sdc->testingOnly_PeekLastOpsTask();
183     int actualNumOps = opsTask->numOpChains();
184
185     int actualTotNumQuads = 0;
186
187     if (blendMode != SkBlendMode::kSrcOver ||
188         !dContext->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
189         // In either of these two cases, TextureOp creates one op per quad instead. Since
190         // each entry alternates proxies but overlaps geometrically, this will prevent the ops
191         // from being merged back into fewer ops.
192         expectedNumOps = requestedTotNumQuads;
193     }
194     uint32_t expectedOpID = blendMode == SkBlendMode::kSrcOver ? skgpu::v1::TextureOp::ClassID()
195                                                                : skgpu::v1::FillRectOp::ClassID();
196     for (int i = 0; i < actualNumOps; ++i) {
197         const GrOp* tmp = opsTask->getChain(i);
198         REPORTER_ASSERT(reporter, allUniqueProxies || tmp->isChainTail());
199         while (tmp) {
200             REPORTER_ASSERT(reporter, tmp->classID() == expectedOpID);
201             actualTotNumQuads += ((GrDrawOp*) tmp)->numQuads();
202             tmp = tmp->nextInChain();
203         }
204     }
205
206     REPORTER_ASSERT(reporter, expectedNumOps == actualNumOps);
207     REPORTER_ASSERT(reporter, requestedTotNumQuads == actualTotNumQuads);
208
209     dContext->flushAndSubmit();
210
211     delete[] set;
212 }
213
214 //-------------------------------------------------------------------------------------------------
215 static void run_test(GrDirectContext* dContext, skiatest::Reporter* reporter, BulkRectTest test) {
216
217     // This is the simple case where there is no AA at all. We expect 2 non-AA clumps of quads.
218     {
219         auto noAA = [](int i) -> GrQuadAAFlags {
220             return GrQuadAAFlags::kNone;
221         };
222
223         static const int kNumExpectedOps = 2;
224
225         test(reporter, dContext, noAA, GrAAType::kNone, SkBlendMode::kSrcOver,
226              false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
227     }
228
229     // This is the same as the above case except the overall AA is kCoverage. However, since
230     // the per-quad AA is still none, all the quads should be downgraded to non-AA.
231     {
232         auto noAA = [](int i) -> GrQuadAAFlags {
233             return GrQuadAAFlags::kNone;
234         };
235
236         static const int kNumExpectedOps = 2;
237
238         test(reporter, dContext, noAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
239              false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), kNumExpectedOps);
240     }
241
242     // This case has an overall AA of kCoverage but the per-quad AA alternates.
243     // We should end up with several aa-sized clumps
244     {
245         auto alternateAA = [](int i) -> GrQuadAAFlags {
246             return (i % 2) ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
247         };
248
249         int numExpectedOps = 2*GrResourceProvider::MaxNumNonAAQuads() /
250                                                  GrResourceProvider::MaxNumAAQuads();
251
252         test(reporter, dContext, alternateAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
253              false, false, 2*GrResourceProvider::MaxNumNonAAQuads(), numExpectedOps);
254     }
255
256     // In this case we have a run of MaxNumAAQuads non-AA quads and then AA quads. This
257     // exercises the case where we have a clump of quads that can't be upgraded to AA bc of
258     // its size. We expect one clump of non-AA quads followed by one clump of AA quads.
259     {
260         auto runOfNonAA = [](int i) -> GrQuadAAFlags {
261             return (i < GrResourceProvider::MaxNumAAQuads()) ? GrQuadAAFlags::kNone
262                                                              : GrQuadAAFlags::kAll;
263         };
264
265         static const int kNumExpectedOps = 2;
266
267         test(reporter, dContext, runOfNonAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
268              false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
269     }
270
271     // In this case we use a blend mode other than src-over, which hits the FillRectOp fallback
272     // code path for TextureOp. We pass in the expected results if batching was successful, to
273     // that bulk_fill_rect_create_test batches on all modes; bulk_texture_rect_create_test is
274     // responsible for revising its expectations.
275     {
276         auto fixedAA = [](int i) -> GrQuadAAFlags {
277             return GrQuadAAFlags::kAll;
278         };
279
280         static const int kNumExpectedOps = 2;
281
282         test(reporter, dContext, fixedAA, GrAAType::kCoverage, SkBlendMode::kSrcATop,
283              false, false, 2*GrResourceProvider::MaxNumAAQuads(), kNumExpectedOps);
284     }
285
286     // This repros crbug.com/1108475, where we create 1024 non-AA texture ops w/ one coverage-AA
287     // texture op in the middle. Because each op has its own texture, all the texture ops
288     // get chained together so the quad count can exceed the AA maximum.
289     {
290         auto onlyOneAA = [](int i) -> GrQuadAAFlags {
291             return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
292         };
293
294         static const int kNumExpectedOps = 3;
295
296         test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
297              true, true, 1024, kNumExpectedOps);
298     }
299
300     // This repros a problem related to crbug.com/1108475. In this case, the bulk creation
301     // method had no way to break up the set of texture ops at the AA quad limit.
302     {
303         auto onlyOneAA = [](int i) -> GrQuadAAFlags {
304             return i == 256 ? GrQuadAAFlags::kAll : GrQuadAAFlags::kNone;
305         };
306
307         static const int kNumExpectedOps = 2;
308
309         test(reporter, dContext, onlyOneAA, GrAAType::kCoverage, SkBlendMode::kSrcOver,
310              false, true, 1024, kNumExpectedOps);
311     }
312
313 }
314
// Runs every bulk-rect batching scenario against the FillRectOp creation path.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkFillRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, fillrectop_creation_test);
}
318
// Runs every bulk-rect batching scenario against the TextureOp creation path.
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(BulkTextureRectTest, reporter, ctxInfo) {
    run_test(ctxInfo.directContext(), reporter, textureop_creation_test);
}