Update rive-cpp to 2.0 version
[platform/core/uifw/rive-tizen.git] / submodule/skia/src/gpu/ganesh/ops/SoftwarePathRenderer.cpp
/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ganesh/ops/SoftwarePathRenderer.h"

#include "include/gpu/GrDirectContext.h"
#include "include/private/SkSemaphore.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrClip.h"
#include "src/gpu/ganesh/GrDeferredProxyUploader.h"
#include "src/gpu/ganesh/GrDirectContextPriv.h"
#include "src/gpu/ganesh/GrGpuResourcePriv.h"
#include "src/gpu/ganesh/GrOpFlushState.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/GrSWMaskHelper.h"
#include "src/gpu/ganesh/GrUtil.h"
#include "src/gpu/ganesh/SkGr.h"
#include "src/gpu/ganesh/effects/GrTextureEffect.h"
#include "src/gpu/ganesh/geometry/GrStyledShape.h"
#include "src/gpu/ganesh/ops/GrDrawOp.h"
#include "src/gpu/ganesh/v1/SurfaceDrawContext_v1.h"

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix,
                     const GrStyledShape& shape, GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrStyledShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrStyledShape fShape;
    GrAA fAA;
};

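// Maps the shape's styled bounds into device space and rounds out to an SkIRect. Returns false
// if the shape is empty or the device-space bounds cannot be represented in 32-bit integers.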
bool get_unclipped_shape_dev_bounds(const GrStyledShape& shape, const SkMatrix& matrix,
                                    SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger ints
    // would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

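// Allocates an uninitialized A8 (alpha-only) texture proxy sized for the mask. No pixel data is
// provided here; the threaded path in onDrawPath attaches a GrTDeferredProxyUploader that fills
// the proxy with the CPU-rendered mask at flush time.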
GrSurfaceProxyView make_deferred_mask_texture_view(GrRecordingContext* rContext,
                                                   SkBackingFit fit,
                                                   SkISize dimensions) {
    GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
    const GrCaps* caps = rContext->priv().caps();

    const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                 GrRenderable::kNo);

    skgpu::Swizzle swizzle = caps->getReadSwizzle(format, GrColorType::kAlpha_8);

    auto proxy =
            proxyProvider->createProxy(format, dimensions, GrRenderable::kNo, 1, GrMipmapped::kNo,
                                       fit, SkBudgeted::kYes, GrProtected::kNo);
    return {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
}


} // anonymous namespace

namespace skgpu::v1 {

////////////////////////////////////////////////////////////////////////////////
PathRenderer::CanDrawPath SoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrStyledShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool SoftwarePathRenderer::GetShapeAndClipBounds(SurfaceDrawContext* sdc,
                                                 const GrClip* clip,
                                                 const GrStyledShape& shape,
                                                 const SkMatrix& matrix,
                                                 SkIRect* unclippedDevShapeBounds,
                                                 SkIRect* clippedDevShapeBounds,
                                                 SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    *devClipBounds = clip ? clip->getConservativeBounds()
                          : SkIRect::MakeWH(sdc->width(), sdc->height());

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::MakeEmpty();
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

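// Thin wrapper over SurfaceDrawContext::stencilRect: draws 'rect' without anti-aliasing using the
// supplied stencil settings, with 'localMatrix' providing the local coordinates for the paint.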
void SoftwarePathRenderer::DrawNonAARect(SurfaceDrawContext* sdc,
                                         GrPaint&& paint,
                                         const GrUserStencilSettings& userStencilSettings,
                                         const GrClip* clip,
                                         const SkMatrix& viewMatrix,
                                         const SkRect& rect,
                                         const SkMatrix& localMatrix) {
    sdc->stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                     viewMatrix, rect, &localMatrix);
}

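// For inverse fills, covers the portion of the clip that lies outside the path's device bounds
// with up to four axis-aligned rects (above, left of, right of, and below the path bounds). The
// inverted view matrix supplies local coords so the paint is evaluated in the original space.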
void SoftwarePathRenderer::DrawAroundInvPath(SurfaceDrawContext* sdc,
                                             GrPaint&& paint,
                                             const GrUserStencilSettings& userStencilSettings,
                                             const GrClip* clip,
                                             const SkMatrix& viewMatrix,
                                             const SkIRect& devClipBounds,
                                             const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft),  SkIntToScalar(devClipBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(sdc, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft),  SkIntToScalar(devPathBounds.fBottom),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
        DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

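// Draws 'deviceSpaceRectToDraw' using the A8 mask in 'view' as the paint's coverage. The "aaaa"
// swizzle replicates the mask's single channel, nearest filtering is sufficient since the mask was
// rasterized at device resolution, and the texture matrix lines the mask's top-left texel up with
// 'textureOriginInDeviceSpace'.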
void SoftwarePathRenderer::DrawToTargetWithShapeMask(
        GrSurfaceProxyView view,
        SurfaceDrawContext* sdc,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip* clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    view.concatSwizzle(skgpu::Swizzle("aaaa"));

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and apply
    // a translation so that the top-left of the device bounds maps to 0,0, and then a scaling
    // matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::Translate(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);

    paint.setCoverageFragmentProcessor(GrTextureEffect::Make(
            std::move(view), kPremul_SkAlphaType, maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(sdc, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool SoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fContext->priv().auditTrail(),
                              "SoftwarePathRenderer::onDrawPath");

    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not
    // If the path is hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                        !GrIsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                        *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

    if (!GetShapeAndClipBounds(args.fSurfaceDrawContext,
                               args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fSurfaceDrawContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

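    // Cache key layout: [0] mask width, [1] mask height, [2..5] the upper-left 2x2 of the view
    // matrix, [6] fractional translate packed with hairline/cap style bits, [7..] the shape's
    // unstyled key.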
    skgpu::UniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
        skgpu::UniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
                                     "SW Path Mask");
        builder[0] = boundsForMask->width();
        builder[1] = boundsForMask->height();

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[2] = SkFloat2Bits(sx);
        builder[3] = SkFloat2Bits(sy);
        builder[4] = SkFloat2Bits(kx);
        builder[5] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
        builder[6] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[7]);
    }

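    // Look for a previously generated mask under this key before rasterizing a new one; a hit is
    // counted in the context's path-mask cache stats.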
    GrSurfaceProxyView view;
    if (useCache) {
        sk_sp<GrTextureProxy> proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey);
        if (proxy) {
            skgpu::Swizzle swizzle = args.fSurfaceDrawContext->caps()->getReadSwizzle(
                    proxy->backendFormat(), GrColorType::kAlpha_8);
            view = {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
            args.fContext->priv().stats()->incNumPathMasksCacheHits();
        }
    }
    if (!view) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

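        // With a task group (direct context), rasterize the mask on a worker thread and defer the
        // upload to flush time; otherwise rasterize synchronously with GrSWMaskHelper right here.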
        if (taskGroup) {
            view = make_deferred_mask_texture_view(args.fContext, fit, boundsForMask->size());
            if (!view) {
                return false;
            }

            auto uploader = std::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            view.asTextureProxy()->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            view = helper.toTextureView(args.fContext, fit);
        }

        if (!view) {
            return false;
        }
        if (useCache) {
            SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);

            // We will add an invalidator to the path so that if the path goes away we will
            // delete or recycle the mask texture.
            auto listener = GrMakeUniqueKeyInvalidationListener(&maskKey,
                                                                args.fContext->priv().contextID());
            fProxyProvider->assignUniqueKeyToProxy(maskKey, view.asTextureProxy());
            args.fShape->addGenIDChangeListener(std::move(listener));
        }

        args.fContext->priv().stats()->incNumPathMasksGenerated();
    }
    SkASSERT(view);
    if (inverseFilled) {
        DrawAroundInvPath(args.fSurfaceDrawContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(std::move(view), args.fSurfaceDrawContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}

} // namespace skgpu::v1