Update rive-cpp to version 2.0
platform/core/uifw/rive-tizen.git: submodule/skia/src/gpu/ganesh/GrRecordingContext.cpp
/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrRecordingContext.h"

#include "include/core/SkCapabilities.h"
#include "include/gpu/GrContextThreadSafeProxy.h"
#include "src/core/SkArenaAlloc.h"
#include "src/gpu/ganesh/GrAuditTrail.h"
#include "src/gpu/ganesh/GrCaps.h"
#include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
#include "src/gpu/ganesh/GrDrawingManager.h"
#include "src/gpu/ganesh/GrMemoryPool.h"
#include "src/gpu/ganesh/GrProgramDesc.h"
#include "src/gpu/ganesh/GrProxyProvider.h"
#include "src/gpu/ganesh/GrRecordingContextPriv.h"
#include "src/gpu/ganesh/SkGr.h"
#include "src/gpu/ganesh/SurfaceContext.h"
#include "src/gpu/ganesh/effects/GrSkSLFP.h"
#include "src/text/gpu/TextBlob.h"
#include "src/text/gpu/TextBlobRedrawCoordinator.h"

#if SK_GPU_V1
#include "src/gpu/ganesh/ops/AtlasTextOp.h"
#endif

using TextBlobRedrawCoordinator = sktext::gpu::TextBlobRedrawCoordinator;

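// ProgramData owns its GrProgramDesc outright but only borrows the GrProgramInfo
// pointer, so the info must outlive this struct.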
GrRecordingContext::ProgramData::ProgramData(std::unique_ptr<const GrProgramDesc> desc,
                                             const GrProgramInfo* info)
        : fDesc(std::move(desc))
        , fInfo(info) {
}

GrRecordingContext::ProgramData::ProgramData(ProgramData&& other)
        : fDesc(std::move(other.fDesc))
        , fInfo(other.fInfo) {
}

GrRecordingContext::ProgramData::~ProgramData() = default;

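// 'ddlRecording' is forwarded to OwnedArenas, which uses it to decide whether a
// record-time SkArenaAlloc is needed (see OwnedArenas::get() below).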
GrRecordingContext::GrRecordingContext(sk_sp<GrContextThreadSafeProxy> proxy, bool ddlRecording)
        : INHERITED(std::move(proxy))
        , fAuditTrail(new GrAuditTrail())
        , fArenas(ddlRecording) {
    fProxyProvider = std::make_unique<GrProxyProvider>(this);
}

GrRecordingContext::~GrRecordingContext() {
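    // AtlasTextOp keeps a static cache of op memory; release it as this context goes away.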
#if SK_GPU_V1
    skgpu::v1::AtlasTextOp::ClearCache();
#endif
}

bool GrRecordingContext::init() {
    if (!INHERITED::init()) {
        return false;
    }

#if SK_GPU_V1
    skgpu::v1::PathRendererChain::Options prcOptions;
    prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
#if GR_TEST_UTILS
    prcOptions.fGpuPathRenderers = this->options().fGpuPathRenderers;
#endif
    // FIXME: Once this is removed from Chrome and Android, rename the option to an
    // fEnable-style flag.
    if (this->options().fDisableDistanceFieldPaths) {
        prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
    }
#endif

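    // Reduced opsTask splitting defaults to on; the caps bit takes priority over any
    // explicit request made through GrContextOptions.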
    bool reduceOpsTaskSplitting = true;
    if (this->caps()->avoidReorderingRenderTasks()) {
        reduceOpsTaskSplitting = false;
    } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpsTaskSplitting) {
        reduceOpsTaskSplitting = true;
    } else if (GrContextOptions::Enable::kNo == this->options().fReduceOpsTaskSplitting) {
        reduceOpsTaskSplitting = false;
    }
    fDrawingManager.reset(new GrDrawingManager(this,
#if SK_GPU_V1
                                               prcOptions,
#endif
                                               reduceOpsTaskSplitting));
    return true;
}

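// Once a context is abandoned nothing more can be recorded, so the drawing manager
// can be torn down immediately.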
void GrRecordingContext::abandonContext() {
    INHERITED::abandonContext();

    this->destroyDrawingManager();
}

GrDrawingManager* GrRecordingContext::drawingManager() {
    return fDrawingManager.get();
}

void GrRecordingContext::destroyDrawingManager() {
    fDrawingManager.reset();
}

GrRecordingContext::Arenas::Arenas(SkArenaAlloc* recordTimeAllocator,
                                   sktext::gpu::SubRunAllocator* subRunAllocator)
        : fRecordTimeAllocator(recordTimeAllocator)
        , fRecordTimeSubRunAllocator(subRunAllocator) {
    // OwnedArenas should instantiate these before passing the bare pointer off to this struct.
    SkASSERT(subRunAllocator);
}

// Must be defined here so that std::unique_ptr can see the sizes of the various pools, otherwise
// it can't generate a default destructor for them.
GrRecordingContext::OwnedArenas::OwnedArenas(bool ddlRecording) : fDDLRecording(ddlRecording) {}
GrRecordingContext::OwnedArenas::~OwnedArenas() {}

GrRecordingContext::OwnedArenas& GrRecordingContext::OwnedArenas::operator=(OwnedArenas&& a) {
    fDDLRecording = a.fDDLRecording;
    fRecordTimeAllocator = std::move(a.fRecordTimeAllocator);
    fRecordTimeSubRunAllocator = std::move(a.fRecordTimeSubRunAllocator);
    return *this;
}

GrRecordingContext::Arenas GrRecordingContext::OwnedArenas::get() {
    if (!fRecordTimeAllocator && fDDLRecording) {
        // TODO: empirically determine a better number for SkArenaAlloc's firstHeapAllocation param
        fRecordTimeAllocator = std::make_unique<SkArenaAlloc>(1024);
    }

    if (!fRecordTimeSubRunAllocator) {
        fRecordTimeSubRunAllocator = std::make_unique<sktext::gpu::SubRunAllocator>();
    }

    return {fRecordTimeAllocator.get(), fRecordTimeSubRunAllocator.get()};
}

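// Moves the record-time arenas out of this context (e.g. into a just-detached DDL);
// OwnedArenas::get() will lazily rebuild them if recording continues.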
GrRecordingContext::OwnedArenas&& GrRecordingContext::detachArenas() {
    return std::move(fArenas);
}

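// The text blob coordinator and the thread-safe cache both live on the context's
// thread-safe proxy, so they can be shared across related contexts.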
TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}

const TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() const {
    return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
}

GrThreadSafeCache* GrRecordingContext::threadSafeCache() {
    return fThreadSafeProxy->priv().threadSafeCache();
}

const GrThreadSafeCache* GrRecordingContext::threadSafeCache() const {
    return fThreadSafeProxy->priv().threadSafeCache();
}

void GrRecordingContext::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    this->drawingManager()->addOnFlushCallbackObject(onFlushCBObject);
}

////////////////////////////////////////////////////////////////////////////////

sk_sp<const SkCapabilities> GrRecordingContext::skCapabilities() const {
    return this->refCaps();
}

int GrRecordingContext::maxTextureSize() const { return this->caps()->maxTextureSize(); }

int GrRecordingContext::maxRenderTargetSize() const { return this->caps()->maxRenderTargetSize(); }

bool GrRecordingContext::colorTypeSupportedAsImage(SkColorType colorType) const {
    GrBackendFormat format =
            this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
                                                  GrRenderable::kNo);
    return format.isValid();
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#ifdef SK_ENABLE_DUMP_GPU
#include "src/utils/SkJSONWriter.h"

void GrRecordingContext::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

#if GR_GPU_STATS
    writer->appendS32("path_masks_generated", this->stats()->numPathMasksGenerated());
    writer->appendS32("path_mask_cache_hits", this->stats()->numPathMaskCacheHits());
#endif

    writer->endObject();
}
#else
void GrRecordingContext::dumpJSON(SkJSONWriter*) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS

void GrRecordingContext::Stats::dump(SkString* out) const {
    out->appendf("Num Path Masks Generated: %d\n", fNumPathMasksGenerated);
    out->appendf("Num Path Mask Cache Hits: %d\n", fNumPathMaskCacheHits);
}

void GrRecordingContext::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys,
                                                  SkTArray<double>* values) const {
    keys->push_back(SkString("path_masks_generated"));
    values->push_back(fNumPathMasksGenerated);

    keys->push_back(SkString("path_mask_cache_hits"));
    values->push_back(fNumPathMaskCacheHits);
}

void GrRecordingContext::DMSAAStats::dumpKeyValuePairs(SkTArray<SkString>* keys,
                                                       SkTArray<double>* values) const {
    keys->push_back(SkString("dmsaa_render_passes"));
    values->push_back(fNumRenderPasses);

    keys->push_back(SkString("dmsaa_multisample_render_passes"));
    values->push_back(fNumMultisampleRenderPasses);

    for (const auto& [name, count] : fTriggerCounts) {
        keys->push_back(SkStringPrintf("dmsaa_trigger_%s", name.c_str()));
        values->push_back(count);
    }
}

void GrRecordingContext::DMSAAStats::dump() const {
    SkDebugf("DMSAA Render Passes: %d\n", fNumRenderPasses);
    SkDebugf("DMSAA Multisample Render Passes: %d\n", fNumMultisampleRenderPasses);
    if (!fTriggerCounts.empty()) {
        SkDebugf("DMSAA Triggers:\n");
        for (const auto& [name, count] : fTriggerCounts) {
            SkDebugf("    %s: %d\n", name.c_str(), count);
        }
    }
}

void GrRecordingContext::DMSAAStats::merge(const DMSAAStats& stats) {
    fNumRenderPasses += stats.fNumRenderPasses;
    fNumMultisampleRenderPasses += stats.fNumMultisampleRenderPasses;
    for (const auto& [name, count] : stats.fTriggerCounts) {
        fTriggerCounts[name] += count;
    }
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS