/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
8 #include "include/gpu/GrRecordingContext.h"
10 #include "include/core/SkCapabilities.h"
11 #include "include/gpu/GrContextThreadSafeProxy.h"
12 #include "src/core/SkArenaAlloc.h"
13 #include "src/gpu/ganesh/GrAuditTrail.h"
14 #include "src/gpu/ganesh/GrCaps.h"
15 #include "src/gpu/ganesh/GrContextThreadSafeProxyPriv.h"
16 #include "src/gpu/ganesh/GrDrawingManager.h"
17 #include "src/gpu/ganesh/GrMemoryPool.h"
18 #include "src/gpu/ganesh/GrProgramDesc.h"
19 #include "src/gpu/ganesh/GrProxyProvider.h"
20 #include "src/gpu/ganesh/GrRecordingContextPriv.h"
21 #include "src/gpu/ganesh/SkGr.h"
22 #include "src/gpu/ganesh/SurfaceContext.h"
23 #include "src/gpu/ganesh/effects/GrSkSLFP.h"
24 #include "src/text/gpu/TextBlob.h"
25 #include "src/text/gpu/TextBlobRedrawCoordinator.h"
28 #include "src/gpu/ganesh/ops/AtlasTextOp.h"
31 using TextBlobRedrawCoordinator = sktext::gpu::TextBlobRedrawCoordinator;
33 GrRecordingContext::ProgramData::ProgramData(std::unique_ptr<const GrProgramDesc> desc,
34 const GrProgramInfo* info)
35 : fDesc(std::move(desc))
39 GrRecordingContext::ProgramData::ProgramData(ProgramData&& other)
40 : fDesc(std::move(other.fDesc))
41 , fInfo(other.fInfo) {
44 GrRecordingContext::ProgramData::~ProgramData() = default;
46 GrRecordingContext::GrRecordingContext(sk_sp<GrContextThreadSafeProxy> proxy, bool ddlRecording)
47 : INHERITED(std::move(proxy))
48 , fAuditTrail(new GrAuditTrail())
49 , fArenas(ddlRecording) {
50 fProxyProvider = std::make_unique<GrProxyProvider>(this);
53 GrRecordingContext::~GrRecordingContext() {
55 skgpu::v1::AtlasTextOp::ClearCache();
59 bool GrRecordingContext::init() {
60 if (!INHERITED::init()) {
65 skgpu::v1::PathRendererChain::Options prcOptions;
66 prcOptions.fAllowPathMaskCaching = this->options().fAllowPathMaskCaching;
68 prcOptions.fGpuPathRenderers = this->options().fGpuPathRenderers;
70 // FIXME: Once this is removed from Chrome and Android, rename to fEnable"".
71 if (this->options().fDisableDistanceFieldPaths) {
72 prcOptions.fGpuPathRenderers &= ~GpuPathRenderers::kSmall;
76 bool reduceOpsTaskSplitting = true;
77 if (this->caps()->avoidReorderingRenderTasks()) {
78 reduceOpsTaskSplitting = false;
79 } else if (GrContextOptions::Enable::kYes == this->options().fReduceOpsTaskSplitting) {
80 reduceOpsTaskSplitting = true;
81 } else if (GrContextOptions::Enable::kNo == this->options().fReduceOpsTaskSplitting) {
82 reduceOpsTaskSplitting = false;
84 fDrawingManager.reset(new GrDrawingManager(this,
88 reduceOpsTaskSplitting));
92 void GrRecordingContext::abandonContext() {
93 INHERITED::abandonContext();
95 this->destroyDrawingManager();
98 GrDrawingManager* GrRecordingContext::drawingManager() {
99 return fDrawingManager.get();
102 void GrRecordingContext::destroyDrawingManager() {
103 fDrawingManager.reset();
106 GrRecordingContext::Arenas::Arenas(SkArenaAlloc* recordTimeAllocator,
107 sktext::gpu::SubRunAllocator* subRunAllocator)
108 : fRecordTimeAllocator(recordTimeAllocator)
109 , fRecordTimeSubRunAllocator(subRunAllocator) {
110 // OwnedArenas should instantiate these before passing the bare pointer off to this struct.
111 SkASSERT(subRunAllocator);
114 // Must be defined here so that std::unique_ptr can see the sizes of the various pools, otherwise
115 // it can't generate a default destructor for them.
116 GrRecordingContext::OwnedArenas::OwnedArenas(bool ddlRecording) : fDDLRecording(ddlRecording) {}
117 GrRecordingContext::OwnedArenas::~OwnedArenas() {}
119 GrRecordingContext::OwnedArenas& GrRecordingContext::OwnedArenas::operator=(OwnedArenas&& a) {
120 fDDLRecording = a.fDDLRecording;
121 fRecordTimeAllocator = std::move(a.fRecordTimeAllocator);
122 fRecordTimeSubRunAllocator = std::move(a.fRecordTimeSubRunAllocator);
126 GrRecordingContext::Arenas GrRecordingContext::OwnedArenas::get() {
127 if (!fRecordTimeAllocator && fDDLRecording) {
128 // TODO: empirically determine a better number for SkArenaAlloc's firstHeapAllocation param
129 fRecordTimeAllocator = std::make_unique<SkArenaAlloc>(1024);
132 if (!fRecordTimeSubRunAllocator) {
133 fRecordTimeSubRunAllocator = std::make_unique<sktext::gpu::SubRunAllocator>();
136 return {fRecordTimeAllocator.get(), fRecordTimeSubRunAllocator.get()};
139 GrRecordingContext::OwnedArenas&& GrRecordingContext::detachArenas() {
140 return std::move(fArenas);
143 TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() {
144 return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
147 const TextBlobRedrawCoordinator* GrRecordingContext::getTextBlobRedrawCoordinator() const {
148 return fThreadSafeProxy->priv().getTextBlobRedrawCoordinator();
151 GrThreadSafeCache* GrRecordingContext::threadSafeCache() {
152 return fThreadSafeProxy->priv().threadSafeCache();
155 const GrThreadSafeCache* GrRecordingContext::threadSafeCache() const {
156 return fThreadSafeProxy->priv().threadSafeCache();
159 void GrRecordingContext::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
160 this->drawingManager()->addOnFlushCallbackObject(onFlushCBObject);
////////////////////////////////////////////////////////////////////////////////
165 sk_sp<const SkCapabilities> GrRecordingContext::skCapabilities() const {
166 return this->refCaps();
169 int GrRecordingContext::maxTextureSize() const { return this->caps()->maxTextureSize(); }
171 int GrRecordingContext::maxRenderTargetSize() const { return this->caps()->maxRenderTargetSize(); }
173 bool GrRecordingContext::colorTypeSupportedAsImage(SkColorType colorType) const {
174 GrBackendFormat format =
175 this->caps()->getDefaultBackendFormat(SkColorTypeToGrColorType(colorType),
177 return format.isValid();
///////////////////////////////////////////////////////////////////////////////////////////////////
182 #ifdef SK_ENABLE_DUMP_GPU
183 #include "src/utils/SkJSONWriter.h"
185 void GrRecordingContext::dumpJSON(SkJSONWriter* writer) const {
186 writer->beginObject();
189 writer->appendS32("path_masks_generated", this->stats()->numPathMasksGenerated());
190 writer->appendS32("path_mask_cache_hits", this->stats()->numPathMaskCacheHits());
196 void GrRecordingContext::dumpJSON(SkJSONWriter*) const { }
203 void GrRecordingContext::Stats::dump(SkString* out) const {
204 out->appendf("Num Path Masks Generated: %d\n", fNumPathMasksGenerated);
205 out->appendf("Num Path Mask Cache Hits: %d\n", fNumPathMaskCacheHits);
208 void GrRecordingContext::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys,
209 SkTArray<double>* values) const {
210 keys->push_back(SkString("path_masks_generated"));
211 values->push_back(fNumPathMasksGenerated);
213 keys->push_back(SkString("path_mask_cache_hits"));
214 values->push_back(fNumPathMaskCacheHits);
217 void GrRecordingContext::DMSAAStats::dumpKeyValuePairs(SkTArray<SkString>* keys,
218 SkTArray<double>* values) const {
219 keys->push_back(SkString("dmsaa_render_passes"));
220 values->push_back(fNumRenderPasses);
222 keys->push_back(SkString("dmsaa_multisample_render_passes"));
223 values->push_back(fNumMultisampleRenderPasses);
225 for (const auto& [name, count] : fTriggerCounts) {
226 keys->push_back(SkStringPrintf("dmsaa_trigger_%s", name.c_str()));
227 values->push_back(count);
231 void GrRecordingContext::DMSAAStats::dump() const {
232 SkDebugf("DMSAA Render Passes: %d\n", fNumRenderPasses);
233 SkDebugf("DMSAA Multisample Render Passes: %d\n", fNumMultisampleRenderPasses);
234 if (!fTriggerCounts.empty()) {
235 SkDebugf("DMSAA Triggers:\n");
236 for (const auto& [name, count] : fTriggerCounts) {
237 SkDebugf(" %s: %d\n", name.c_str(), count);
242 void GrRecordingContext::DMSAAStats::merge(const DMSAAStats& stats) {
243 fNumRenderPasses += stats.fNumRenderPasses;
244 fNumMultisampleRenderPasses += stats.fNumMultisampleRenderPasses;
245 for (const auto& [name, count] : stats.fTriggerCounts) {
246 fTriggerCounts[name] += count;
250 #endif // GR_GPU_STATS
251 #endif // GR_TEST_UTILS