Update rive-cpp to 2.0 version
[platform/core/uifw/rive-tizen.git] / submodule / skia / src / gpu / ganesh / gl / GrGLBuffer.cpp
1 /*
2  * Copyright 2016 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7
8 #include "src/gpu/ganesh/gl/GrGLBuffer.h"
9
10 #include "include/core/SkTraceMemoryDump.h"
11 #include "src/core/SkTraceEvent.h"
12 #include "src/gpu/ganesh/GrGpuResourcePriv.h"
13 #include "src/gpu/ganesh/gl/GrGLCaps.h"
14 #include "src/gpu/ganesh/gl/GrGLGpu.h"
15
// Convenience wrappers that route GL calls through this buffer's gpu/interface.
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

// Issues a GL allocation call and evaluates to the resulting GL error code.
// When the caps say error checks may be skipped we optimistically report
// GR_GL_NO_ERROR; otherwise pending errors are cleared first so the error
// returned belongs to this call (with OOM detection on both sides).
#define GL_ALLOC_CALL(call)                                            \
    [&] {                                                              \
        if (this->glGpu()->glCaps().skipErrorChecks()) {               \
            GR_GL_CALL(this->glGpu()->glInterface(), call);            \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);              \
        } else {                                                       \
            this->glGpu()->clearErrorsAndCheckForOOM();                \
            GR_GL_CALL_NOERRCHECK(this->glGpu()->glInterface(), call); \
            return this->glGpu()->getErrorAndCheckForOOM();            \
        }                                                              \
    }()

// VALIDATE() checks internal invariants in debug builds; no-op otherwise.
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif
36
37 sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
38                                    GrAccessPattern accessPattern, const void* data) {
39     if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
40         (GrGpuBufferType::kXferCpuToGpu == intendedType ||
41          GrGpuBufferType::kXferGpuToCpu == intendedType)) {
42         return nullptr;
43     }
44
45     sk_sp<GrGLBuffer> buffer(
46             new GrGLBuffer(gpu, size, intendedType, accessPattern, data, /*label=*/{}));
47     if (0 == buffer->bufferID()) {
48         return nullptr;
49     }
50     return buffer;
51 }
52
53 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
54 // objects are implemented as client-side-arrays on tile-deferred architectures.
55 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
56
57 inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
58                                                GrAccessPattern accessPattern,
59                                                const GrGLCaps& caps) {
60     auto drawUsage = [](GrAccessPattern pattern) {
61         switch (pattern) {
62             case kDynamic_GrAccessPattern:
63                 // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
64                 return DYNAMIC_DRAW_PARAM;
65             case kStatic_GrAccessPattern:
66                 return GR_GL_STATIC_DRAW;
67             case kStream_GrAccessPattern:
68                 return GR_GL_STREAM_DRAW;
69         }
70         SkUNREACHABLE;
71     };
72
73     auto readUsage = [](GrAccessPattern pattern) {
74         switch (pattern) {
75             case kDynamic_GrAccessPattern:
76                 return GR_GL_DYNAMIC_READ;
77             case kStatic_GrAccessPattern:
78                 return GR_GL_STATIC_READ;
79             case kStream_GrAccessPattern:
80                 return GR_GL_STREAM_READ;
81         }
82         SkUNREACHABLE;
83     };
84
85     auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
86                                                      GrAccessPattern pattern) {
87         // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
88         if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
89             return drawUsage(pattern);
90         }
91         switch (type) {
92             case GrGpuBufferType::kVertex:
93             case GrGpuBufferType::kIndex:
94             case GrGpuBufferType::kDrawIndirect:
95             case GrGpuBufferType::kXferCpuToGpu:
96             case GrGpuBufferType::kUniform:
97                 return drawUsage(pattern);
98             case GrGpuBufferType::kXferGpuToCpu:
99                 return readUsage(pattern);
100         }
101         SkUNREACHABLE;
102     };
103
104     return usageType(bufferType, accessPattern);
105 }
106
// Creates the GL buffer object and allocates its storage. On allocation
// failure fBufferID is left as 0, which Make() treats as failure.
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu,
                       size_t size,
                       GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern,
                       const void* data,
                       std::string_view label)
        : INHERITED(gpu, size, intendedType, accessPattern, label)
        , fIntendedType(intendedType)
        , fBufferID(0)
        , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
        , fGLSizeInBytes(0)
        , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        // Bind and allocate GL-side storage, uploading initial data if provided.
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        GrGLenum error = GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)size, data, fUsage));
        if (error != GR_GL_NO_ERROR) {
            // Allocation failed (likely OOM): delete the id so callers see failure.
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        } else {
            fGLSizeInBytes = size;
        }
    }
    VALIDATE();
    this->registerWithCache(SkBudgeted::kYes);
    if (!fBufferID) {
        // A buffer without GL backing must never be recycled from the cache.
        this->resourcePriv().removeScratchKey();
    }
}
136
137 inline GrGLGpu* GrGLBuffer::glGpu() const {
138     SkASSERT(!this->wasDestroyed());
139     return static_cast<GrGLGpu*>(this->getGpu());
140 }
141
// Convenience accessor for the GL capability table of the owning gpu.
inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}
145
146 void GrGLBuffer::onRelease() {
147     TRACE_EVENT0("skia.gpu", TRACE_FUNC);
148
149     if (!this->wasDestroyed()) {
150         VALIDATE();
151         // make sure we've not been abandoned or already released
152         if (fBufferID) {
153             GL_CALL(DeleteBuffers(1, &fBufferID));
154             fBufferID = 0;
155             fGLSizeInBytes = 0;
156         }
157         fMapPtr = nullptr;
158         VALIDATE();
159     }
160
161     INHERITED::onRelease();
162 }
163
// The GL context is gone: drop our bookkeeping without making GL calls.
void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}
171
// Maps the buffer into CPU-visible memory via whichever map mechanism the
// caps report. On success fMapPtr is set; on failure (or kNone) it stays
// null and callers fall back to onUpdateData-style uploads.
void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // Mapping unsupported on this driver; leave fMapPtr null.
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GrGLenum error =
                            GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                    if (error != GR_GL_NO_ERROR) {
                        // Reallocation failed; abort without mapping.
                        return;
                    }
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    // Any successful path above (re)allocated storage at the logical size.
    fGLSizeInBytes = this->size();
    VALIDATE();
}
239
// Unmaps a previously mapped buffer using the mechanism that mapped it,
// then clears fMapPtr.
void GrGLBuffer::onUnmap() {
    SkASSERT(fBufferID);
    VALIDATE();
    SkASSERT(this->isMapped());
    // Defensive release-build check (the assert above is debug-only).
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // onMap() never sets fMapPtr in this mode, so isMapped() can't be true.
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}
266
267 bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
268     SkASSERT(fBufferID);
269     if (this->wasDestroyed()) {
270         return false;
271     }
272
273     SkASSERT(!this->isMapped());
274     VALIDATE();
275     if (srcSizeInBytes > this->size()) {
276         return false;
277     }
278     SkASSERT(srcSizeInBytes <= this->size());
279     // bindbuffer handles dirty context
280     GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
281
282     if (this->glCaps().useBufferDataNullHint()) {
283         if (this->size() == srcSizeInBytes) {
284             GrGLenum error =
285                     GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
286             if (error != GR_GL_NO_ERROR) {
287                 return false;
288             }
289         } else {
290             // Before we call glBufferSubData we give the driver a hint using
291             // glBufferData with nullptr. This makes the old buffer contents
292             // inaccessible to future draws. The GPU may still be processing
293             // draws that reference the old contents. With this hint it can
294             // assign a different allocation for the new contents to avoid
295             // flushing the gpu past draws consuming the old contents.
296             // TODO I think we actually want to try calling bufferData here
297             GrGLenum error =
298                     GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)this->size(), nullptr, fUsage));
299             if (error != GR_GL_NO_ERROR) {
300                 return false;
301             }
302             GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
303         }
304         fGLSizeInBytes = this->size();
305     } else {
306         // Note that we're cheating on the size here. Currently no methods
307         // allow a partial update that preserves contents of non-updated
308         // portions of the buffer (map() does a glBufferData(..size, nullptr..))
309         GrGLenum error =
310                 GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
311         if (error != GR_GL_NO_ERROR) {
312             return false;
313         }
314         fGLSizeInBytes = srcSizeInBytes;
315     }
316     VALIDATE();
317     return true;
318 }
319
320 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
321                                        const SkString& dumpName) const {
322     SkString buffer_id;
323     buffer_id.appendU32(this->bufferID());
324     traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
325                                       buffer_id.c_str());
326 }
327
#ifdef SK_DEBUG

// Debug-only invariant checks, invoked via the VALIDATE() macro.
void GrGLBuffer::validate() const {
    // No GL buffer object implies no GL-side storage is tracked.
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    // While mapped, the GL allocation must cover the logical buffer size.
    SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}

#endif