/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLBufferImpl.h"
#include "GrGpuGL.h"

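// GL_CALL routes a GL command through the GrGpuGL's GrGLInterface. VALIDATE() expands to
// the consistency checks in validate() in debug (SK_DEBUG) builds and to a no-op otherwise.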
#define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW

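// GrGLBufferImpl is the shared implementation behind Skia's GL vertex and index buffers
// (fBufferType is either GR_GL_ARRAY_BUFFER or GR_GL_ELEMENT_ARRAY_BUFFER). A desc with a
// zero buffer ID denotes a CPU-side allocation rather than a GL buffer object.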
GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferType)
    : fDesc(desc)
    , fBufferType(bufferType)
    , fMapPtr(NULL) {
    if (0 == desc.fID) {
        fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
        fGLSizeInBytes = 0;
    } else {
        fCPUData = NULL;
        // We assume that the GL buffer was created at the desc's size initially.
        fGLSizeInBytes = fDesc.fSizeInBytes;
    }
    VALIDATE();
}

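// Frees the buffer's storage: the CPU-side allocation if there is one, otherwise the GL
// buffer object (unless it is wrapped, i.e. externally owned). GrGpuGL is notified of the
// deletion, presumably so it can invalidate any cached binding of the old ID.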
void GrGLBufferImpl::release(GrGpuGL* gpu) {
    VALIDATE();
    // make sure we've not been abandoned or already released
    if (fCPUData) {
        sk_free(fCPUData);
        fCPUData = NULL;
    } else if (fDesc.fID && !fDesc.fIsWrapped) {
        GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID));
        if (GR_GL_ARRAY_BUFFER == fBufferType) {
            gpu->notifyVertexBufferDelete(fDesc.fID);
        } else {
            SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
            gpu->notifyIndexBufferDelete(fDesc.fID);
        }
        fDesc.fID = 0;
        fGLSizeInBytes = 0;
    }
    fMapPtr = NULL;
    VALIDATE();
}

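// Forgets the GL buffer object without deleting it (used when the GL context is assumed
// lost, so no GL calls are made) and frees any CPU-side storage.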
void GrGLBufferImpl::abandon() {
    fDesc.fID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = NULL;
    sk_free(fCPUData);
    fCPUData = NULL;
    VALIDATE();
}

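// Binds the buffer to its GL target via GrGpuGL so that the gpu object can track the
// current binding (index buffers are bound together with the default vertex array).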
void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
    VALIDATE();
    if (GR_GL_ARRAY_BUFFER == fBufferType) {
        gpu->bindVertexBuffer(fDesc.fID);
    } else {
        SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
        gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID);
    }
    VALIDATE();
}

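// Returns a writable pointer to the buffer's contents. CPU-backed buffers simply expose
// their malloc'ed block; GL-backed buffers are first orphaned via BufferData(NULL), so the
// old contents are discarded and the mapping is effectively write-only.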
void* GrGLBufferImpl::map(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (0 == fDesc.fID) {
        fMapPtr = fCPUData;
    } else {
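        // GL-backed buffer: the mapping path depends on which buffer-mapping API the
        // driver exposes (none, glMapBuffer, glMapBufferRange, or the Chromium-specific
        // MapBufferSubData path).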
        switch (gpu->glCaps().mapBufferType()) {
            case GrGLCaps::kNone_MapBufferType:
                VALIDATE();
                return NULL;
            case GrGLCaps::kMapBuffer_MapBufferType:
                this->bind(gpu);
                // Let the driver know it can discard the old data.
                if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
                               MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
                break;
            case GrGLCaps::kMapBufferRange_MapBufferType: {
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
                                                    GR_GL_MAP_WRITE_BIT;
                GR_GL_CALL_RET(gpu->glInterface(),
                               fMapPtr,
                               MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
                break;
            }
            case GrGLCaps::kChromium_MapBufferType:
                this->bind(gpu);
                // Make sure the GL buffer size agrees with fDesc before mapping.
                if (fDesc.fSizeInBytes != fGLSizeInBytes) {
                    fGLSizeInBytes = fDesc.fSizeInBytes;
                    GL_CALL(gpu,
                            BufferData(fBufferType, fGLSizeInBytes, NULL,
                                       fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
                }
                GR_GL_CALL_RET(gpu->glInterface(),
                               fMapPtr,
                               MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
                break;
        }
    }
    VALIDATE();
    return fMapPtr;
}

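// Releases a mapping obtained from map(). CPU-backed buffers need no GL work; GL-backed
// buffers are unmapped through whichever API produced the mapping.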
void GrGLBufferImpl::unmap(GrGpuGL* gpu) {
    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 != fDesc.fID) {
        switch (gpu->glCaps().mapBufferType()) {
            case GrGLCaps::kNone_MapBufferType:
                SkDEBUGFAIL("Shouldn't get here.");
                return;
            case GrGLCaps::kMapBuffer_MapBufferType: // fall through
            case GrGLCaps::kMapBufferRange_MapBufferType:
                this->bind(gpu);
                GL_CALL(gpu, UnmapBuffer(fBufferType));
                break;
            case GrGLCaps::kChromium_MapBufferType:
                this->bind(gpu);
                GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr));
                break;
        }
    }
    fMapPtr = NULL;
}

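// True while a pointer returned by map() is outstanding.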
bool GrGLBufferImpl::isMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

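// Replaces the buffer's contents with srcSizeInBytes bytes from src; fails if the source is
// larger than the buffer. Like map(), this discards the previous contents rather than
// preserving any non-updated tail of the buffer.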
bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
    SkASSERT(!this->isMapped());
    VALIDATE();
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }
    if (0 == fDesc.fID) {
        memcpy(fCPUData, src, srcSizeInBytes);
        return true;
    }
    this->bind(gpu);
    GrGLenum usage = fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    if (fDesc.fSizeInBytes == srcSizeInBytes) {
        GL_CALL(gpu, BufferData(fBufferType, (GrGLsizeiptr) srcSizeInBytes, src, usage));
    } else {
        // Before we call glBufferSubData we give the driver a hint using
        // glBufferData with NULL. This makes the old buffer contents
        // inaccessible to future draws. The GPU may still be processing
        // draws that reference the old contents. With this hint it can
        // assign a different allocation for the new contents to avoid
        // flushing the gpu past draws consuming the old contents.
        fGLSizeInBytes = fDesc.fSizeInBytes;
        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL, usage));
        GL_CALL(gpu, BufferSubData(fBufferType, 0, (GrGLsizeiptr) srcSizeInBytes, src));
    }
#else
    // Note that we're cheating on the size here. Currently no methods
    // allow a partial update that preserves contents of non-updated
    // portions of the buffer (map() does a glBufferData(..size, NULL..))
    bool doSubData = false;
#if GR_GL_MAC_BUFFER_OBJECT_PERFOMANCE_WORKAROUND
    static int N = 0;
    // 128 was chosen experimentally. At 256 a slight hitchiness was noticed
    // when dragging a Chromium window around with a canvas tab backgrounded.
    doSubData = 0 == (N % 128);
    ++N;
#endif
    if (doSubData) {
        // The workaround is to do a glBufferData followed by glBufferSubData.
        // Chromium's command buffer may turn a glBufferSubData where the size
        // exactly matches the buffer size into a glBufferData. So we tack 1
        // extra byte onto the glBufferData.
        fGLSizeInBytes = srcSizeInBytes + 1;
        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, NULL, usage));
        GL_CALL(gpu, BufferSubData(fBufferType, 0, srcSizeInBytes, src));
    } else {
        fGLSizeInBytes = srcSizeInBytes;
        GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, src, usage));
    }
#endif
    return true;
}

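// Debug-only invariant checks, reached through the VALIDATE() macro when SK_DEBUG is set.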
void GrGLBufferImpl::validate() const {
    SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
    // The following assert isn't valid when the buffer has been abandoned:
    // SkASSERT((0 == fDesc.fID) == (fCPUData));
    SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
    SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
    SkASSERT(NULL == fMapPtr || fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
    SkASSERT(NULL == fCPUData || NULL == fMapPtr || fCPUData == fMapPtr);
}