1 /*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "src/gpu/ganesh/gl/GrGLBuffer.h"
9
10 #include "include/core/SkString.h"
11 #include "include/core/SkTraceMemoryDump.h"
12 #include "include/gpu/GpuTypes.h"
13 #include "include/gpu/ganesh/gl/GrGLFunctions.h"
14 #include "include/gpu/ganesh/gl/GrGLInterface.h"
15 #include "include/private/base/SkMalloc.h"
16 #include "src/core/SkTraceEvent.h"
17 #include "src/gpu/ganesh/GrGpuResourcePriv.h"
18 #include "src/gpu/ganesh/gl/GrGLCaps.h"
19 #include "src/gpu/ganesh/gl/GrGLDefines.h"
20 #include "src/gpu/ganesh/gl/GrGLGpu.h"
21 #include "src/gpu/ganesh/gl/GrGLUtil.h"
22
23 #include <cstdint>
24 #include <cstring>
25 #include <string>
26
// Convenience wrappers that issue a GL call through this buffer's owning GrGLGpu interface.
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

// Issues a GL call that may allocate (e.g. BufferData) and evaluates to the resulting GL error.
// When the caps say error checks can be skipped, the call is made and GR_GL_NO_ERROR is assumed;
// otherwise pending errors are cleared first and the post-call error is fetched, with OOM
// bookkeeping on both sides.
#define GL_ALLOC_CALL(gpu, call)                                   \
    [&] {                                                          \
        if (gpu->glCaps().skipErrorChecks()) {                     \
            GR_GL_CALL(gpu->glInterface(), call);                  \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);          \
        } else {                                                   \
            gpu->clearErrorsAndCheckForOOM();                      \
            GR_GL_CALL_NOERRCHECK(gpu->glInterface(), call);       \
            return gpu->getErrorAndCheckForOOM();                  \
        }                                                          \
    }()
41
Make(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern)42 sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu,
43 size_t size,
44 GrGpuBufferType intendedType,
45 GrAccessPattern accessPattern) {
46 if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
47 (GrGpuBufferType::kXferCpuToGpu == intendedType ||
48 GrGpuBufferType::kXferGpuToCpu == intendedType)) {
49 return nullptr;
50 }
51
52 sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern,
53 /*label=*/"MakeGlBuffer"));
54 if (0 == buffer->bufferID()) {
55 return nullptr;
56 }
57 return buffer;
58 }
59
60 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
61 // objects are implemented as client-side-arrays on tile-deferred architectures.
62 #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
63
gr_to_gl_access_pattern(GrGpuBufferType bufferType,GrAccessPattern accessPattern,const GrGLCaps & caps)64 inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
65 GrAccessPattern accessPattern,
66 const GrGLCaps& caps) {
67 auto drawUsage = [](GrAccessPattern pattern) {
68 switch (pattern) {
69 case kDynamic_GrAccessPattern:
70 // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
71 return DYNAMIC_DRAW_PARAM;
72 case kStatic_GrAccessPattern:
73 return GR_GL_STATIC_DRAW;
74 case kStream_GrAccessPattern:
75 return GR_GL_STREAM_DRAW;
76 }
77 SkUNREACHABLE;
78 };
79
80 auto readUsage = [](GrAccessPattern pattern) {
81 switch (pattern) {
82 case kDynamic_GrAccessPattern:
83 return GR_GL_DYNAMIC_READ;
84 case kStatic_GrAccessPattern:
85 return GR_GL_STATIC_READ;
86 case kStream_GrAccessPattern:
87 return GR_GL_STREAM_READ;
88 }
89 SkUNREACHABLE;
90 };
91
92 auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
93 GrAccessPattern pattern) {
94 // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
95 if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
96 return drawUsage(pattern);
97 }
98 switch (type) {
99 case GrGpuBufferType::kVertex:
100 case GrGpuBufferType::kIndex:
101 case GrGpuBufferType::kDrawIndirect:
102 case GrGpuBufferType::kXferCpuToGpu:
103 case GrGpuBufferType::kUniform:
104 return drawUsage(pattern);
105 case GrGpuBufferType::kXferGpuToCpu:
106 return readUsage(pattern);
107 }
108 SkUNREACHABLE;
109 };
110
111 return usageType(bufferType, accessPattern);
112 }
113
GrGLBuffer(GrGLGpu * gpu,size_t size,GrGpuBufferType intendedType,GrAccessPattern accessPattern,std::string_view label)114 GrGLBuffer::GrGLBuffer(GrGLGpu* gpu,
115 size_t size,
116 GrGpuBufferType intendedType,
117 GrAccessPattern accessPattern,
118 std::string_view label)
119 : INHERITED(gpu, size, intendedType, accessPattern, label)
120 , fIntendedType(intendedType)
121 , fBufferID(0)
122 , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
123 , fHasAttachedToTexture(false) {
124 GL_CALL(GenBuffers(1, &fBufferID));
125 if (fBufferID) {
126 GrGLenum target = gpu->bindBuffer(fIntendedType, this);
127 GrGLenum error = GL_ALLOC_CALL(this->glGpu(), BufferData(target,
128 (GrGLsizeiptr)size,
129 nullptr,
130 fUsage));
131 if (error != GR_GL_NO_ERROR) {
132 GL_CALL(DeleteBuffers(1, &fBufferID));
133 fBufferID = 0;
134 }
135 }
136 this->registerWithCache(skgpu::Budgeted::kYes);
137 if (!fBufferID) {
138 this->resourcePriv().removeScratchKey();
139 }
140 }
141
glGpu() const142 inline GrGLGpu* GrGLBuffer::glGpu() const {
143 SkASSERT(!this->wasDestroyed());
144 return static_cast<GrGLGpu*>(this->getGpu());
145 }
146
glCaps() const147 inline const GrGLCaps& GrGLBuffer::glCaps() const {
148 return this->glGpu()->glCaps();
149 }
150
onRelease()151 void GrGLBuffer::onRelease() {
152 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
153
154 if (!this->wasDestroyed()) {
155 // make sure we've not been abandoned or already released
156 if (fBufferID) {
157 GL_CALL(DeleteBuffers(1, &fBufferID));
158 fBufferID = 0;
159 }
160 fMapPtr = nullptr;
161 }
162
163 INHERITED::onRelease();
164 }
165
onAbandon()166 void GrGLBuffer::onAbandon() {
167 fBufferID = 0;
168 fMapPtr = nullptr;
169 INHERITED::onAbandon();
170 }
171
invalidate_buffer(GrGLGpu * gpu,GrGLenum target,GrGLenum usage,GrGLuint bufferID,size_t bufferSize)172 [[nodiscard]] static inline GrGLenum invalidate_buffer(GrGLGpu* gpu,
173 GrGLenum target,
174 GrGLenum usage,
175 GrGLuint bufferID,
176 size_t bufferSize) {
177 switch (gpu->glCaps().invalidateBufferType()) {
178 case GrGLCaps::InvalidateBufferType::kNone:
179 return GR_GL_NO_ERROR;
180 case GrGLCaps::InvalidateBufferType::kNullData:
181 return GL_ALLOC_CALL(gpu, BufferData(target, bufferSize, nullptr, usage));
182 case GrGLCaps::InvalidateBufferType::kInvalidate:
183 GR_GL_CALL(gpu->glInterface(), InvalidateBufferData(bufferID));
184 return GR_GL_NO_ERROR;
185 }
186 SkUNREACHABLE;
187 }
188
onMap(MapType type)189 void GrGLBuffer::onMap(MapType type) {
190 SkASSERT(fBufferID);
191 SkASSERT(!this->wasDestroyed());
192 SkASSERT(!this->isMapped());
193
194 // Handling dirty context is done in the bindBuffer call
195 switch (this->glCaps().mapBufferType()) {
196 case GrGLCaps::kNone_MapBufferType:
197 return;
198 case GrGLCaps::kMapBuffer_MapBufferType: {
199 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
200 if (type == MapType::kWriteDiscard) {
201 GrGLenum error = invalidate_buffer(this->glGpu(),
202 target,
203 fUsage,
204 fBufferID,
205 this->size());
206 if (error != GR_GL_NO_ERROR) {
207 return;
208 }
209 }
210 GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
211 GL_CALL_RET(fMapPtr, MapBuffer(target, access));
212 break;
213 }
214 case GrGLCaps::kMapBufferRange_MapBufferType: {
215 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
216 GrGLbitfield access;
217 switch (type) {
218 case MapType::kRead:
219 access = GR_GL_MAP_READ_BIT;
220 break;
221 case MapType::kWriteDiscard:
222 access = GR_GL_MAP_WRITE_BIT | GR_GL_MAP_INVALIDATE_BUFFER_BIT;
223 break;
224 }
225 GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
226 break;
227 }
228 case GrGLCaps::kChromium_MapBufferType: {
229 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
230 GrGLenum access = type == MapType::kRead ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY;
231 GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(), access));
232 break;
233 }
234 }
235 }
236
onUnmap(MapType)237 void GrGLBuffer::onUnmap(MapType) {
238 SkASSERT(fBufferID);
239 // bind buffer handles the dirty context
240 switch (this->glCaps().mapBufferType()) {
241 case GrGLCaps::kNone_MapBufferType:
242 SkUNREACHABLE;
243 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
244 case GrGLCaps::kMapBufferRange_MapBufferType: {
245 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
246 GL_CALL(UnmapBuffer(target));
247 break;
248 }
249 case GrGLCaps::kChromium_MapBufferType:
250 this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
251 GL_CALL(UnmapBufferSubData(fMapPtr));
252 break;
253 }
254 fMapPtr = nullptr;
255 }
256
onClearToZero()257 bool GrGLBuffer::onClearToZero() {
258 SkASSERT(fBufferID);
259
260 // We could improve this on GL 4.3+ with glClearBufferData (also GL_ARB_clear_buffer_object).
261 this->onMap(GrGpuBuffer::MapType::kWriteDiscard);
262 if (fMapPtr) {
263 std::memset(fMapPtr, 0, this->size());
264 this->onUnmap(GrGpuBuffer::MapType::kWriteDiscard);
265 return true;
266 }
267
268 void* zeros = sk_calloc_throw(this->size());
269 bool result = this->updateData(zeros, 0, this->size(), /*preserve=*/false);
270 sk_free(zeros);
271 return result;
272 }
273
onUpdateData(const void * src,size_t offset,size_t size,bool preserve)274 bool GrGLBuffer::onUpdateData(const void* src, size_t offset, size_t size, bool preserve) {
275 SkASSERT(fBufferID);
276
277 // bindbuffer handles dirty context
278 GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
279 if (!preserve) {
280 GrGLenum error = invalidate_buffer(this->glGpu(), target, fUsage, fBufferID, this->size());
281 if (error != GR_GL_NO_ERROR) {
282 return false;
283 }
284 }
285 GL_CALL(BufferSubData(target, offset, size, src));
286 return true;
287 }
288
onSetLabel()289 void GrGLBuffer::onSetLabel() {
290 SkASSERT(fBufferID);
291 if (!this->getLabel().empty()) {
292 const std::string label = "_Skia_" + this->getLabel();
293 if (this->glGpu()->glCaps().debugSupport()) {
294 GL_CALL(ObjectLabel(GR_GL_BUFFER, fBufferID, -1, label.c_str()));
295 }
296 }
297 }
298
setMemoryBacking(SkTraceMemoryDump * traceMemoryDump,const SkString & dumpName) const299 void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
300 const SkString& dumpName) const {
301 SkString buffer_id;
302 buffer_id.appendU32(this->bufferID());
303 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", buffer_id.c_str());
304 }
305