//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// BufferVk.h:
//    Defines the class interface for BufferVk, implementing BufferImpl.
//

#ifndef LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_
#define LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_

#include "libANGLE/Buffer.h"
#include "libANGLE/Observer.h"
#include "libANGLE/renderer/BufferImpl.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"

namespace rx
{
typedef gl::Range<VkDeviceSize> RangeDeviceSize;

// Conversion buffers hold translated index and vertex data.
class ConversionBuffer
{
  public:
    ConversionBuffer() : mEntireBufferDirty(true)
    {
        mData = std::make_unique<vk::BufferHelper>();
        mDirtyRanges.reserve(32);
    }
    ConversionBuffer(vk::Renderer *renderer,
                     VkBufferUsageFlags usageFlags,
                     size_t initialSize,
                     size_t alignment,
                     bool hostVisible);
    ~ConversionBuffer();

    ConversionBuffer(ConversionBuffer &&other);

    bool dirty() const { return mEntireBufferDirty || !mDirtyRanges.empty(); }
    bool isEntireBufferDirty() const { return mEntireBufferDirty; }
    void setEntireBufferDirty() { mEntireBufferDirty = true; }
    void addDirtyBufferRange(const RangeDeviceSize &range) { mDirtyRanges.emplace_back(range); }
    void consolidateDirtyRanges();
    const std::vector<RangeDeviceSize> &getDirtyBufferRanges() const { return mDirtyRanges; }
    void clearDirty()
    {
        mEntireBufferDirty = false;
        mDirtyRanges.clear();
    }

    bool valid() const { return mData && mData->valid(); }
    vk::BufferHelper *getBuffer() const { return mData.get(); }
    void release(vk::Renderer *renderer) { mData->release(renderer); }
    void destroy(vk::Renderer *renderer) { mData->destroy(renderer); }

  private:
    // This state determines whether vertex data needs to be re-streamed. mEntireBufferDirty
    // indicates that the entire buffer contents have changed; mDirtyRanges should be ignored
    // while it is true. If mEntireBufferDirty is false, mDirtyRanges holds the ranges of data
    // that have been modified. Note that there is no guarantee that the ranges do not overlap.
    bool mEntireBufferDirty;
    std::vector<RangeDeviceSize> mDirtyRanges;

    // Where the conversion data is stored.
    std::unique_ptr<vk::BufferHelper> mData;
};
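
// A minimal sketch of how the dirty tracking above is typically driven. The names
// |reconvertRange|, |usageFlags|, |offset|, |size|, and |wholeBufferRange| are illustrative
// placeholders; the real conversion entry points live in the vertex/index translation code,
// not in this header.
//
//     ConversionBuffer conversion(renderer, usageFlags, initialSize, alignment, true);
//     // A glBufferSubData-style update marks only the modified range dirty:
//     conversion.addDirtyBufferRange(RangeDeviceSize(offset, offset + size));
//
//     if (conversion.dirty())
//     {
//         if (conversion.isEntireBufferDirty())
//         {
//             reconvertRange(wholeBufferRange);
//         }
//         else
//         {
//             conversion.consolidateDirtyRanges();
//             for (const RangeDeviceSize &range : conversion.getDirtyBufferRanges())
//             {
//                 reconvertRange(range);
//             }
//         }
//         conversion.clearDirty();
//     }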

class VertexConversionBuffer : public ConversionBuffer
{
  public:
    struct CacheKey final
    {
        angle::FormatID formatID;
        GLuint stride;
        size_t offset;
        bool hostVisible;
        bool offsetMustMatchExactly;
    };

    VertexConversionBuffer(vk::Renderer *renderer, const CacheKey &cacheKey);
    ~VertexConversionBuffer();

    VertexConversionBuffer(VertexConversionBuffer &&other);

    bool match(const CacheKey &cacheKey)
    {
        // If anything other than the offset differs, the buffer can't be reused.
        if (mCacheKey.formatID != cacheKey.formatID || mCacheKey.stride != cacheKey.stride ||
            mCacheKey.offsetMustMatchExactly != cacheKey.offsetMustMatchExactly ||
            mCacheKey.hostVisible != cacheKey.hostVisible)
        {
            return false;
        }

        // If the offset matches, we can definitely reuse the buffer.
        if (mCacheKey.offset == cacheKey.offset)
        {
            return true;
        }

        // If an exact offset match is not required and the offsets are a whole number of strides
        // apart, adjust the offset and reuse the buffer. The benefit of reusing the buffer is
        // that the previous conversion result is still valid; only the modified data needs to be
        // converted.
        if (!cacheKey.offsetMustMatchExactly)
        {
            int64_t offsetGap = cacheKey.offset - mCacheKey.offset;
            if ((offsetGap % cacheKey.stride) == 0)
            {
                if (cacheKey.offset < mCacheKey.offset)
                {
                    addDirtyBufferRange(RangeDeviceSize(cacheKey.offset, mCacheKey.offset));
                    mCacheKey.offset = cacheKey.offset;
                }
                return true;
            }
        }
        return false;
    }

    const CacheKey &getCacheKey() const { return mCacheKey; }

  private:
    // The conversion is identified by the CacheKey: format, stride, offset, host visibility, and
    // whether the offset must match exactly.
    CacheKey mCacheKey;
};
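
// A worked example of the offset adjustment in match() above (the numbers are illustrative):
// suppose the cached conversion was created with offset 64 and stride 16, and a new request
// arrives with offset 16, the same format/stride/hostVisible, and offsetMustMatchExactly set to
// false. The gap (16 - 64 = -48) is a multiple of the stride, so the buffer is reused: the range
// [16, 64) is marked dirty so that only the newly exposed vertices get converted, and
// mCacheKey.offset becomes 16. A request at offset 8 (gap -56, not a multiple of 16) would not
// match, and the caller would typically fall back to creating a new conversion buffer.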

enum class BufferUpdateType
{
    StorageRedefined,
    ContentsUpdate,
};

struct BufferDataSource
{
    // Buffer data can come from two sources (a sketch follows the BufferVk class below):
    // glBufferData and glBufferSubData upload through a CPU pointer.
    const void *data = nullptr;
    // glCopyBufferSubData copies data from another buffer.
    vk::BufferHelper *buffer = nullptr;
    VkDeviceSize bufferOffset = 0;
};

VkBufferUsageFlags GetDefaultBufferUsageFlags(vk::Renderer *renderer);

class BufferVk : public BufferImpl
{
  public:
    BufferVk(const gl::BufferState &state);
    ~BufferVk() override;
    void destroy(const gl::Context *context) override;

    angle::Result setExternalBufferData(const gl::Context *context,
                                        gl::BufferBinding target,
                                        GLeglClientBufferEXT clientBuffer,
                                        size_t size,
                                        VkMemoryPropertyFlags memoryPropertyFlags);
    angle::Result setDataWithUsageFlags(const gl::Context *context,
                                        gl::BufferBinding target,
                                        GLeglClientBufferEXT clientBuffer,
                                        const void *data,
                                        size_t size,
                                        gl::BufferUsage usage,
                                        GLbitfield flags) override;
    angle::Result setData(const gl::Context *context,
                          gl::BufferBinding target,
                          const void *data,
                          size_t size,
                          gl::BufferUsage usage) override;
    angle::Result setSubData(const gl::Context *context,
                             gl::BufferBinding target,
                             const void *data,
                             size_t size,
                             size_t offset) override;
    angle::Result copySubData(const gl::Context *context,
                              BufferImpl *source,
                              GLintptr sourceOffset,
                              GLintptr destOffset,
                              GLsizeiptr size) override;
    angle::Result map(const gl::Context *context, GLenum access, void **mapPtr) override;
    angle::Result mapRange(const gl::Context *context,
                           size_t offset,
                           size_t length,
                           GLbitfield access,
                           void **mapPtr) override;
    angle::Result unmap(const gl::Context *context, GLboolean *result) override;
    angle::Result getSubData(const gl::Context *context,
                             GLintptr offset,
                             GLsizeiptr size,
                             void *outData) override;

    angle::Result getIndexRange(const gl::Context *context,
                                gl::DrawElementsType type,
                                size_t offset,
                                size_t count,
                                bool primitiveRestartEnabled,
                                gl::IndexRange *outRange) override;

    GLint64 getSize() const { return mState.getSize(); }

    void onDataChanged() override;

    vk::BufferHelper &getBuffer()
    {
        ASSERT(isBufferValid());
        return mBuffer;
    }

    vk::BufferSerial getBufferSerial() { return mBuffer.getBufferSerial(); }

    bool isBufferValid() const { return mBuffer.valid(); }
    bool isCurrentlyInUse(vk::Renderer *renderer) const;

    angle::Result mapImpl(ContextVk *contextVk, GLbitfield access, void **mapPtr);
    angle::Result mapRangeImpl(ContextVk *contextVk,
                               VkDeviceSize offset,
                               VkDeviceSize length,
                               GLbitfield access,
                               void **mapPtr);
    angle::Result unmapImpl(ContextVk *contextVk);
    angle::Result ghostMappedBuffer(ContextVk *contextVk,
                                    VkDeviceSize offset,
                                    VkDeviceSize length,
                                    GLbitfield access,
                                    void **mapPtr);

    VertexConversionBuffer *getVertexConversionBuffer(
        vk::Renderer *renderer,
        const VertexConversionBuffer::CacheKey &cacheKey);

  private:
    angle::Result updateBuffer(ContextVk *contextVk,
                               size_t bufferSize,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result directUpdate(ContextVk *contextVk,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result stagedUpdate(ContextVk *contextVk,
                               const BufferDataSource &dataSource,
                               size_t size,
                               size_t offset);
    angle::Result allocStagingBuffer(ContextVk *contextVk,
                                     vk::MemoryCoherency coherency,
                                     VkDeviceSize size,
                                     uint8_t **mapPtr);
    angle::Result flushStagingBuffer(ContextVk *contextVk, VkDeviceSize offset, VkDeviceSize size);
    angle::Result acquireAndUpdate(ContextVk *contextVk,
                                   size_t bufferSize,
                                   const BufferDataSource &dataSource,
                                   size_t updateSize,
                                   size_t updateOffset,
                                   BufferUpdateType updateType);
    angle::Result setDataWithMemoryType(const gl::Context *context,
                                        gl::BufferBinding target,
                                        const void *data,
                                        size_t size,
                                        VkMemoryPropertyFlags memoryPropertyFlags,
                                        gl::BufferUsage usage);
    angle::Result handleDeviceLocalBufferMap(ContextVk *contextVk,
                                             VkDeviceSize offset,
                                             VkDeviceSize size,
                                             uint8_t **mapPtr);
    angle::Result mapHostVisibleBuffer(ContextVk *contextVk,
                                       VkDeviceSize offset,
                                       GLbitfield access,
                                       uint8_t **mapPtr);
    angle::Result setDataImpl(ContextVk *contextVk,
                              size_t bufferSize,
                              const BufferDataSource &dataSource,
                              size_t updateSize,
                              size_t updateOffset,
                              BufferUpdateType updateType);
    angle::Result release(ContextVk *context);
    void dataUpdated();
    void dataRangeUpdated(const RangeDeviceSize &range);

    angle::Result acquireBufferHelper(ContextVk *contextVk,
                                      size_t sizeInBytes,
                                      BufferUsageType usageType);

    bool isExternalBuffer() const { return mClientBuffer != nullptr; }
    BufferUpdateType calculateBufferUpdateTypeOnFullUpdate(
        vk::Renderer *renderer,
        size_t size,
        VkMemoryPropertyFlags memoryPropertyFlags,
        BufferUsageType usageType,
        const void *data) const;
    bool shouldRedefineStorage(vk::Renderer *renderer,
                               BufferUsageType usageType,
                               VkMemoryPropertyFlags memoryPropertyFlags,
                               size_t size) const;

    void releaseConversionBuffers(vk::Renderer *renderer);

    vk::BufferHelper mBuffer;

    // If not null, this is the external memory pointer passed from the client API.
    void *mClientBuffer;

    uint32_t mMemoryTypeIndex;
    // Memory property flags that will be used for memory allocation.
    VkMemoryPropertyFlags mMemoryPropertyFlags;

    // The staging buffer to aid map operations. This is used when the buffer is not host
    // visible, or as a performance optimization when only a small range of the buffer is mapped.
    vk::BufferHelper mStagingBuffer;

    // A cache of converted vertex data.
    std::vector<VertexConversionBuffer> mVertexConversionBuffers;

    // Tracks whether mStagingBuffer is currently mapped and handed out to the user.
    bool mIsStagingBufferMapped;

    // Tracks whether the BufferVk object has valid data.
    bool mHasValidData;

    // True if the buffer is currently mapped for CPU write access. If the map call originated
    // from an OpenGL ES API call, this should be consistent with the mState.getAccessFlags()
    // bits. Otherwise the buffer was mapped internally by ANGLE and will not be consistent with
    // the mState access bits, so we have to keep a record of it ourselves.
    bool mIsMappedForWrite;
    // Whether the usage is static or dynamic. May affect how we allocate memory.
    BufferUsageType mUsageType;
    // Similar to mIsMappedForWrite, this may differ from mState's getMapOffset/getMapLength if
    // the buffer was mapped internally by ANGLE.
    RangeDeviceSize mMappedRange;
};
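
// A sketch of how the two BufferDataSource paths are typically populated; the variable names
// here are illustrative placeholders, not part of this interface:
//
//     // glBufferData / glBufferSubData: the data arrives through a CPU pointer.
//     BufferDataSource cpuSource;
//     cpuSource.data = clientData;
//
//     // glCopyBufferSubData: the data is copied from another buffer's vk::BufferHelper.
//     BufferDataSource gpuSource;
//     gpuSource.buffer       = &sourceBufferVk->getBuffer();
//     gpuSource.bufferOffset = sourceOffset;
//
// Either source is then handed to the internal update paths declared above (updateBuffer,
// directUpdate, stagedUpdate), which read from the CPU pointer or the source buffer as
// appropriate.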

}  // namespace rx

#endif  // LIBANGLE_RENDERER_VULKAN_BUFFERVK_H_