//
// Copyright 2017 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ResourceMap:
// An optimized resource map which packs the first set of allocated objects into a
// flat array, and then falls back to an unordered map for the higher handle values.
//

#ifndef LIBANGLE_RESOURCE_MAP_H_
#define LIBANGLE_RESOURCE_MAP_H_

#include <mutex>
#include <type_traits>

#include "common/SimpleMutex.h"
#include "common/hash_containers.h"
#include "libANGLE/angletypes.h"

namespace gl
{
// The resource map needs to be internally synchronized for maps that are placed in the share group
// (as opposed to being private to the context) and that are accessed without holding the share
// group lock.
#if defined(ANGLE_ENABLE_SHARE_CONTEXT_LOCK)
using ResourceMapMutex = angle::SimpleMutex;
#else
using ResourceMapMutex = angle::NoOpMutex;
#endif

// Maps a resource map's compile-time kNeedsLock flag to the mutex type it should use: a real
// mutex (ResourceMapMutex) when the map is shared across contexts, and a zero-cost no-op mutex
// when the map is private to one context.
template <bool NeedsLock = true>
struct SelectResourceMapMutex
{
    using type = ResourceMapMutex;
};

template <>
struct SelectResourceMapMutex<false>
{
    using type = angle::NoOpMutex;
};

// Analysis of ANGLE's traces as well as Chrome usage reveals the following:
//
// - Buffers: Typical applications use no more than 4000 ids. Very few use over 6000.
// - Textures: Typical applications use no more than 1200 ids. Very few use over 2000.
// - Samplers: Typical applications use no more than 50 ids. Very few use over 100.
// - Shaders and Programs: Typical applications use no more than 500. Very few use over 700.
// - Sync objects: Typical applications use no more than 500. Very few use over 1500.
//
// For all the other shared types, the maximum used id is small (under 100). For
// context-private parts (such as vertex arrays and queries), the id count can be in the
// thousands.
//
// The initial size of the flat resource map is based on the above, rounded up to a multiple of
// 1536. Resource maps that need a lock (kNeedsLock == true) have the maximum flat size identical
// to initial flat size to avoid reallocation. For others, the maps start small and can grow.
//
// Per-ID-type tuning knobs for ResourceMap: the initial flat-array size and whether the map
// requires internal locking.
template <typename IDType>
struct ResourceMapParams
{
    static constexpr size_t kInitialFlatResourcesSize = 192;

    // The following are private to the context and don't need a lock:
    //
    // - Vertex Array Objects
    // - Framebuffer Objects
    // - Transform Feedback Objects
    // - Query Objects
    //
    // The rest of the maps need a lock. However, only a select few are currently locked as API
    // relevant to the rest of the types are protected by the share group lock. As the share group
    // lock is removed from more types, the resource map lock needs to be enabled for them.
    static constexpr bool kNeedsLock = false;
};

// Buffers are shared and accessed without the share group lock, so this map is locked. The flat
// size covers the ~4000-6000 ids observed in traces.
template <>
struct ResourceMapParams<BufferID>
{
    static constexpr size_t kInitialFlatResourcesSize = 6144;
    static constexpr bool kNeedsLock = true;
};
template <>
struct ResourceMapParams<TextureID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
template <>
struct ResourceMapParams<ShaderProgramID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
template <>
struct ResourceMapParams<SyncID>
{
    static constexpr size_t kInitialFlatResourcesSize = 1536;
    static constexpr bool kNeedsLock = false;
};
// For the purpose of unit testing, |int| is considered private (not needing lock), and
// |unsigned int| is considered shared (needing lock).
template <>
struct ResourceMapParams<unsigned int>
{
    static constexpr size_t kInitialFlatResourcesSize = 192;
    static constexpr bool kNeedsLock = true;
};

// A map from GL handle to resource pointer. Handles below the flat-array size are served from a
// contiguous pointer array (lockless for shared maps, see |mMutex| comment below); larger handles
// fall back to a hash map guarded by the mutex.
template <typename ResourceType, typename IDType>
class ResourceMap final : angle::NonCopyable
{
  public:
    ResourceMap();
    ~ResourceMap();

    // Returns the resource with the given handle, or nullptr if the handle is unallocated or
    // only reserved (assigned nullptr).
    ANGLE_INLINE ResourceType *query(IDType id) const
    {
        GLuint handle = GetIDValue(id);
        // No need for a lock when accessing the flat map. Either the flat map is static, or
        // locking is not needed.
        static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);

        if (handle < mFlatResourcesSize)
        {
            ResourceType *value = mFlatResources[handle];
            // InvalidPointer() marks an unallocated slot; nullptr marks a reserved handle.
            return (value == InvalidPointer() ? nullptr : value);
        }

        // Large handles live in the hash map, which is protected by the mutex.
        std::lock_guard<Mutex> lock(mMutex);

        auto it = mHashedResources.find(handle);
        return (it == mHashedResources.end() ? nullptr : it->second);
    }

    // Returns true if the handle was reserved. Not necessarily if the resource is created.
    bool contains(IDType id) const;

    // Removes the handle from the map; returns the element that was at this location through
    // |resourceOut|. Returns false if the handle was not present.
    bool erase(IDType id, ResourceType **resourceOut);

    // Maps |id| to |resource| (which may be nullptr to merely reserve the handle).
    void assign(IDType id, ResourceType *resource);

    // Clears the map.
    void clear();

    using IndexAndResource = std::pair<GLuint, ResourceType *>;
    using HashMap = angle::HashMap<GLuint, ResourceType *>;

    // Forward iterator over flat entries first, then hashed entries. Not thread safe; see
    // UnsafeResourceMapIter.
    class Iterator final
    {
      public:
        bool operator==(const Iterator &other) const;
        bool operator!=(const Iterator &other) const;
        Iterator &operator++();
        const IndexAndResource *operator->() const;
        const IndexAndResource &operator*() const;

      private:
        friend class ResourceMap;
        Iterator(const ResourceMap &origin,
                 GLuint flatIndex,
                 typename HashMap::const_iterator hashIndex,
                 bool skipNulls);
        void updateValue();

        const ResourceMap &mOrigin;
        GLuint mFlatIndex;
        typename HashMap::const_iterator mHashIndex;
        IndexAndResource mValue;
        bool mSkipNulls;
    };

  private:
    friend class Iterator;
    template <typename SameResourceType, typename SameIDType>
    friend class UnsafeResourceMapIter;

    // null values represent reserved handles.
    Iterator begin() const;
    Iterator end() const;

    // Like begin()/end(), but also visit reserved (null) handles.
    Iterator beginWithNull() const;
    Iterator endWithNull() const;

    // Used by iterators and related functions only (due to lack of thread safety).
    GLuint nextResource(size_t flatIndex, bool skipNulls) const;

    // constexpr methods cannot contain reinterpret_cast, so we need a static method.
    static ResourceType *InvalidPointer();
    static constexpr intptr_t kInvalidPointer = static_cast<intptr_t>(-1);

    static constexpr bool kNeedsLock = ResourceMapParams<IDType>::kNeedsLock;
    using Mutex = typename SelectResourceMapMutex<kNeedsLock>::type;

    static constexpr size_t kInitialFlatResourcesSize =
        ResourceMapParams<IDType>::kInitialFlatResourcesSize;

    // Experimental testing suggests that ~10k is a reasonable upper limit.
    // For maps that need a lock, the flat array must never reallocate, so the limit equals the
    // initial size.
    static constexpr size_t kFlatResourcesLimit = kNeedsLock ? kInitialFlatResourcesSize : 0x3000;
    // Due to the way assign() is implemented, kFlatResourcesLimit / kInitialFlatResourcesSize must
    // be a power of 2.
    static_assert(kFlatResourcesLimit % kInitialFlatResourcesSize == 0);
    static_assert(((kFlatResourcesLimit / kInitialFlatResourcesSize) &
                   (kFlatResourcesLimit / kInitialFlatResourcesSize - 1)) == 0);

    size_t mFlatResourcesSize;
    ResourceType **mFlatResources;

    // A map of GL objects indexed by object ID.
    HashMap mHashedResources;

    // mFlatResources is allocated at object creation time, with a default size of
    // |kInitialFlatResourcesSize|. This is thread safe, because the allocation is done by the
    // first context in the share group. The flat map is allowed to grow up to
    // |kFlatResourcesLimit|, but only for maps that don't need a lock (kNeedsLock == false).
    //
    // For maps that don't need a lock, this mutex is a no-op. For those that do, the mutex is
    // taken when allocating / deleting objects, as well as when accessing |mHashedResources|.
    // Otherwise, access to the flat map (which never gets reallocated due to
    // |kInitialFlatResourcesSize == kFlatResourcesLimit|) is lockless. This latter is possible
    // because the application is not allowed to gen/delete and bind the same ID in different
    // threads at the same time.
    //
    // Note that because HandleAllocator is not yet thread-safe, glGen* and glDelete* functions
    // cannot be free of the share group mutex yet. To remove the share group mutex from those
    // functions, likely the HandleAllocator class should be merged with this class, and the
    // necessary insert/erase operations done under this same lock.
    mutable Mutex mMutex;
};

// A helper to retrieve the resource map iterators while being explicit that this is not thread
// safe.
// Usage of iterators is limited to clean up on destruction and capture/replay, neither of
// which can race with other threads in their access to the resource map.
template <typename ResourceType, typename IDType>
class UnsafeResourceMapIter
{
  public:
    using ResMap = ResourceMap<ResourceType, IDType>;

    UnsafeResourceMapIter(const ResMap &resourceMap) : mResourceMap(resourceMap) {}

    typename ResMap::Iterator begin() const { return mResourceMap.begin(); }
    typename ResMap::Iterator end() const { return mResourceMap.end(); }

    // Variants that also visit reserved (null) handles.
    typename ResMap::Iterator beginWithNull() const { return mResourceMap.beginWithNull(); }
    typename ResMap::Iterator endWithNull() const { return mResourceMap.endWithNull(); }

    // Not a constant-time operation, should only be used for verification.
    bool empty() const;

  private:
    const ResMap &mResourceMap;
};

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::ResourceMap()
    : mFlatResourcesSize(kInitialFlatResourcesSize),
      mFlatResources(new ResourceType *[kInitialFlatResourcesSize])
{
    // memset fills each byte with the low byte of kInvalidPointer (0xFF), so every slot compares
    // equal to InvalidPointer(), i.e. "unallocated".
    memset(mFlatResources, kInvalidPointer, mFlatResourcesSize * sizeof(mFlatResources[0]));
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::~ResourceMap()
{
    // The map must be empty (no live, non-null resources) by destruction time.
    ASSERT(begin() == end());
    delete[] mFlatResources;
}

template <typename ResourceType, typename IDType>
ANGLE_INLINE bool ResourceMap<ResourceType, IDType>::contains(IDType id) const
{
    GLuint handle = GetIDValue(id);
    // Flat-map access is lockless; see the |mMutex| comment in the class declaration.
    if (handle < mFlatResourcesSize)
    {
        return mFlatResources[handle] != InvalidPointer();
    }
    std::lock_guard<Mutex> lock(mMutex);
    return mHashedResources.find(handle) != mHashedResources.end();
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::erase(IDType id, ResourceType **resourceOut)
{
    GLuint handle = GetIDValue(id);
    if (handle < mFlatResourcesSize)
    {
        auto &value = mFlatResources[handle];
        if (value == InvalidPointer())
        {
            // Handle was never reserved.
            return false;
        }
        *resourceOut = value;
        value = InvalidPointer();
    }
    else
    {
        std::lock_guard<Mutex> lock(mMutex);

        auto it = mHashedResources.find(handle);
        if (it == mHashedResources.end())
        {
            return false;
        }
        *resourceOut = it->second;
        mHashedResources.erase(it);
    }
    return true;
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::assign(IDType id, ResourceType *resource)
{
    GLuint handle = GetIDValue(id);
    if (handle < kFlatResourcesLimit)
    {
        if (handle >= mFlatResourcesSize)
        {
            // No need for a lock as the flat map never grows when locking is needed.
            static_assert(!kNeedsLock || kInitialFlatResourcesSize == kFlatResourcesLimit);

            // Use power-of-two.
            size_t newSize = mFlatResourcesSize;
            while (newSize <= handle)
            {
                newSize *= 2;
            }

            ResourceType **oldResources = mFlatResources;

            // Allocate the larger array, mark the new tail unallocated, then copy the old
            // entries over before releasing the old array.
            mFlatResources = new ResourceType *[newSize];
            memset(&mFlatResources[mFlatResourcesSize], kInvalidPointer,
                   (newSize - mFlatResourcesSize) * sizeof(mFlatResources[0]));
            memcpy(mFlatResources, oldResources, mFlatResourcesSize * sizeof(mFlatResources[0]));
            mFlatResourcesSize = newSize;
            delete[] oldResources;
        }
        ASSERT(mFlatResourcesSize > handle);
        mFlatResources[handle] = resource;
    }
    else
    {
        // Handles beyond the flat limit are stored in the hash map, under the lock.
        std::lock_guard<Mutex> lock(mMutex);
        mHashedResources[handle] = resource;
    }
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::begin()
    const
{
    return Iterator(*this, nextResource(0, true), mHashedResources.begin(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator ResourceMap<ResourceType, IDType>::end() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), true);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::beginWithNull() const
{
    return Iterator(*this, nextResource(0, false), mHashedResources.begin(), false);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator
ResourceMap<ResourceType, IDType>::endWithNull() const
{
    return Iterator(*this, static_cast<GLuint>(mFlatResourcesSize), mHashedResources.end(), false);
}

template <typename ResourceType, typename IDType>
bool UnsafeResourceMapIter<ResourceType, IDType>::empty() const
{
    return begin() == end();
}

template <typename ResourceType, typename IDType>
void ResourceMap<ResourceType, IDType>::clear()
{
    // No need for a lock as this is only called on destruction.
    // Note: only the initial extent is re-poisoned; mFlatResourcesSize is reset to match, so any
    // stale entries beyond it are unreachable and the array is reallocated if it grows again.
    memset(mFlatResources, kInvalidPointer, kInitialFlatResourcesSize * sizeof(mFlatResources[0]));
    mFlatResourcesSize = kInitialFlatResourcesSize;
    mHashedResources.clear();
}

template <typename ResourceType, typename IDType>
GLuint ResourceMap<ResourceType, IDType>::nextResource(size_t flatIndex, bool skipNulls) const
{
    // This function is only used by the iterators, access to which is marked by
    // UnsafeResourceMapIter. Locking is the responsibility of the caller.
    // Returns the first flat index at or after |flatIndex| that is allocated (and, when
    // |skipNulls|, non-null); returns mFlatResourcesSize when none remains.
    for (size_t index = flatIndex; index < mFlatResourcesSize; index++)
    {
        if ((mFlatResources[index] != nullptr || !skipNulls) &&
            mFlatResources[index] != InvalidPointer())
        {
            return static_cast<GLuint>(index);
        }
    }
    return static_cast<GLuint>(mFlatResourcesSize);
}

template <typename ResourceType, typename IDType>
// static
ResourceType *ResourceMap<ResourceType, IDType>::InvalidPointer()
{
    return reinterpret_cast<ResourceType *>(kInvalidPointer);
}

template <typename ResourceType, typename IDType>
ResourceMap<ResourceType, IDType>::Iterator::Iterator(
    const ResourceMap &origin,
    GLuint flatIndex,
    typename ResourceMap<ResourceType, IDType>::HashMap::const_iterator hashIndex,
    bool skipNulls)
    : mOrigin(origin), mFlatIndex(flatIndex), mHashIndex(hashIndex), mSkipNulls(skipNulls)
{
    updateValue();
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator==(const Iterator &other) const
{
    return (mFlatIndex == other.mFlatIndex && mHashIndex == other.mHashIndex);
}

template <typename ResourceType, typename IDType>
bool ResourceMap<ResourceType, IDType>::Iterator::operator!=(const Iterator &other) const
{
    return !(*this == other);
}

template <typename ResourceType, typename IDType>
typename ResourceMap<ResourceType, IDType>::Iterator &
ResourceMap<ResourceType, IDType>::Iterator::operator++()
{
    // Exhaust the flat range first, then advance through the hashed range.
    if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize))
    {
        mFlatIndex = mOrigin.nextResource(mFlatIndex + 1, mSkipNulls);
    }
    else
    {
        mHashIndex++;
    }
    updateValue();
    return *this;
}

template <typename ResourceType, typename IDType>
const typename ResourceMap<ResourceType, IDType>::IndexAndResource *
ResourceMap<ResourceType, IDType>::Iterator::operator->() const
{
    return &mValue;
}

459 template <typename ResourceType, typename IDType> 460 const typename ResourceMap<ResourceType, IDType>::IndexAndResource & 461 ResourceMap<ResourceType, IDType>::Iterator::operator*() const 462 { 463 return mValue; 464 } 465 466 template <typename ResourceType, typename IDType> 467 void ResourceMap<ResourceType, IDType>::Iterator::updateValue() 468 { 469 if (mFlatIndex < static_cast<GLuint>(mOrigin.mFlatResourcesSize)) 470 { 471 mValue.first = mFlatIndex; 472 mValue.second = mOrigin.mFlatResources[mFlatIndex]; 473 } 474 else if (mHashIndex != mOrigin.mHashedResources.end()) 475 { 476 mValue.first = mHashIndex->first; 477 mValue.second = mHashIndex->second; 478 } 479 } 480 481 } // namespace gl 482 483 #endif // LIBANGLE_RESOURCE_MAP_H_ 484