/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_
#define TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_

#include <memory>
#include <stack>

#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/stream_executor.h"

namespace xla {

class EventPool {
 public:
  class Handle {
   public:
    Handle() = default;
    ~Handle();

    Handle(const Handle&) = delete;
    Handle(Handle&&) = default;
    Handle& operator=(const Handle&) = delete;
    Handle& operator=(Handle&&) = default;

    // There is a total order on events handed out by the event pool. The most
    // useful aspect of this total order is that two events returned by
    // ThenAllocateAndRecordEvent on the same stream can be compared to see
    // which was recorded earlier on that stream.
    // Valid sequence numbers are > 0.
    inline bool operator<(const Handle& rhs) const {
      return sequence_number_ < rhs.sequence_number_;
    }
    inline bool operator>(const Handle& rhs) const { return rhs < *this; }
    inline bool operator<=(const Handle& rhs) const { return !(*this > rhs); }
    inline bool operator>=(const Handle& rhs) const { return !(*this < rhs); }

    se::Event* event() const { return event_.get(); }
    uint64_t sequence_number() const { return sequence_number_; }

   private:
    friend class EventPool;

    EventPool* pool_ = nullptr;
    std::unique_ptr<se::Event> event_;
    uint64_t sequence_number_;
  };

  // Initializes a new EventPool. If `allow_reuse` is true, then events will be
  // returned to the pool when their handles are deleted and made available to
  // subsequent allocations. Reuse only works on the GPU platform.
  explicit EventPool(bool allow_reuse);

  // Allocates a new (or reused) event from the pool, and records the event on
  // `stream`.
  //
  // Reuse is only possible on GPU. Event allocation and recording are coupled
  // in a single operation because on GPU it is recording an event that makes
  // it a "new" event. According to the CUDA documentation it is safe to call
  // cudaEventRecord even if that event may still be in use on the device; APIs
  // such as cudaStreamWaitEvent capture the state of the event at the time of
  // the host-side call and are not affected by a later host-side
  // cudaEventRecord.
  StatusOr<Handle> ThenAllocateAndRecordEvent(se::Stream* stream);
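
  // A minimal usage sketch (illustrative only, not part of the original
  // header; assumes `stream` is a valid, initialized se::Stream* on a
  // platform that supports events):
  //
  //   EventPool pool(/*allow_reuse=*/true);
  //   EventPool::Handle first =
  //       pool.ThenAllocateAndRecordEvent(stream).ValueOrDie();
  //   EventPool::Handle second =
  //       pool.ThenAllocateAndRecordEvent(stream).ValueOrDie();
  //   CHECK(first < second);  // Handles compare by recording order.
  //
  // The two-phase variant below can reserve the event before enqueueing work
  // that must not observe an allocation failure, e.g.:
  //
  //   EventPool::Handle handle =
  //       pool.AllocateEvent(stream->parent()).ValueOrDie();
  //   // ... set up work that must not fail after this point ...
  //   pool.ThenRecordEvent(stream, handle);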
  // Version of ThenAllocateAndRecordEvent split into two phases; this is
  // sometimes helpful if we want to avoid failures by preallocating events.
  StatusOr<Handle> AllocateEvent(se::StreamExecutor* executor);
  void ThenRecordEvent(se::Stream* stream, EventPool::Handle& handle);

 private:
  const bool allow_reuse_;

  absl::Mutex mu_;
  std::stack<std::unique_ptr<se::Event>> free_events_ ABSL_GUARDED_BY(mu_);
  uint64_t next_sequence_number_ ABSL_GUARDED_BY(mu_);
};

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_PJRT_EVENT_POOL_H_