// Copyright 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef VIRTIO_GPU_TIMELINES_H
#define VIRTIO_GPU_TIMELINES_H

#include <atomic>
#include <cstdint>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <variant>

#include "gfxstream/virtio-gpu-gfxstream-renderer.h"

typedef uint32_t VirtioGpuCtxId;
typedef uint8_t VirtioGpuRingIdx;

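// A fence is ordered on a "ring": either the single global timeline, or a
// context-specific timeline identified by a (context id, ring index) pair
// (mirroring virtio-gpu's per-context fencing).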
struct VirtioGpuRingGlobal {};
struct VirtioGpuRingContextSpecific {
    VirtioGpuCtxId mCtxId;
    VirtioGpuRingIdx mRingIdx;
};
using VirtioGpuRing = std::variant<VirtioGpuRingGlobal, VirtioGpuRingContextSpecific>;

template <>
struct std::hash<VirtioGpuRingGlobal> {
    std::size_t operator()(VirtioGpuRingGlobal const&) const noexcept { return 0; }
};

inline bool operator==(const VirtioGpuRingGlobal&, const VirtioGpuRingGlobal&) { return true; }

template <>
struct std::hash<VirtioGpuRingContextSpecific> {
    std::size_t operator()(VirtioGpuRingContextSpecific const& ringContextSpecific) const noexcept {
        std::size_t ctxHash = std::hash<VirtioGpuCtxId>{}(ringContextSpecific.mCtxId);
        std::size_t ringHash = std::hash<VirtioGpuRingIdx>{}(ringContextSpecific.mRingIdx);
        // Use the hash_combine from
        // https://www.boost.org/doc/libs/1_78_0/boost/container_hash/hash.hpp.
        std::size_t res = ctxHash;
        res ^= ringHash + 0x9e3779b9 + (res << 6) + (res >> 2);
        return res;
    }
};

inline bool operator==(const VirtioGpuRingContextSpecific& lhs,
                       const VirtioGpuRingContextSpecific& rhs) {
    return lhs.mCtxId == rhs.mCtxId && lhs.mRingIdx == rhs.mRingIdx;
}
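
// Both alternatives are hashable and equality comparable, so the standard
// std::hash<std::variant<...>> specialization is enabled and a VirtioGpuRing
// can be used directly as an unordered_map key (as mTimelineQueues does
// below). A minimal sketch, illustrative only:
//
//   std::unordered_map<VirtioGpuRing, int> timelines;
//   timelines[VirtioGpuRingGlobal{}] = 0;
//   timelines[VirtioGpuRingContextSpecific{/*mCtxId=*/5, /*mRingIdx=*/1}] = 1;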

inline std::string to_string(const VirtioGpuRing& ring) {
    struct {
        std::string operator()(const VirtioGpuRingGlobal&) { return "global"; }
        std::string operator()(const VirtioGpuRingContextSpecific& ring) {
            std::stringstream ss;
            ss << "context specific {ctx = " << ring.mCtxId << ", ring = " << (int)ring.mRingIdx
               << "}";
            return ss.str();
        }
    } visitor;
    return std::visit(visitor, ring);
}
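
// For example, to_string(VirtioGpuRing(VirtioGpuRingContextSpecific{5, 0}))
// returns "context specific {ctx = 5, ring = 0}".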

class VirtioGpuTimelines {
   public:
    using FenceId = uint64_t;
    using Ring = VirtioGpuRing;
    using TaskId = uint64_t;
    using FenceCompletionCallback = std::function<void(const Ring&, FenceId)>;

    static std::unique_ptr<VirtioGpuTimelines> create(FenceCompletionCallback callback);

    // Enqueues a task on the given ring's timeline and returns its id.
    TaskId enqueueTask(const Ring&);
    // Enqueues a fence on the given ring's timeline; it completes once every
    // task enqueued before it on the same ring has completed.
    void enqueueFence(const Ring&, FenceId);
    // Marks the task with the given id as completed.
    void notifyTaskCompletion(TaskId);
    // Invokes the completion callback for every fence that no longer has
    // incomplete tasks ahead of it on its timeline.
    void poll();

   private:
    VirtioGpuTimelines(FenceCompletionCallback callback);

    struct Task {
        TaskId mId;
        Ring mRing;
        uint64_t mTraceId;
        std::atomic_bool mHasCompleted;
        Task(TaskId id, const Ring& ring, uint64_t traceId)
            : mId(id), mRing(ring), mTraceId(traceId), mHasCompleted(false) {}
    };

    using TimelineItem = std::variant<FenceId, std::shared_ptr<Task>>;
    struct Timeline {
        uint64_t mTraceTrackId;
        std::list<TimelineItem> mQueue;
    };

    Timeline& GetOrCreateTimelineLocked(const Ring& ring);

    // Walk the timeline of the given ring, signal any fences that have no
    // pending tasks ahead of them, and remove timeline items that are no
    // longer needed.
    void poll_locked(const Ring&);

    std::atomic<TaskId> mNextId;
    FenceCompletionCallback mFenceCompletionCallback;
    std::mutex mTimelinesMutex;
    // The mTaskIdToTask cache must be destroyed after mTimelineQueues, the
    // actual owner of each Task, because the custom deleter of Task
    // automatically removes the corresponding entry from mTaskIdToTask.
    // (Members are destroyed in reverse declaration order, so mTaskIdToTask
    // must be declared before mTimelineQueues.)
    std::unordered_map<TaskId, std::weak_ptr<Task>> mTaskIdToTask;
    std::unordered_map<Ring, Timeline> mTimelineQueues;
};
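
// Example usage (a minimal sketch; the callback body and call ordering are
// illustrative, not a prescribed integration):
//
//   auto timelines = VirtioGpuTimelines::create(
//       [](const VirtioGpuTimelines::Ring& ring,
//          VirtioGpuTimelines::FenceId fenceId) {
//           // Signal fenceId on `ring` back to the guest here.
//       });
//   VirtioGpuTimelines::Ring ring = VirtioGpuRingGlobal{};
//   auto taskId = timelines->enqueueTask(ring);
//   timelines->enqueueFence(ring, /*fenceId=*/1);
//   timelines->notifyTaskCompletion(taskId);
//   timelines->poll();  // fence 1 has no pending tasks ahead; callback fires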

#endif  // VIRTIO_GPU_TIMELINES_H