// Copyright (C) 2024 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VirtioGpuFrontend.h"

#ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT
#include <filesystem>
#include <fcntl.h>
// X11 defines status as a preprocessor define which messes up
// anyone with a `Status` type.
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/text_format.h>
#endif  // ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

#include <vulkan/vulkan.h>

#include "FrameBuffer.h"
#include "FrameworkFormats.h"
#include "VkCommonOperations.h"
#include "aemu/base/ManagedDescriptor.hpp"
#include "aemu/base/files/StdioStream.h"
#include "aemu/base/memory/SharedMemory.h"
#include "aemu/base/threads/WorkerThread.h"
#include "gfxstream/host/Tracing.h"
#include "host-common/AddressSpaceService.h"
#include "host-common/address_space_device.h"
#include "host-common/address_space_device.hpp"
#include "host-common/address_space_device_control_ops.h"
#include "host-common/opengles.h"
#include "virtgpu_gfxstream_protocol.h"

namespace gfxstream {
namespace host {
namespace {

using android::base::DescriptorType;
using android::base::SharedMemory;
#ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT
using gfxstream::host::snapshot::VirtioGpuContextSnapshot;
using gfxstream::host::snapshot::VirtioGpuFrontendSnapshot;
using gfxstream::host::snapshot::VirtioGpuResourceSnapshot;
#endif  // ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

struct VirtioGpuCmd {
    uint32_t op;
    uint32_t cmdSize;
    unsigned char buf[0];
} __attribute__((packed));

static uint64_t convert32to64(uint32_t lo, uint32_t hi) {
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}

}  // namespace

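// Runs deferred cleanup work (e.g. destroying address space graphics handles when
// resources are detached) on a dedicated worker thread, off the virtio-gpu dispatch path.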
class CleanupThread {
   public:
    using GenericCleanup = std::function<void()>;

    CleanupThread()
        : mWorker([](CleanupTask task) {
              return std::visit(
                  [](auto&& work) {
                      using T = std::decay_t<decltype(work)>;
                      if constexpr (std::is_same_v<T, GenericCleanup>) {
                          work();
                          return android::base::WorkerProcessingResult::Continue;
                      } else if constexpr (std::is_same_v<T, Exit>) {
                          return android::base::WorkerProcessingResult::Stop;
                      }
                  },
                  std::move(task));
          }) {
        mWorker.start();
    }

    ~CleanupThread() { stop(); }

    // CleanupThread is neither copyable nor movable.
    CleanupThread(const CleanupThread& other) = delete;
    CleanupThread& operator=(const CleanupThread& other) = delete;
    CleanupThread(CleanupThread&& other) = delete;
    CleanupThread& operator=(CleanupThread&& other) = delete;

    void enqueueCleanup(GenericCleanup command) { mWorker.enqueue(std::move(command)); }

    void waitForPendingCleanups() {
        std::promise<void> pendingCleanupsCompletedSignal;
        std::future<void> pendingCleanupsCompletedWaitable = pendingCleanupsCompletedSignal.get_future();
        enqueueCleanup([&]() { pendingCleanupsCompletedSignal.set_value(); });
        pendingCleanupsCompletedWaitable.wait();
    }

    void stop() {
        mWorker.enqueue(Exit{});
        mWorker.join();
    }

   private:
    struct Exit {};
    using CleanupTask = std::variant<GenericCleanup, Exit>;
    android::base::WorkerThread<CleanupTask> mWorker;
};

VirtioGpuFrontend::VirtioGpuFrontend() = default;

int VirtioGpuFrontend::init(void* cookie, gfxstream::host::FeatureSet features,
                            stream_renderer_fence_callback fence_callback) {
    stream_renderer_debug("cookie: %p", cookie);
    mCookie = cookie;
    mFeatures = features;
    mFenceCallback = fence_callback;
    mAddressSpaceDeviceControlOps = get_address_space_device_control_ops();
    if (!mAddressSpaceDeviceControlOps) {
        stream_renderer_error("Could not get address space device control ops!");
        return -EINVAL;
    }

    // Forwards fence completions from VirtioGpuTimelines to the client (VMM).
    auto fenceCompletionCallback = [this](const VirtioGpuTimelines::Ring& ring,
                                          VirtioGpuTimelines::FenceId fenceId) {
        struct stream_renderer_fence fence = {0};
        fence.fence_id = fenceId;
        fence.flags = STREAM_RENDERER_FLAG_FENCE;
        if (const auto* contextSpecificRing = std::get_if<VirtioGpuRingContextSpecific>(&ring)) {
            fence.flags |= STREAM_RENDERER_FLAG_FENCE_RING_IDX;
            fence.ctx_id = contextSpecificRing->mCtxId;
            fence.ring_idx = contextSpecificRing->mRingIdx;
        }
        mFenceCallback(mCookie, &fence);
    };
    mVirtioGpuTimelines = VirtioGpuTimelines::create(std::move(fenceCompletionCallback));

#if !defined(_WIN32)
    mPageSize = getpagesize();
#endif

    mCleanupThread.reset(new CleanupThread());

    return 0;
}

void VirtioGpuFrontend::teardown() {
    destroyVirtioGpuObjects();

    mCleanupThread.reset();
}

int VirtioGpuFrontend::resetPipe(VirtioGpuContextId contextId, GoldfishHostPipe* hostPipe) {
    stream_renderer_debug("reset pipe for context %u to hostpipe %p", contextId, hostPipe);

    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to reset pipe: context %u not found.", contextId);
        return -EINVAL;
    }
    auto& context = contextIt->second;
    context.SetHostPipe(hostPipe);

    // Also update any resources associated with it
    for (auto resourceId : context.GetAttachedResources()) {
        auto resourceIt = mResources.find(resourceId);
        if (resourceIt == mResources.end()) {
            stream_renderer_error("failed to reset pipe: resource %d not found.", resourceId);
            return -EINVAL;
        }
        auto& resource = resourceIt->second;
        resource.SetHostPipe(hostPipe);
    }

    return 0;
}

int VirtioGpuFrontend::createContext(VirtioGpuCtxId contextId, uint32_t nlen, const char* name,
                                     uint32_t contextInit) {
    std::string contextName(name, nlen);

    stream_renderer_debug("ctxid: %u len: %u name: %s", contextId, nlen, contextName.c_str());
    auto ops = ensureAndGetServiceOps();

    auto contextOpt = VirtioGpuContext::Create(ops, contextId, contextName, contextInit);
    if (!contextOpt) {
        stream_renderer_error("Failed to create context %u.", contextId);
        return -EINVAL;
    }
    mContexts[contextId] = std::move(*contextOpt);
    return 0;
}

int VirtioGpuFrontend::destroyContext(VirtioGpuCtxId contextId) {
    stream_renderer_debug("ctxid: %u", contextId);

    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to destroy context %d: context not found", contextId);
        return -EINVAL;
    }
    auto& context = contextIt->second;

    context.Destroy(ensureAndGetServiceOps(), mAddressSpaceDeviceControlOps);

    mContexts.erase(contextIt);
    return 0;
}

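// Copies a packed command struct of the given type out of the guest-supplied buffer;
// memcpy is used since the buffer may not be suitably aligned for the struct type.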
#define DECODE(variable, type, input) \
    type variable = {};               \
    memcpy(&variable, input, sizeof(type));

int VirtioGpuFrontend::addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords) {
    DECODE(header, gfxstream::gfxstreamHeader, dwords)

    auto contextIt = mContexts.find(ctxId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("ctx id %u not found", ctxId);
        return -EINVAL;
    }
    auto& context = contextIt->second;

    switch (header.opCode) {
        case GFXSTREAM_CONTEXT_CREATE: {
            DECODE(contextCreate, gfxstream::gfxstreamContextCreate, dwords)

            auto resourceIt = mResources.find(contextCreate.resourceId);
            if (resourceIt == mResources.end()) {
                stream_renderer_error("ASG coherent resource %u not found",
                                      contextCreate.resourceId);
                return -EINVAL;
            }
            auto& resource = resourceIt->second;

            return context.CreateAddressSpaceGraphicsInstance(mAddressSpaceDeviceControlOps,
                                                              resource);
        }
        case GFXSTREAM_CONTEXT_PING: {
            DECODE(contextPing, gfxstream::gfxstreamContextPing, dwords)

            return context.PingAddressSpaceGraphicsInstance(mAddressSpaceDeviceControlOps,
                                                            contextPing.resourceId);
        }
        default:
            break;
    }

    return 0;
}

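// Decodes a gfxstream protocol command submitted through the virtio-gpu EXECBUFFER
// path and dispatches it to the appropriate handler.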
int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
    if (!cmd) return -EINVAL;

    void* buffer = reinterpret_cast<void*>(cmd->cmd);

    VirtioGpuRing ring = VirtioGpuRingGlobal{};
    stream_renderer_debug("ctx: %u, ring: %s buffer: %p dwords: %d", cmd->ctx_id,
                          to_string(ring).c_str(), buffer, cmd->cmd_size);

    if (!buffer) {
        stream_renderer_error("error: buffer null");
        return -EINVAL;
    }

    if (cmd->cmd_size < 4) {
        stream_renderer_error("error: not enough bytes (got %d)", cmd->cmd_size);
        return -EINVAL;
    }

    DECODE(header, gfxstream::gfxstreamHeader, buffer);
    switch (header.opCode) {
        case GFXSTREAM_CONTEXT_CREATE:
        case GFXSTREAM_CONTEXT_PING:
        case GFXSTREAM_CONTEXT_PING_WITH_RESPONSE: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_CONTEXT_[CREATE|PING]");

            if (addressSpaceProcessCmd(cmd->ctx_id, (uint32_t*)buffer)) {
                return -EINVAL;
            }
            break;
        }
        case GFXSTREAM_CREATE_EXPORT_SYNC: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_CREATE_EXPORT_SYNC");

            // Make sure the context-specific ring is used
            ring = VirtioGpuRingContextSpecific{
                .mCtxId = cmd->ctx_id,
                .mRingIdx = 0,
            };

            DECODE(exportSync, gfxstream::gfxstreamCreateExportSync, buffer)

            uint64_t sync_handle = convert32to64(exportSync.syncHandleLo, exportSync.syncHandleHi);

            stream_renderer_debug("wait for gpu ring %s", to_string(ring).c_str());
            auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
#if GFXSTREAM_ENABLE_HOST_GLES
            gfxstream::FrameBuffer::getFB()->asyncWaitForGpuWithCb(
                sync_handle, [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
#endif
            break;
        }
        case GFXSTREAM_CREATE_EXPORT_SYNC_VK:
        case GFXSTREAM_CREATE_IMPORT_SYNC_VK: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_CREATE_[IMPORT|EXPORT]_SYNC_VK");

            // The guest sync export assumes fence context support and always uses
            // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
            // the same ring as the fence created for the virtio gpu command or the
            // fence may be signaled without properly waiting for the task to complete.
            ring = VirtioGpuRingContextSpecific{
                .mCtxId = cmd->ctx_id,
                .mRingIdx = 0,
            };

            DECODE(exportSyncVK, gfxstream::gfxstreamCreateExportSyncVK, buffer)

            uint64_t device_handle =
                convert32to64(exportSyncVK.deviceHandleLo, exportSyncVK.deviceHandleHi);

            uint64_t fence_handle =
                convert32to64(exportSyncVK.fenceHandleLo, exportSyncVK.fenceHandleHi);

            stream_renderer_debug("wait for gpu ring %s", to_string(ring).c_str());
            auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
            gfxstream::FrameBuffer::getFB()->asyncWaitForGpuVulkanWithCb(
                device_handle, fence_handle,
                [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
            break;
        }
        case GFXSTREAM_CREATE_QSRI_EXPORT_VK: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_CREATE_QSRI_EXPORT_VK");

            // The guest QSRI export assumes fence context support and always uses
            // VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
            // the same ring as the fence created for the virtio gpu command or the
            // fence may be signaled without properly waiting for the task to complete.
            ring = VirtioGpuRingContextSpecific{
                .mCtxId = cmd->ctx_id,
                .mRingIdx = 0,
            };

            DECODE(exportQSRI, gfxstream::gfxstreamCreateQSRIExportVK, buffer)

            uint64_t image_handle =
                convert32to64(exportQSRI.imageHandleLo, exportQSRI.imageHandleHi);

            stream_renderer_debug("wait for gpu vk qsri ring %s image 0x%llx",
                                  to_string(ring).c_str(), (unsigned long long)image_handle);
            auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
            gfxstream::FrameBuffer::getFB()->asyncWaitForGpuVulkanQsriWithCb(
                image_handle,
                [this, taskId] { mVirtioGpuTimelines->notifyTaskCompletion(taskId); });
            break;
        }
        case GFXSTREAM_RESOURCE_CREATE_3D: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_RESOURCE_CREATE_3D");

            DECODE(create3d, gfxstream::gfxstreamResourceCreate3d, buffer)
            struct stream_renderer_resource_create_args rc3d = {0};

            rc3d.target = create3d.target;
            rc3d.format = create3d.format;
            rc3d.bind = create3d.bind;
            rc3d.width = create3d.width;
            rc3d.height = create3d.height;
            rc3d.depth = create3d.depth;
            rc3d.array_size = create3d.arraySize;
            rc3d.last_level = create3d.lastLevel;
            rc3d.nr_samples = create3d.nrSamples;
            rc3d.flags = create3d.flags;

            auto contextIt = mContexts.find(cmd->ctx_id);
            if (contextIt == mContexts.end()) {
                stream_renderer_error("ctx id %u is not found", cmd->ctx_id);
                return -EINVAL;
            }
            auto& context = contextIt->second;

            return context.AddPendingBlob(create3d.blobId, rc3d);
        }
        case GFXSTREAM_ACQUIRE_SYNC: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_ACQUIRE_SYNC");

            DECODE(acquireSync, gfxstream::gfxstreamAcquireSync, buffer);

            auto contextIt = mContexts.find(cmd->ctx_id);
            if (contextIt == mContexts.end()) {
                stream_renderer_error("ctx id %u is not found", cmd->ctx_id);
                return -EINVAL;
            }
            auto& context = contextIt->second;
            return context.AcquireSync(acquireSync.syncId);
        }
        case GFXSTREAM_PLACEHOLDER_COMMAND_VK: {
            GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
                                  "GFXSTREAM_PLACEHOLDER_COMMAND_VK");

            // Do nothing, this is a placeholder command
            break;
        }
        default:
            return -EINVAL;
    }

    return 0;
}

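// Queues a fence on the given ring's timeline; once all tasks enqueued before it have
// completed, the fence completion callback installed in init() notifies the VMM.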
int VirtioGpuFrontend::createFence(uint64_t fence_id, const VirtioGpuRing& ring) {
    stream_renderer_debug("fenceid: %llu ring: %s", (unsigned long long)fence_id,
                          to_string(ring).c_str());

    mVirtioGpuTimelines->enqueueFence(ring, fence_id);

    return 0;
}

int VirtioGpuFrontend::acquireContextFence(uint32_t contextId, uint64_t fenceId) {
    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to acquire context %u fence: context not found", contextId);
        return -EINVAL;
    }
    auto& context = contextIt->second;

    auto syncInfoOpt = context.TakeSync();
    if (!syncInfoOpt) {
        stream_renderer_error("failed to acquire context %u fence: no sync acquired", contextId);
        return -EINVAL;
    }

    mSyncMap[fenceId] = std::make_shared<gfxstream::SyncDescriptorInfo>(std::move(*syncInfoOpt));

    return 0;
}

void VirtioGpuFrontend::poll() { mVirtioGpuTimelines->poll(); }

int VirtioGpuFrontend::createResource(struct stream_renderer_resource_create_args* args,
                                      struct iovec* iov, uint32_t num_iovs) {
    auto resourceOpt = VirtioGpuResource::Create(args, iov, num_iovs);
    if (!resourceOpt) {
        stream_renderer_error("Failed to create resource %u.", args->handle);
        return -EINVAL;
    }
    mResources[args->handle] = std::move(*resourceOpt);
    return 0;
}

void VirtioGpuFrontend::unrefResource(uint32_t resourceId) {
    stream_renderer_debug("resource: %u", resourceId);

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) return;
    auto& resource = resourceIt->second;

    auto attachedContextIds = resource.GetAttachedContexts();
    for (auto contextId : attachedContextIds) {
        detachResource(contextId, resourceId);
    }

    resource.Destroy();

    mResources.erase(resourceIt);
}

int VirtioGpuFrontend::attachIov(int resourceId, struct iovec* iov, int num_iovs) {
    stream_renderer_debug("resource:%d numiovs: %d", resourceId, num_iovs);

    auto it = mResources.find(resourceId);
    if (it == mResources.end()) {
        stream_renderer_error("failed to attach iov: resource %u not found.", resourceId);
        return ENOENT;
    }
    auto& resource = it->second;
    resource.AttachIov(iov, num_iovs);
    return 0;
}

void VirtioGpuFrontend::detachIov(int resourceId) {
    stream_renderer_debug("resource:%d", resourceId);

    auto it = mResources.find(resourceId);
    if (it == mResources.end()) {
        stream_renderer_error("failed to detach iov: resource %u not found.", resourceId);
        return;
    }
    auto& resource = it->second;
    resource.DetachIov();
}

namespace {

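// Copies the caller-provided iovec array into a std::vector, or returns std::nullopt
// when no iovecs were supplied.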
std::optional<std::vector<struct iovec>> AsVecOption(struct iovec* iov, int iovec_cnt) {
    if (iovec_cnt > 0) {
        std::vector<struct iovec> ret;
        ret.reserve(iovec_cnt);
        for (int i = 0; i < iovec_cnt; i++) {
            ret.push_back(iov[i]);
        }
        return ret;
    }
    return std::nullopt;
}

}  // namespace

int VirtioGpuFrontend::transferReadIov(int resId, uint64_t offset, stream_renderer_box* box,
                                       struct iovec* iov, int iovec_cnt) {
    auto it = mResources.find(resId);
    if (it == mResources.end()) {
        stream_renderer_error("Failed to transfer: failed to find resource %d.", resId);
        return EINVAL;
    }
    auto& resource = it->second;

    auto ops = ensureAndGetServiceOps();
    return resource.TransferRead(ops, offset, box, AsVecOption(iov, iovec_cnt));
}

int VirtioGpuFrontend::transferWriteIov(int resId, uint64_t offset, stream_renderer_box* box,
                                        struct iovec* iov, int iovec_cnt) {
    auto it = mResources.find(resId);
    if (it == mResources.end()) {
        stream_renderer_error("Failed to transfer: failed to find resource %d.", resId);
        return EINVAL;
    }
    auto& resource = it->second;

    auto ops = ensureAndGetServiceOps();
    auto result = resource.TransferWrite(ops, offset, box, AsVecOption(iov, iovec_cnt));
    if (result.status != 0) return result.status;

    if (result.contextPipe) {
        resetPipe(result.contextId, result.contextPipe);
    }
    return 0;
}

void VirtioGpuFrontend::getCapset(uint32_t set, uint32_t* max_size) {
    switch (set) {
        case VIRTGPU_CAPSET_GFXSTREAM_VULKAN:
            *max_size = sizeof(struct gfxstream::vulkanCapset);
            break;
        case VIRTGPU_CAPSET_GFXSTREAM_MAGMA:
            *max_size = sizeof(struct gfxstream::magmaCapset);
            break;
        case VIRTGPU_CAPSET_GFXSTREAM_GLES:
            *max_size = sizeof(struct gfxstream::glesCapset);
            break;
        case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER:
            *max_size = sizeof(struct gfxstream::composerCapset);
            break;
        default:
            stream_renderer_error("Incorrect capability set specified (%u)", set);
    }
}

void VirtioGpuFrontend::fillCaps(uint32_t set, void* caps) {
    switch (set) {
        case VIRTGPU_CAPSET_GFXSTREAM_VULKAN: {
            struct gfxstream::vulkanCapset* capset =
                reinterpret_cast<struct gfxstream::vulkanCapset*>(caps);

            memset(capset, 0, sizeof(*capset));

            capset->protocolVersion = 1;
            capset->ringSize = 12288;
            capset->bufferSize = 1048576;

            auto vk_emu = gfxstream::vk::getGlobalVkEmulation();
            if (vk_emu && vk_emu->live && vk_emu->representativeColorBufferMemoryTypeInfo) {
                capset->colorBufferMemoryIndex =
                    vk_emu->representativeColorBufferMemoryTypeInfo->guestMemoryTypeIndex;
            }

            if (mFeatures.VulkanBatchedDescriptorSetUpdate.enabled) {
                capset->vulkanBatchedDescriptorSetUpdate = 1;
            }
            capset->noRenderControlEnc = 1;
            capset->blobAlignment = mPageSize;
            if (vk_emu && vk_emu->live) {
                capset->deferredMapping = 1;
            }

#if GFXSTREAM_UNSTABLE_VULKAN_DMABUF_WINSYS
            capset->alwaysBlob = 1;
#endif

#if GFXSTREAM_UNSTABLE_VULKAN_EXTERNAL_SYNC
            capset->externalSync = 1;
#endif

            memset(capset->virglSupportedFormats, 0, sizeof(capset->virglSupportedFormats));

            struct FormatWithName {
                uint32_t format;
                const char* name;
            };
#define MAKE_FORMAT_AND_NAME(x) \
    {                           \
        x, #x                   \
    }
            static const FormatWithName kPossibleFormats[] = {
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_B5G6R5_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_B8G8R8A8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_B8G8R8X8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_NV12),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_P010),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R10G10B10A2_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R16_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R16G16B16A16_FLOAT),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R8G8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R8G8B8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R8G8B8A8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_R8G8B8X8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_YV12),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_Z16_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_Z24_UNORM_S8_UINT),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_Z24X8_UNORM),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_Z32_FLOAT_S8X24_UINT),
                MAKE_FORMAT_AND_NAME(VIRGL_FORMAT_Z32_FLOAT),
            };
#undef MAKE_FORMAT_AND_NAME

            stream_renderer_info("Format support:");
            for (std::size_t i = 0; i < std::size(kPossibleFormats); i++) {
                const FormatWithName& possibleFormat = kPossibleFormats[i];

                GLenum possibleFormatGl = virgl_format_to_gl(possibleFormat.format);
                const bool supported =
                    gfxstream::FrameBuffer::getFB()->isFormatSupported(possibleFormatGl);

                stream_renderer_info(" %s: %s", possibleFormat.name,
                                     (supported ? "supported" : "unsupported"));
                set_virgl_format_supported(capset->virglSupportedFormats, possibleFormat.format,
                                           supported);
            }
            break;
        }
        case VIRTGPU_CAPSET_GFXSTREAM_MAGMA: {
            struct gfxstream::magmaCapset* capset =
                reinterpret_cast<struct gfxstream::magmaCapset*>(caps);

            capset->protocolVersion = 1;
            capset->ringSize = 12288;
            capset->bufferSize = 1048576;
            capset->blobAlignment = mPageSize;
            break;
        }
        case VIRTGPU_CAPSET_GFXSTREAM_GLES: {
            struct gfxstream::glesCapset* capset =
                reinterpret_cast<struct gfxstream::glesCapset*>(caps);

            capset->protocolVersion = 1;
            capset->ringSize = 12288;
            capset->bufferSize = 1048576;
            capset->blobAlignment = mPageSize;
            break;
        }
        case VIRTGPU_CAPSET_GFXSTREAM_COMPOSER: {
            struct gfxstream::composerCapset* capset =
                reinterpret_cast<struct gfxstream::composerCapset*>(caps);

            capset->protocolVersion = 1;
            capset->ringSize = 12288;
            capset->bufferSize = 1048576;
            capset->blobAlignment = mPageSize;
            break;
        }
        default:
            stream_renderer_error("Incorrect capability set specified");
    }
}

void VirtioGpuFrontend::attachResource(uint32_t contextId, uint32_t resourceId) {
    stream_renderer_debug("ctxid: %u resid: %u", contextId, resourceId);

    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to attach resource %u to context %u: context not found.",
                              resourceId, contextId);
        return;
    }
    auto& context = contextIt->second;

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("failed to attach resource %u to context %u: resource not found.",
                              resourceId, contextId);
        return;
    }
    auto& resource = resourceIt->second;

    context.AttachResource(resource);
}

void VirtioGpuFrontend::detachResource(uint32_t contextId, uint32_t resourceId) {
    stream_renderer_debug("ctxid: %u resid: %u", contextId, resourceId);

    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to detach resource %u from context %u: context not found.",
                              resourceId, contextId);
        return;
    }
    auto& context = contextIt->second;

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("failed to detach resource %u from context %u: resource not found.",
                              resourceId, contextId);
        return;
    }
    auto& resource = resourceIt->second;

    auto resourceAsgOpt = context.TakeAddressSpaceGraphicsHandle(resourceId);
    if (resourceAsgOpt) {
        mCleanupThread->enqueueCleanup(
            [this, asgBlob = resource.ShareRingBlob(), asgHandle = *resourceAsgOpt]() {
                mAddressSpaceDeviceControlOps->destroy_handle(asgHandle);
            });
    }

    context.DetachResource(resource);
}

int VirtioGpuFrontend::getResourceInfo(uint32_t resourceId,
                                       struct stream_renderer_resource_info* info) {
    stream_renderer_debug("resource: %u", resourceId);

    if (!info) {
        stream_renderer_error("Failed to get info: invalid info struct.");
        return EINVAL;
    }

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("Failed to get info: failed to find resource %d.", resourceId);
        return ENOENT;
    }
    auto& resource = resourceIt->second;
    return resource.GetInfo(info);
}

void VirtioGpuFrontend::flushResource(uint32_t res_handle) {
    auto taskId = mVirtioGpuTimelines->enqueueTask(VirtioGpuRingGlobal{});
    gfxstream::FrameBuffer::getFB()->postWithCallback(
        res_handle, [this, taskId](std::shared_future<void> waitForGpu) {
            waitForGpu.wait();
            mVirtioGpuTimelines->notifyTaskCompletion(taskId);
        });
}

int VirtioGpuFrontend::createBlob(uint32_t contextId, uint32_t resourceId,
                                  const struct stream_renderer_create_blob* createBlobArgs,
                                  const struct stream_renderer_handle* handle) {
    auto contextIt = mContexts.find(contextId);
    if (contextIt == mContexts.end()) {
        stream_renderer_error("failed to create blob resource %u: context %u missing.", resourceId,
                              contextId);
        return -EINVAL;
    }
    auto& context = contextIt->second;

    auto createArgs = context.TakePendingBlob(createBlobArgs->blob_id);

    auto resourceOpt =
        VirtioGpuResource::Create(mFeatures, mPageSize, contextId, resourceId,
                                  createArgs ? &*createArgs : nullptr, createBlobArgs, handle);
    if (!resourceOpt) {
        stream_renderer_error("failed to create blob resource %u.", resourceId);
        return -EINVAL;
    }
    mResources[resourceId] = std::move(*resourceOpt);
    return 0;
}

int VirtioGpuFrontend::resourceMap(uint32_t resourceId, void** hvaOut, uint64_t* sizeOut) {
    stream_renderer_debug("resource: %u", resourceId);

    if (mFeatures.ExternalBlob.enabled) {
        stream_renderer_error("Failed to map resource: external blob enabled.");
        return -EINVAL;
    }

    auto it = mResources.find(resourceId);
    if (it == mResources.end()) {
        if (hvaOut) *hvaOut = nullptr;
        if (sizeOut) *sizeOut = 0;

        stream_renderer_error("Failed to map resource: unknown resource id %d.", resourceId);
        return -EINVAL;
    }

    auto& resource = it->second;
    return resource.Map(hvaOut, sizeOut);
}

int VirtioGpuFrontend::resourceUnmap(uint32_t resourceId) {
    stream_renderer_debug("resource: %u", resourceId);

    auto it = mResources.find(resourceId);
    if (it == mResources.end()) {
        stream_renderer_error("Failed to unmap resource: unknown resource id %d.", resourceId);
        return -EINVAL;
    }

    // TODO(lfy): Good place to run any registered cleanup callbacks.
    // No-op for now.
    return 0;
}

int VirtioGpuFrontend::platformImportResource(int res_handle, int res_info, void* resource) {
    auto it = mResources.find(res_handle);
    if (it == mResources.end()) return -EINVAL;
    bool success =
        gfxstream::FrameBuffer::getFB()->platformImportResource(res_handle, res_info, resource);
    return success ? 0 : -1;
}

void* VirtioGpuFrontend::platformCreateSharedEglContext() {
    void* ptr = nullptr;
#if GFXSTREAM_ENABLE_HOST_GLES
    ptr = gfxstream::FrameBuffer::getFB()->platformCreateSharedEglContext();
#endif
    return ptr;
}

int VirtioGpuFrontend::platformDestroySharedEglContext(void* context) {
    bool success = false;
#if GFXSTREAM_ENABLE_HOST_GLES
    success = gfxstream::FrameBuffer::getFB()->platformDestroySharedEglContext(context);
#endif
    return success ? 0 : -1;
}

int VirtioGpuFrontend::waitSyncResource(uint32_t res_handle) {
    auto resourceIt = mResources.find(res_handle);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("waitSyncResource could not find resource: %d", res_handle);
        return -EINVAL;
    }
    auto& resource = resourceIt->second;
    return resource.WaitSyncResource();
}

int VirtioGpuFrontend::resourceMapInfo(uint32_t resourceId, uint32_t* map_info) {
    stream_renderer_debug("resource: %u", resourceId);

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("Failed to get resource map info: unknown resource %d.", resourceId);
        return -EINVAL;
    }

    const auto& resource = resourceIt->second;
    return resource.GetCaching(map_info);
}

int VirtioGpuFrontend::exportBlob(uint32_t resourceId, struct stream_renderer_handle* handle) {
    stream_renderer_debug("resource: %u", resourceId);

    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("Failed to export blob: unknown resource %d.", resourceId);
        return -EINVAL;
    }
    auto& resource = resourceIt->second;
    return resource.ExportBlob(handle);
}

int VirtioGpuFrontend::exportFence(uint64_t fenceId, struct stream_renderer_handle* handle) {
    auto it = mSyncMap.find(fenceId);
    if (it == mSyncMap.end()) {
        return -EINVAL;
    }

    auto& entry = it->second;
    DescriptorType rawDescriptor;
    auto rawDescriptorOpt = entry->descriptor.release();
    if (rawDescriptorOpt)
        rawDescriptor = *rawDescriptorOpt;
    else
        return -EINVAL;

    handle->handle_type = entry->handleType;

#ifdef _WIN32
    handle->os_handle = static_cast<int64_t>(reinterpret_cast<intptr_t>(rawDescriptor));
#else
    handle->os_handle = static_cast<int64_t>(rawDescriptor);
#endif

    return 0;
}

int VirtioGpuFrontend::vulkanInfo(uint32_t resourceId,
                                  struct stream_renderer_vulkan_info* vulkanInfo) {
    auto resourceIt = mResources.find(resourceId);
    if (resourceIt == mResources.end()) {
        stream_renderer_error("failed to get vulkan info: failed to find resource %d", resourceId);
        return -EINVAL;
    }
    auto& resource = resourceIt->second;
    return resource.GetVulkanInfo(vulkanInfo);
}

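// Tears down all tracked virtio-gpu objects: detaches and unrefs every resource,
// destroys every context, then waits for any queued cleanup tasks to finish.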
int VirtioGpuFrontend::destroyVirtioGpuObjects() {
    {
        std::vector<VirtioGpuResourceId> resourceIds;
        resourceIds.reserve(mResources.size());
        for (auto& [resourceId, resource] : mResources) {
            const auto contextIds = resource.GetAttachedContexts();
            for (const VirtioGpuContextId contextId : contextIds) {
                detachResource(contextId, resourceId);
            }
            resourceIds.push_back(resourceId);
        }
        for (const VirtioGpuResourceId resourceId : resourceIds) {
            unrefResource(resourceId);
        }
        mResources.clear();
    }
    {
        std::vector<VirtioGpuContextId> contextIds;
        contextIds.reserve(mContexts.size());
        for (const auto& [contextId, _] : mContexts) {
            contextIds.push_back(contextId);
        }
        for (const VirtioGpuContextId contextId : contextIds) {
            destroyContext(contextId);
        }
        mContexts.clear();
    }

    if (mCleanupThread) {
        mCleanupThread->waitForPendingCleanups();
    }

    return 0;
}

#ifdef CONFIG_AEMU
void VirtioGpuFrontend::setServiceOps(const GoldfishPipeServiceOps* ops) { mServiceOps = ops; }
#endif  // CONFIG_AEMU

inline const GoldfishPipeServiceOps* VirtioGpuFrontend::ensureAndGetServiceOps() {
    if (mServiceOps) return mServiceOps;
    mServiceOps = goldfish_pipe_get_service_ops();
    return mServiceOps;
}

#ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

static constexpr const char kSnapshotBasenameAsg[] = "gfxstream_asg.bin";
static constexpr const char kSnapshotBasenameFrontend[] = "gfxstream_frontend.txtproto";
static constexpr const char kSnapshotBasenameRenderer[] = "gfxstream_renderer.bin";

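// A snapshot is written as three files in the target directory: the renderer state
// (binary stream), the frontend contexts/resources (text proto), and the address
// space graphics (ASG) device state (binary stream).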
int VirtioGpuFrontend::snapshotRenderer(const char* directory) {
    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameRenderer;

    android::base::StdioStream stream(fopen(snapshotPath.c_str(), "wb"),
                                      android::base::StdioStream::kOwner);
    android::snapshot::SnapshotSaveStream saveStream{
        .stream = &stream,
    };

    android_getOpenglesRenderer()->save(saveStream.stream, saveStream.textureSaver);
    return 0;
}

int VirtioGpuFrontend::snapshotFrontend(const char* directory) {
    gfxstream::host::snapshot::VirtioGpuFrontendSnapshot snapshot;

    for (const auto& [contextId, context] : mContexts) {
        auto contextSnapshotOpt = context.Snapshot();
        if (!contextSnapshotOpt) {
            stream_renderer_error("Failed to snapshot context %d", contextId);
            return -1;
        }
        (*snapshot.mutable_contexts())[contextId] = std::move(*contextSnapshotOpt);
    }
    for (const auto& [resourceId, resource] : mResources) {
        auto resourceSnapshotOpt = resource.Snapshot();
        if (!resourceSnapshotOpt) {
            stream_renderer_error("Failed to snapshot resource %d", resourceId);
            return -1;
        }
        (*snapshot.mutable_resources())[resourceId] = std::move(*resourceSnapshotOpt);
    }

    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameFrontend;
    int snapshotFd = open(snapshotPath.c_str(), O_CREAT | O_WRONLY | O_TRUNC, 0660);
    if (snapshotFd < 0) {
        stream_renderer_error("Failed to save snapshot: failed to open %s", snapshotPath.c_str());
        return -1;
    }
    google::protobuf::io::FileOutputStream snapshotOutputStream(snapshotFd);
    snapshotOutputStream.SetCloseOnDelete(true);
    if (!google::protobuf::TextFormat::Print(snapshot, &snapshotOutputStream)) {
        stream_renderer_error("Failed to save snapshot: failed to serialize to stream.");
        return -1;
    }

    return 0;
}

int VirtioGpuFrontend::snapshotAsg(const char* directory) {
    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameAsg;

    android::base::StdioStream stream(fopen(snapshotPath.c_str(), "wb"),
                                      android::base::StdioStream::kOwner);
    android::snapshot::SnapshotSaveStream saveStream{
        .stream = &stream,
    };

    int ret = android::emulation::goldfish_address_space_memory_state_save(saveStream.stream);
    if (ret) {
        stream_renderer_error("Failed to save snapshot: failed to save ASG state.");
        return ret;
    }
    return 0;
}

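// Top-level save entry point: pauses the renderer, then writes the renderer,
// frontend, and ASG snapshots into the given directory.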
int VirtioGpuFrontend::snapshot(const char* directory) {
    stream_renderer_debug("directory:%s", directory);

    android_getOpenglesRenderer()->pauseAllPreSave();

    int ret = snapshotRenderer(directory);
    if (ret) {
        stream_renderer_error("Failed to save snapshot: failed to snapshot renderer.");
        return ret;
    }

    ret = snapshotFrontend(directory);
    if (ret) {
        stream_renderer_error("Failed to save snapshot: failed to snapshot frontend.");
        return ret;
    }

    ret = snapshotAsg(directory);
    if (ret) {
        stream_renderer_error("Failed to save snapshot: failed to snapshot ASG device.");
        return ret;
    }

    stream_renderer_debug("directory:%s - done!", directory);
    return 0;
}

int VirtioGpuFrontend::restoreRenderer(const char* directory) {
    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameRenderer;

    android::base::StdioStream stream(fopen(snapshotPath.c_str(), "rb"),
                                      android::base::StdioStream::kOwner);
    android::snapshot::SnapshotLoadStream loadStream{
        .stream = &stream,
    };

    android_getOpenglesRenderer()->load(loadStream.stream, loadStream.textureLoader);
    return 0;
}

int VirtioGpuFrontend::restoreFrontend(const char* directory) {
    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameFrontend;

    gfxstream::host::snapshot::VirtioGpuFrontendSnapshot snapshot;
    {
        int snapshotFd = open(snapshotPath.c_str(), O_RDONLY);
        if (snapshotFd < 0) {
            stream_renderer_error("Failed to restore snapshot: failed to open %s",
                                  snapshotPath.c_str());
            return -1;
        }
        google::protobuf::io::FileInputStream snapshotInputStream(snapshotFd);
        snapshotInputStream.SetCloseOnDelete(true);
        if (!google::protobuf::TextFormat::Parse(&snapshotInputStream, &snapshot)) {
            stream_renderer_error("Failed to restore snapshot: failed to parse from file.");
            return -1;
        }
    }

    mContexts.clear();
    mResources.clear();

    for (const auto& [contextId, contextSnapshot] : snapshot.contexts()) {
        auto contextOpt = VirtioGpuContext::Restore(contextSnapshot);
        if (!contextOpt) {
            stream_renderer_error("Failed to restore context %d", contextId);
            return -1;
        }
        mContexts.emplace(contextId, std::move(*contextOpt));
    }
    for (const auto& [resourceId, resourceSnapshot] : snapshot.resources()) {
        auto resourceOpt = VirtioGpuResource::Restore(resourceSnapshot);
        if (!resourceOpt) {
            stream_renderer_error("Failed to restore resource %d", resourceId);
            return -1;
        }
        mResources.emplace(resourceId, std::move(*resourceOpt));
    }
    return 0;
}

int VirtioGpuFrontend::restoreAsg(const char* directory) {
    const std::filesystem::path snapshotDirectory = std::string(directory);
    const std::filesystem::path snapshotPath = snapshotDirectory / kSnapshotBasenameAsg;

    android::base::StdioStream stream(fopen(snapshotPath.c_str(), "rb"),
                                      android::base::StdioStream::kOwner);
    android::snapshot::SnapshotLoadStream loadStream{
        .stream = &stream,
    };

    // Gather external memory info that the ASG device needs to reload.
    android::emulation::AddressSpaceDeviceLoadResources asgLoadResources;
    for (const auto& [contextId, context] : mContexts) {
        for (const auto [resourceId, asgId] : context.AsgInstances()) {
            auto resourceIt = mResources.find(resourceId);
            if (resourceIt == mResources.end()) {
                stream_renderer_error("Failed to restore ASG device: context %" PRIu32
                                      " claims resource %" PRIu32 " is used for ASG %" PRIu32
                                      " but resource not found.",
                                      contextId, resourceId, asgId);
                return -1;
            }
            auto& resource = resourceIt->second;

            void* mappedAddr = nullptr;
            uint64_t mappedSize = 0;

            int ret = resource.Map(&mappedAddr, &mappedSize);
            if (ret) {
                stream_renderer_error(
                    "Failed to restore ASG device: failed to map resource %" PRIu32, resourceId);
                return -1;
            }

            asgLoadResources.contextExternalMemoryMap[asgId] = {
                .externalAddress = mappedAddr,
                .externalAddressSize = mappedSize,
            };
        }
    }

    int ret = android::emulation::goldfish_address_space_memory_state_set_load_resources(
        asgLoadResources);
    if (ret) {
        stream_renderer_error("Failed to restore ASG device: failed to set ASG load resources.");
        return ret;
    }

    ret = android::emulation::goldfish_address_space_memory_state_load(loadStream.stream);
    if (ret) {
        stream_renderer_error("Failed to restore ASG device: failed to restore ASG state.");
        return ret;
    }
    return 0;
}

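// Top-level restore entry point: discards any live objects, then restores the
// renderer, frontend, and ASG state before resuming render threads.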
int VirtioGpuFrontend::restore(const char* directory) {
    stream_renderer_debug("directory:%s", directory);

    destroyVirtioGpuObjects();

    int ret = restoreRenderer(directory);
    if (ret) {
        stream_renderer_error("Failed to load snapshot: failed to load renderer.");
        return ret;
    }

    ret = restoreFrontend(directory);
    if (ret) {
        stream_renderer_error("Failed to load snapshot: failed to load frontend.");
        return ret;
    }

    ret = restoreAsg(directory);
    if (ret) {
        stream_renderer_error("Failed to load snapshot: failed to load ASG device.");
        return ret;
    }

    // In end2end tests, we don't really do snapshot save for render threads.
    // We will need to resume all render threads without waiting for snapshot.
    android_getOpenglesRenderer()->resumeAll(false);

    stream_renderer_debug("directory:%s - done!", directory);
    return 0;
}

#endif  // ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

}  // namespace host
}  // namespace gfxstream