// Copyright (C) 2024 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VirtioGpuResource.h"

#include "FrameBuffer.h"
#include "VirtioGpuFormatUtils.h"

namespace gfxstream {
namespace host {

using android::base::DescriptorType;
#ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT
using gfxstream::host::snapshot::VirtioGpuResourceCreateArgs;
using gfxstream::host::snapshot::VirtioGpuResourceCreateBlobArgs;
using gfxstream::host::snapshot::VirtioGpuResourceSnapshot;
#endif

namespace {

static constexpr int kPipeTryAgain = -2;

enum pipe_texture_target {
    PIPE_BUFFER,
    PIPE_TEXTURE_1D,
    PIPE_TEXTURE_2D,
    PIPE_TEXTURE_3D,
    PIPE_TEXTURE_CUBE,
    PIPE_TEXTURE_RECT,
    PIPE_TEXTURE_1D_ARRAY,
    PIPE_TEXTURE_2D_ARRAY,
    PIPE_TEXTURE_CUBE_ARRAY,
    PIPE_MAX_TEXTURE_TYPES,
};

/**
 * Resource binding flags -- state tracker must specify in advance all
 * the ways a resource might be used.
 */
#define PIPE_BIND_DEPTH_STENCIL (1 << 0)        /* create_surface */
#define PIPE_BIND_RENDER_TARGET (1 << 1)        /* create_surface */
#define PIPE_BIND_BLENDABLE (1 << 2)            /* create_surface */
#define PIPE_BIND_SAMPLER_VIEW (1 << 3)         /* create_sampler_view */
#define PIPE_BIND_VERTEX_BUFFER (1 << 4)        /* set_vertex_buffers */
#define PIPE_BIND_INDEX_BUFFER (1 << 5)         /* draw_elements */
#define PIPE_BIND_CONSTANT_BUFFER (1 << 6)      /* set_constant_buffer */
#define PIPE_BIND_DISPLAY_TARGET (1 << 7)       /* flush_front_buffer */
#define PIPE_BIND_STREAM_OUTPUT (1 << 10)       /* set_stream_output_buffers */
#define PIPE_BIND_CURSOR (1 << 11)              /* mouse cursor */
#define PIPE_BIND_CUSTOM (1 << 12)              /* state-tracker/winsys usages */
#define PIPE_BIND_GLOBAL (1 << 13)              /* set_global_binding */
#define PIPE_BIND_SHADER_BUFFER (1 << 14)       /* set_shader_buffers */
#define PIPE_BIND_SHADER_IMAGE (1 << 15)        /* set_shader_images */
#define PIPE_BIND_COMPUTE_RESOURCE (1 << 16)    /* set_compute_resources */
#define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
#define PIPE_BIND_QUERY_BUFFER (1 << 18)        /* get_query_result_resource */

static inline uint32_t AlignUp(uint32_t n, uint32_t a) { return ((n + a - 1) / a) * a; }

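// Classifies a virgl resource-create request: PIPE_BUFFER targets become
// frontend-only PIPE resources; VIRGL_FORMAT_R8_UNORM resources bound only
// with VIRGL_BIND_LINEAR (and none of the sampler/render-target/scanout/cursor
// bind flags) become plain BUFFERs; everything else becomes a COLOR_BUFFER.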
VirtioGpuResourceType GetResourceType(const struct stream_renderer_resource_create_args& args) {
    if (args.target == PIPE_BUFFER) {
        return VirtioGpuResourceType::PIPE;
    }

    if (args.format != VIRGL_FORMAT_R8_UNORM) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }
    if (args.bind & VIRGL_BIND_SAMPLER_VIEW) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }
    if (args.bind & VIRGL_BIND_RENDER_TARGET) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }
    if (args.bind & VIRGL_BIND_SCANOUT) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }
    if (args.bind & VIRGL_BIND_CURSOR) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }
    if (!(args.bind & VIRGL_BIND_LINEAR)) {
        return VirtioGpuResourceType::COLOR_BUFFER;
    }

    return VirtioGpuResourceType::BUFFER;
}

}  // namespace

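// Creates a non-blob resource (PIPE, BUFFER, or COLOR_BUFFER) and allocates the
// backing host object through FrameBuffer where needed. PIPE resources are
// frontend-only and get no host allocation here.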
/*static*/
std::optional<VirtioGpuResource> VirtioGpuResource::Create(
    const struct stream_renderer_resource_create_args* args, struct iovec* iov,
    uint32_t num_iovs) {
    stream_renderer_debug("resource id: %u", args->handle);

    const auto resourceType = GetResourceType(*args);
    if (resourceType == VirtioGpuResourceType::BLOB) {
        stream_renderer_error("Failed to create resource: encountered blob.");
        return std::nullopt;
    }

    if (resourceType == VirtioGpuResourceType::PIPE) {
        // Frontend only resource.
    } else if (resourceType == VirtioGpuResourceType::BUFFER) {
        FrameBuffer::getFB()->createBufferWithHandle(args->width * args->height, args->handle);
    } else if (resourceType == VirtioGpuResourceType::COLOR_BUFFER) {
        const uint32_t glformat = virgl_format_to_gl(args->format);
        const auto fwkformat = (gfxstream::FrameworkFormat)virgl_format_to_fwk_format(args->format);
        const bool linear =
#ifdef GFXSTREAM_ENABLE_GUEST_VIRTIO_RESOURCE_TILING_CONTROL
            !!(args->bind & VIRGL_BIND_LINEAR);
#else
            false;
#endif
        FrameBuffer::getFB()->createColorBufferWithHandle(args->width, args->height, glformat,
                                                          fwkformat, args->handle, linear);
        FrameBuffer::getFB()->setGuestManagedColorBufferLifetime(true /* guest manages lifetime */);
        FrameBuffer::getFB()->openColorBuffer(args->handle);
    } else {
        stream_renderer_error("Failed to create resource: unhandled type.");
        return std::nullopt;
    }

    VirtioGpuResource resource;
    resource.mId = args->handle;
    resource.mResourceType = resourceType;
    resource.mCreateArgs = *args;

    resource.AttachIov(iov, num_iovs);

    return resource;
}

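// Creates a blob resource. When createArgs is provided, the blob is backed by an
// existing gfxstream Buffer or ColorBuffer whose memory is exported; otherwise it
// is a standalone BLOB. A blob_id of 0 selects a ring blob, while other blobs are
// backed either by an external descriptor (ExternalBlob feature) or by a host
// memory mapping registered with ExternalObjectManager.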
/*static*/ std::optional<VirtioGpuResource> VirtioGpuResource::Create(
    const gfxstream::host::FeatureSet& features, uint32_t pageSize, uint32_t contextId,
    uint32_t resourceId, const struct stream_renderer_resource_create_args* createArgs,
    const struct stream_renderer_create_blob* createBlobArgs,
    const struct stream_renderer_handle* handle) {
    VirtioGpuResource resource;

    std::optional<BlobDescriptorInfo> descriptorInfoOpt;

    if (createArgs != nullptr) {
        auto resourceType = GetResourceType(*createArgs);
        if (resourceType != VirtioGpuResourceType::BUFFER &&
            resourceType != VirtioGpuResourceType::COLOR_BUFFER) {
            stream_renderer_error("failed to create blob resource: unhandled type.");
            return std::nullopt;
        }

        auto resourceOpt = Create(createArgs, nullptr, 0);
        if (!resourceOpt) {
            return std::nullopt;
        }

        if (resourceType == VirtioGpuResourceType::BUFFER) {
            descriptorInfoOpt = FrameBuffer::getFB()->exportBuffer(resourceId);
        } else if (resourceType == VirtioGpuResourceType::COLOR_BUFFER) {
            descriptorInfoOpt = FrameBuffer::getFB()->exportColorBuffer(resourceId);
        } else {
            stream_renderer_error("failed to create blob resource: unhandled type.");
            return std::nullopt;
        }

        resource = std::move(*resourceOpt);
    } else {
        resource.mResourceType = VirtioGpuResourceType::BLOB;
    }

    resource.mId = resourceId;
    resource.mCreateBlobArgs = *createBlobArgs;

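    // A blob_id of 0 requests a ring blob; any other id refers to memory previously
    // registered with ExternalObjectManager under (contextId, blob_id).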
    if (createBlobArgs->blob_id == 0) {
        RingBlobMemory memory;
        if (features.ExternalBlob.enabled) {
            memory = RingBlob::CreateWithShmem(resourceId, createBlobArgs->size);
        } else {
            memory = RingBlob::CreateWithHostMemory(resourceId, createBlobArgs->size, pageSize);
        }
        if (!memory) {
            stream_renderer_error("Failed to create blob: failed to create ring blob.");
            return std::nullopt;
        }
        resource.mBlobMemory.emplace(std::move(memory));
    } else if (features.ExternalBlob.enabled) {
        if (createBlobArgs->blob_mem == STREAM_BLOB_MEM_GUEST &&
            (createBlobArgs->blob_flags & STREAM_BLOB_FLAG_CREATE_GUEST_HANDLE)) {
#if defined(__linux__) || defined(__QNX__)
            ManagedDescriptor managedHandle(handle->os_handle);
            ExternalObjectManager::get()->addBlobDescriptorInfo(
                contextId, createBlobArgs->blob_id, std::move(managedHandle), handle->handle_type,
                0, std::nullopt);
#else
            stream_renderer_error("Failed to create blob: unimplemented external blob.");
            return std::nullopt;
#endif
        } else {
            if (!descriptorInfoOpt) {
                descriptorInfoOpt = ExternalObjectManager::get()->removeBlobDescriptorInfo(
                    contextId, createBlobArgs->blob_id);
            }
            if (!descriptorInfoOpt) {
                stream_renderer_error("Failed to create blob: no external blob descriptor.");
                return std::nullopt;
            }
            resource.mBlobMemory.emplace(
                std::make_shared<BlobDescriptorInfo>(std::move(*descriptorInfoOpt)));
        }
    } else {
        auto memoryMappingOpt =
            ExternalObjectManager::get()->removeMapping(contextId, createBlobArgs->blob_id);
        if (!memoryMappingOpt) {
            stream_renderer_error("Failed to create blob: no external blob mapping.");
            return std::nullopt;
        }
        resource.mBlobMemory.emplace(std::move(*memoryMappingOpt));
    }

    return resource;
}

int VirtioGpuResource::Destroy() {
    if (mResourceType == VirtioGpuResourceType::BUFFER) {
        FrameBuffer::getFB()->closeBuffer(mId);
    } else if (mResourceType == VirtioGpuResourceType::COLOR_BUFFER) {
        FrameBuffer::getFB()->closeColorBuffer(mId);
    }
    return 0;
}

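// Records the guest-provided iovec list and sizes the linear staging buffer used
// by transfer operations to the sum of the iovec lengths.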
void VirtioGpuResource::AttachIov(struct iovec* iov, uint32_t num_iovs) {
    mIovs.clear();
    mLinear.clear();

    size_t linearSize = 0;
    if (num_iovs) {
        mIovs.reserve(num_iovs);
        for (uint32_t i = 0; i < num_iovs; ++i) {
            mIovs.push_back(iov[i]);
            linearSize += iov[i].iov_len;
        }
    }

    if (linearSize > 0) {
        mLinear.resize(linearSize, 0);
    }
}

void VirtioGpuResource::AttachToContext(VirtioGpuContextId contextId) {
    mAttachedToContexts.insert(contextId);
    mLatestAttachedContext = contextId;
}

void VirtioGpuResource::DetachFromContext(VirtioGpuContextId contextId) {
    mAttachedToContexts.erase(contextId);
    mLatestAttachedContext.reset();
    mHostPipe = nullptr;
}

std::unordered_set<VirtioGpuContextId> VirtioGpuResource::GetAttachedContexts() const {
    return mAttachedToContexts;
}

void VirtioGpuResource::DetachIov() {
    mIovs.clear();
    mLinear.clear();
}

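// Returns the host virtual address and size backing a mappable blob (ring blobs
// and external memory mappings); descriptor-backed blobs are not mappable here.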
int VirtioGpuResource::Map(void** outAddress, uint64_t* outSize) {
    if (!mBlobMemory) {
        stream_renderer_error("Failed to map resource %d: no blob memory to map.", mId);
        return -EINVAL;
    }

    void* hva = nullptr;
    uint64_t hvaSize = 0;

    if (std::holds_alternative<RingBlobMemory>(*mBlobMemory)) {
        auto& memory = std::get<RingBlobMemory>(*mBlobMemory);
        hva = memory->map();
        hvaSize = memory->size();
    } else if (std::holds_alternative<ExternalMemoryMapping>(*mBlobMemory)) {
        if (!mCreateBlobArgs) {
            stream_renderer_error("failed to map resource %d: missing args.", mId);
            return -EINVAL;
        }
        auto& memory = std::get<ExternalMemoryMapping>(*mBlobMemory);
        hva = memory.addr;
        hvaSize = mCreateBlobArgs->size;
    } else {
        stream_renderer_error("failed to map resource %d: no mappable memory.", mId);
        return -EINVAL;
    }

    if (outAddress) {
        *outAddress = hva;
    }
    if (outSize) {
        *outSize = hvaSize;
    }
    return 0;
}

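// Fills stream_renderer_resource_info from the original create args, translating
// the virgl format to a DRM fourcc and reporting a stride aligned to 16 bytes.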
int VirtioGpuResource::GetInfo(struct stream_renderer_resource_info* outInfo) const {
    if (!mCreateArgs) {
        stream_renderer_error("Failed to get info: resource %d missing args.", mId);
        return ENOENT;
    }

    uint32_t bpp = 4U;
    switch (mCreateArgs->format) {
        case VIRGL_FORMAT_B8G8R8A8_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_ARGB8888;
            break;
        case VIRGL_FORMAT_B8G8R8X8_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_XRGB8888;
            break;
        case VIRGL_FORMAT_B5G6R5_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_RGB565;
            bpp = 2U;
            break;
        case VIRGL_FORMAT_R8G8B8A8_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_ABGR8888;
            break;
        case VIRGL_FORMAT_R8G8B8X8_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_XBGR8888;
            break;
        case VIRGL_FORMAT_R8_UNORM:
            outInfo->drm_fourcc = DRM_FORMAT_R8;
            bpp = 1U;
            break;
        default:
            return EINVAL;
    }

    outInfo->stride = AlignUp(mCreateArgs->width * bpp, 16U);
    outInfo->virgl_format = mCreateArgs->format;
    outInfo->handle = mCreateArgs->handle;
    outInfo->height = mCreateArgs->height;
    outInfo->width = mCreateArgs->width;
    outInfo->depth = mCreateArgs->depth;
    outInfo->flags = mCreateArgs->flags;
    outInfo->tex_id = 0;
    return 0;
}

int VirtioGpuResource::GetVulkanInfo(struct stream_renderer_vulkan_info* outInfo) const {
    if (!mBlobMemory) {
        return -EINVAL;
    }
    if (!std::holds_alternative<ExternalMemoryDescriptor>(*mBlobMemory)) {
        return -EINVAL;
    }
    auto& memory = std::get<ExternalMemoryDescriptor>(*mBlobMemory);
    if (!memory->vulkanInfoOpt) {
        return -EINVAL;
    }
    auto& memoryVulkanInfo = *memory->vulkanInfoOpt;

    outInfo->memory_index = memoryVulkanInfo.memoryIndex;
    memcpy(outInfo->device_id.device_uuid, memoryVulkanInfo.deviceUUID,
           sizeof(outInfo->device_id.device_uuid));
    memcpy(outInfo->device_id.driver_uuid, memoryVulkanInfo.driverUUID,
           sizeof(outInfo->device_id.driver_uuid));
    return 0;
}

int VirtioGpuResource::GetCaching(uint32_t* outCaching) const {
    if (!mBlobMemory) {
        stream_renderer_error("failed to get caching for resource %d: no blob memory", mId);
        return -EINVAL;
    }

    if (std::holds_alternative<RingBlobMemory>(*mBlobMemory)) {
        *outCaching = STREAM_RENDERER_MAP_CACHE_CACHED;
        return 0;
    } else if (std::holds_alternative<ExternalMemoryMapping>(*mBlobMemory)) {
        auto& memory = std::get<ExternalMemoryMapping>(*mBlobMemory);
        *outCaching = memory.caching;
        return 0;
    } else if (std::holds_alternative<ExternalMemoryDescriptor>(*mBlobMemory)) {
        auto& descriptor = std::get<ExternalMemoryDescriptor>(*mBlobMemory);
        *outCaching = descriptor->caching;
        return 0;
    }

    stream_renderer_error("failed to get caching for resource %d: unhandled type?", mId);
    return -EINVAL;
}

int VirtioGpuResource::WaitSyncResource() {
    if (mResourceType != VirtioGpuResourceType::COLOR_BUFFER) {
        stream_renderer_error("waitSyncResource is undefined for non-ColorBuffer resource.");
        return -EINVAL;
    }

    return FrameBuffer::getFB()->waitSyncColorBuffer(mId);
}

// Corresponds to Virtio GPU "TransferFromHost" commands and VMM requests to
// copy into display buffers.
int VirtioGpuResource::TransferRead(const GoldfishPipeServiceOps* ops, uint64_t offset,
                                    stream_renderer_box* box,
                                    std::optional<std::vector<struct iovec>> iovs) {
    // First, copy from the underlying backend resource to this resource's linear buffer:
    int ret = 0;
    if (mResourceType == VirtioGpuResourceType::BLOB) {
        stream_renderer_error("Failed to transfer: unexpected blob.");
        return -EINVAL;
    } else if (mResourceType == VirtioGpuResourceType::PIPE) {
        ret = ReadFromPipeToLinear(ops, offset, box);
    } else if (mResourceType == VirtioGpuResourceType::BUFFER) {
        ret = ReadFromBufferToLinear(offset, box);
    } else if (mResourceType == VirtioGpuResourceType::COLOR_BUFFER) {
        ret = ReadFromColorBufferToLinear(offset, box);
    } else {
        stream_renderer_error("Failed to transfer: unhandled resource type.");
        return -EINVAL;
    }
    if (ret != 0) {
        stream_renderer_error("Failed to transfer: failed to sync with backend resource.");
        return ret;
    }

    // Second, copy from this resource's linear buffer to the desired iov:
    if (iovs) {
        ret = TransferToIov(offset, box, *iovs);
    } else {
        ret = TransferToIov(offset, box, mIovs);
    }
    if (ret != 0) {
        stream_renderer_error("Failed to transfer: failed to copy to iov.");
    }
    return ret;
}

// Corresponds to Virtio GPU "TransferToHost" commands.
VirtioGpuResource::TransferWriteResult VirtioGpuResource::TransferWrite(
    const GoldfishPipeServiceOps* ops, uint64_t offset, stream_renderer_box* box,
    std::optional<std::vector<struct iovec>> iovs) {
    // First, copy from the desired iov to this resource's linear buffer:
    int ret = 0;
    if (iovs) {
        ret = TransferFromIov(offset, box, *iovs);
    } else {
        ret = TransferFromIov(offset, box, mIovs);
    }
    if (ret != 0) {
        stream_renderer_error("Failed to transfer: failed to copy from iov.");
        return TransferWriteResult{
            .status = ret,
        };
    }

    // Second, copy from this resource's linear buffer to the underlying backend resource:
    if (mResourceType == VirtioGpuResourceType::BLOB) {
        stream_renderer_error("Failed to transfer: unexpected blob.");
        return TransferWriteResult{
            .status = -EINVAL,
        };
    } else if (mResourceType == VirtioGpuResourceType::PIPE) {
        return WriteToPipeFromLinear(ops, offset, box);
    } else if (mResourceType == VirtioGpuResourceType::BUFFER) {
        ret = WriteToBufferFromLinear(offset, box);
    } else if (mResourceType == VirtioGpuResourceType::COLOR_BUFFER) {
        ret = WriteToColorBufferFromLinear(offset, box);
    } else {
        stream_renderer_error("Failed to transfer: unhandled resource type.");
        return TransferWriteResult{
            .status = -EINVAL,
        };
    }
    if (ret != 0) {
        stream_renderer_error("Failed to transfer: failed to sync with backend resource.");
    }
    return TransferWriteResult{
        .status = ret,
    };
}

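// Reads box->w bytes from the resource's host pipe into the linear buffer at
// offset box->x, retrying on kPipeTryAgain until the requested amount arrives.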
int VirtioGpuResource::ReadFromPipeToLinear(const GoldfishPipeServiceOps* ops, uint64_t offset,
                                            stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::PIPE) {
        stream_renderer_error("Failed to transfer: resource %d is not PIPE.", mId);
        return -EINVAL;
    }

    // Do the pipe service op here, if there is an associated hostpipe.
    auto hostPipe = mHostPipe;
    if (!hostPipe) {
        stream_renderer_error("Failed to transfer: resource %d missing PIPE.", mId);
        return -EINVAL;
    }

    size_t readBytes = 0;
    size_t wantedBytes = readBytes + (size_t)box->w;

    while (readBytes < wantedBytes) {
        GoldfishPipeBuffer buf = {
            ((char*)mLinear.data()) + box->x + readBytes,
            wantedBytes - readBytes,
        };
        auto status = ops->guest_recv(hostPipe, &buf, 1);

        if (status > 0) {
            readBytes += status;
        } else if (status == kPipeTryAgain) {
            ops->wait_guest_recv(hostPipe);
        } else {
            return EIO;
        }
    }

    return 0;
}

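// Writes box->w bytes from the linear buffer into the host pipe. guest_send may
// reallocate the pipe; when that happens, the updated pipe and the latest
// attached context are reported back through TransferWriteResult, presumably so
// the caller can refresh its context-to-pipe bookkeeping.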
VirtioGpuResource::TransferWriteResult VirtioGpuResource::WriteToPipeFromLinear(
    const GoldfishPipeServiceOps* ops, uint64_t offset, stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::PIPE) {
        stream_renderer_error("Failed to transfer: resource %d is not PIPE.", mId);
        return TransferWriteResult{
            .status = -EINVAL,
        };
    }

    if (!mCreateArgs) {
        stream_renderer_error("Failed to transfer: resource %d missing args.", mId);
        return TransferWriteResult{
            .status = -EINVAL,
        };
    }

    // Do the pipe service op here, if there is an associated hostpipe.
    auto hostPipe = mHostPipe;
    if (!hostPipe) {
        stream_renderer_error("No hostPipe");
        return TransferWriteResult{
            .status = -EINVAL,
        };
    }

    stream_renderer_debug("resid: %d offset: 0x%llx hostpipe: %p", mCreateArgs->handle,
                          (unsigned long long)offset, hostPipe);

    size_t writtenBytes = 0;
    size_t wantedBytes = (size_t)box->w;

    GoldfishHostPipe* updatedHostPipe = nullptr;

    while (writtenBytes < wantedBytes) {
        GoldfishPipeBuffer buf = {
            ((char*)mLinear.data()) + box->x + writtenBytes,
            wantedBytes - writtenBytes,
        };

        // guest_send can now reallocate the pipe.
        void* hostPipeBefore = hostPipe;
        auto status = ops->guest_send(&hostPipe, &buf, 1);

        if (hostPipe != hostPipeBefore) {
            updatedHostPipe = hostPipe;
        }

        if (status > 0) {
            writtenBytes += status;
        } else if (status == kPipeTryAgain) {
            ops->wait_guest_send(hostPipe);
        } else {
            return TransferWriteResult{
                .status = EIO,
            };
        }
    }

    TransferWriteResult result = {
        .status = 0,
    };
    if (updatedHostPipe != nullptr) {
        result.contextId = mLatestAttachedContext.value_or(-1);
        result.contextPipe = updatedHostPipe;
    }
    return result;
}

int VirtioGpuResource::ReadFromBufferToLinear(uint64_t offset, stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::BUFFER) {
        stream_renderer_error("Failed to transfer: resource %d is not BUFFER.", mId);
        return -EINVAL;
    }

    if (!mCreateArgs) {
        stream_renderer_error("Failed to transfer: resource %d missing args.", mId);
        return -EINVAL;
    }

    FrameBuffer::getFB()->readBuffer(mCreateArgs->handle, 0,
                                     mCreateArgs->width * mCreateArgs->height, mLinear.data());
    return 0;
}

int VirtioGpuResource::WriteToBufferFromLinear(uint64_t offset, stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::BUFFER) {
        stream_renderer_error("Failed to transfer: resource %d is not BUFFER.", mId);
        return -EINVAL;
    }

    if (!mCreateArgs) {
        stream_renderer_error("Failed to transfer: resource %d missing args.", mId);
        return -EINVAL;
    }

    FrameBuffer::getFB()->updateBuffer(mCreateArgs->handle, 0,
                                       mCreateArgs->width * mCreateArgs->height, mLinear.data());
    return 0;
}

int VirtioGpuResource::ReadFromColorBufferToLinear(uint64_t offset, stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::COLOR_BUFFER) {
        stream_renderer_error("Failed to transfer: resource %d is not COLOR_BUFFER.", mId);
        return -EINVAL;
    }

    if (!mCreateArgs) {
        stream_renderer_error("Failed to transfer: resource %d missing args.", mId);
        return -EINVAL;
    }

    auto glformat = virgl_format_to_gl(mCreateArgs->format);
    auto gltype = gl_format_to_natural_type(glformat);

    // We always xfer the whole thing again from GL
    // since it's fiddly to calc / copy-out subregions
    if (virgl_format_is_yuv(mCreateArgs->format)) {
        FrameBuffer::getFB()->readColorBufferYUV(mCreateArgs->handle, 0, 0, mCreateArgs->width,
                                                 mCreateArgs->height, mLinear.data(),
                                                 mLinear.size());
    } else {
        FrameBuffer::getFB()->readColorBuffer(mCreateArgs->handle, 0, 0, mCreateArgs->width,
                                              mCreateArgs->height, glformat, gltype,
                                              mLinear.data());
    }

    return 0;
}

int VirtioGpuResource::WriteToColorBufferFromLinear(uint64_t offset, stream_renderer_box* box) {
    if (mResourceType != VirtioGpuResourceType::COLOR_BUFFER) {
        stream_renderer_error("Failed to transfer: resource %d is not COLOR_BUFFER.", mId);
        return -EINVAL;
    }

    if (!mCreateArgs) {
        stream_renderer_error("Failed to transfer: resource %d missing args.", mId);
        return -EINVAL;
    }

    auto glformat = virgl_format_to_gl(mCreateArgs->format);
    auto gltype = gl_format_to_natural_type(glformat);

    // We always xfer the whole thing again to GL
    // since it's fiddly to calc / copy-out subregions
    FrameBuffer::getFB()->updateColorBuffer(mCreateArgs->handle, 0, 0, mCreateArgs->width,
                                            mCreateArgs->height, glformat, gltype, mLinear.data());
    return 0;
}

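// TransferToIov/TransferFromIov copy between the linear staging buffer and an
// iovec list (the explicitly provided one, or the resource's attached guest iovecs).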
int VirtioGpuResource::TransferToIov(uint64_t offset, const stream_renderer_box* box,
                                     std::optional<std::vector<struct iovec>> iovs) {
    if (iovs) {
        return TransferWithIov(offset, box, *iovs, TransferDirection::LINEAR_TO_IOV);
    } else {
        return TransferWithIov(offset, box, mIovs, TransferDirection::LINEAR_TO_IOV);
    }
}

int VirtioGpuResource::TransferFromIov(uint64_t offset, const stream_renderer_box* box,
                                       std::optional<std::vector<struct iovec>> iovs) {
    if (iovs) {
        return TransferWithIov(offset, box, *iovs, TransferDirection::IOV_TO_LINEAR);
    } else {
        return TransferWithIov(offset, box, mIovs, TransferDirection::IOV_TO_LINEAR);
    }
}

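// Validates the transfer box against the create args, computes the affected
// [start, end) byte range of the linear buffer for the resource's format, then
// walks the iovec list and copies the intersection of each iovec's byte range
// with [start, end) in the requested direction.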
int VirtioGpuResource::TransferWithIov(uint64_t offset, const stream_renderer_box* box,
                                       const std::vector<struct iovec>& iovs,
                                       TransferDirection direction) {
    if (!mCreateArgs) {
        stream_renderer_error("failed to transfer: missing resource args.");
        return -EINVAL;
    }
    if (box->x > mCreateArgs->width || box->y > mCreateArgs->height) {
        stream_renderer_error("failed to transfer: box out of range of resource");
        return -EINVAL;
    }
    if (box->w == 0U || box->h == 0U) {
        stream_renderer_error("failed to transfer: empty transfer");
        return -EINVAL;
    }
    if (box->x + box->w > mCreateArgs->width) {
        stream_renderer_error("failed to transfer: box overflows resource width");
        return -EINVAL;
    }

    size_t linearBase =
        virgl_format_to_linear_base(mCreateArgs->format, mCreateArgs->width, mCreateArgs->height,
                                    box->x, box->y, box->w, box->h);
    size_t start = linearBase;
    // height - 1 in order to treat the (w * bpp) row specially
    // (i.e., the last row does not occupy the full stride)
    size_t length =
        virgl_format_to_total_xfer_len(mCreateArgs->format, mCreateArgs->width,
                                       mCreateArgs->height, box->x, box->y, box->w, box->h);
    size_t end = start + length;

    if (start == end) {
        stream_renderer_error("failed to transfer: nothing to transfer");
        return -EINVAL;
    }

    if (end > mLinear.size()) {
        stream_renderer_error("failed to transfer: start + length overflows!");
        return -EINVAL;
    }

    uint32_t iovIndex = 0;
    size_t iovOffset = 0;
    size_t written = 0;
    char* linear = static_cast<char*>(mLinear.data());

    while (written < length) {
        if (iovIndex >= iovs.size()) {
            stream_renderer_error("failed to transfer: write request overflowed iovs");
            return -EINVAL;
        }

        const char* iovBase_const = static_cast<const char*>(iovs[iovIndex].iov_base);
        char* iovBase = static_cast<char*>(iovs[iovIndex].iov_base);
        size_t iovLen = iovs[iovIndex].iov_len;
        size_t iovOffsetEnd = iovOffset + iovLen;

        auto lower_intersect = std::max(iovOffset, start);
        auto upper_intersect = std::min(iovOffsetEnd, end);
        if (lower_intersect < upper_intersect) {
            size_t toWrite = upper_intersect - lower_intersect;
            switch (direction) {
                case TransferDirection::IOV_TO_LINEAR:
                    memcpy(linear + lower_intersect, iovBase_const + lower_intersect - iovOffset,
                           toWrite);
                    break;
                case TransferDirection::LINEAR_TO_IOV:
                    memcpy(iovBase + lower_intersect - iovOffset, linear + lower_intersect,
                           toWrite);
                    break;
                default:
                    stream_renderer_error("failed to transfer: invalid synchronization dir");
                    return -EINVAL;
            }
            written += toWrite;
        }
        ++iovIndex;
        iovOffset += iovLen;
    }

    return 0;
}

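// Exports the blob's backing memory as a stream_renderer_handle. Ring blobs are
// exported as shared memory; descriptor-backed blobs release their descriptor to
// the caller. Mapped-only blobs cannot be exported.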
int VirtioGpuResource::ExportBlob(struct stream_renderer_handle* outHandle) {
    if (!mBlobMemory) {
        return -EINVAL;
    }

    if (std::holds_alternative<RingBlobMemory>(*mBlobMemory)) {
        auto& memory = std::get<RingBlobMemory>(*mBlobMemory);
        if (!memory->isExportable()) {
            return -EINVAL;
        }

        // Handle ownership transferred to VMM, Gfxstream keeps the mapping.
#ifdef _WIN32
        outHandle->os_handle =
            static_cast<int64_t>(reinterpret_cast<intptr_t>(memory->releaseHandle()));
#else
        outHandle->os_handle = static_cast<int64_t>(memory->releaseHandle());
#endif
        outHandle->handle_type = STREAM_MEM_HANDLE_TYPE_SHM;
        return 0;
    } else if (std::holds_alternative<ExternalMemoryDescriptor>(*mBlobMemory)) {
        auto& memory = std::get<ExternalMemoryDescriptor>(*mBlobMemory);

        auto rawDescriptorOpt = memory->descriptor.release();
        if (!rawDescriptorOpt) {
            stream_renderer_error(
                "failed to export blob for resource %u: failed to get raw handle.", mId);
            return -EINVAL;
        }
        auto rawDescriptor = *rawDescriptorOpt;

#ifdef _WIN32
        outHandle->os_handle = static_cast<int64_t>(reinterpret_cast<intptr_t>(rawDescriptor));
#else
        outHandle->os_handle = static_cast<int64_t>(rawDescriptor);
#endif
        outHandle->handle_type = memory->handleType;
        return 0;
    }

    return -EINVAL;
}

std::shared_ptr<RingBlob> VirtioGpuResource::ShareRingBlob() {
    if (!mBlobMemory) {
        return nullptr;
    }
    if (!std::holds_alternative<RingBlobMemory>(*mBlobMemory)) {
        return nullptr;
    }
    return std::get<RingBlobMemory>(*mBlobMemory);
}

#ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

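// Serializes this resource into a VirtioGpuResourceSnapshot proto: create args,
// blob create args, and blob memory. Ring blobs are snapshotted directly, while
// external descriptors and mappings are recorded by (context id, blob id) so they
// can be re-fetched from ExternalObjectManager on restore.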
std::optional<VirtioGpuResourceSnapshot> VirtioGpuResource::Snapshot() const {
    VirtioGpuResourceSnapshot resourceSnapshot;
    resourceSnapshot.set_id(mId);
    resourceSnapshot.set_type(
        static_cast<::gfxstream::host::snapshot::VirtioGpuResourceType>(mResourceType));

    if (mCreateArgs) {
        VirtioGpuResourceCreateArgs* snapshotCreateArgs = resourceSnapshot.mutable_create_args();
        snapshotCreateArgs->set_id(mCreateArgs->handle);
        snapshotCreateArgs->set_target(mCreateArgs->target);
        snapshotCreateArgs->set_format(mCreateArgs->format);
        snapshotCreateArgs->set_bind(mCreateArgs->bind);
        snapshotCreateArgs->set_width(mCreateArgs->width);
        snapshotCreateArgs->set_height(mCreateArgs->height);
        snapshotCreateArgs->set_depth(mCreateArgs->depth);
        snapshotCreateArgs->set_array_size(mCreateArgs->array_size);
        snapshotCreateArgs->set_last_level(mCreateArgs->last_level);
        snapshotCreateArgs->set_nr_samples(mCreateArgs->nr_samples);
        snapshotCreateArgs->set_flags(mCreateArgs->flags);
    }

    if (mCreateBlobArgs) {
        auto* snapshotCreateArgs = resourceSnapshot.mutable_create_blob_args();
        snapshotCreateArgs->set_mem(mCreateBlobArgs->blob_mem);
        snapshotCreateArgs->set_flags(mCreateBlobArgs->blob_flags);
        snapshotCreateArgs->set_id(mCreateBlobArgs->blob_id);
        snapshotCreateArgs->set_size(mCreateBlobArgs->size);
    }

    if (mBlobMemory) {
        if (std::holds_alternative<RingBlobMemory>(*mBlobMemory)) {
            auto& memory = std::get<RingBlobMemory>(*mBlobMemory);

            auto snapshotRingBlobOpt = memory->Snapshot();
            if (!snapshotRingBlobOpt) {
                stream_renderer_error("Failed to snapshot ring blob for resource %d.", mId);
                return std::nullopt;
            }
            resourceSnapshot.mutable_ring_blob()->Swap(&*snapshotRingBlobOpt);
        } else if (std::holds_alternative<ExternalMemoryDescriptor>(*mBlobMemory)) {
            if (!mLatestAttachedContext) {
                stream_renderer_error("Failed to snapshot resource %d: missing blob context?", mId);
                return std::nullopt;
            }
            if (!mCreateBlobArgs) {
                stream_renderer_error("Failed to snapshot resource %d: missing blob args?", mId);
                return std::nullopt;
            }
            auto snapshotDescriptorInfo = resourceSnapshot.mutable_external_memory_descriptor();
            snapshotDescriptorInfo->set_context_id(*mLatestAttachedContext);
            snapshotDescriptorInfo->set_blob_id(mCreateBlobArgs->blob_id);
        } else if (std::holds_alternative<ExternalMemoryMapping>(*mBlobMemory)) {
            if (!mLatestAttachedContext) {
                stream_renderer_error("Failed to snapshot resource %d: missing blob context?", mId);
                return std::nullopt;
            }
            if (!mCreateBlobArgs) {
                stream_renderer_error("Failed to snapshot resource %d: missing blob args?", mId);
                return std::nullopt;
            }
            auto snapshotDescriptorInfo = resourceSnapshot.mutable_external_memory_mapping();
            snapshotDescriptorInfo->set_context_id(*mLatestAttachedContext);
            snapshotDescriptorInfo->set_blob_id(mCreateBlobArgs->blob_id);
        }
    }

    if (mLatestAttachedContext) {
        resourceSnapshot.set_latest_attached_context(*mLatestAttachedContext);
    }

    resourceSnapshot.mutable_attached_contexts()->Add(mAttachedToContexts.begin(),
                                                      mAttachedToContexts.end());

    return resourceSnapshot;
}

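// Rebuilds a VirtioGpuResource from its snapshot proto. External blob memory is
// reclaimed from ExternalObjectManager using the recorded (context id, blob id).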
/*static*/ std::optional<VirtioGpuResource> VirtioGpuResource::Restore(
    const VirtioGpuResourceSnapshot& resourceSnapshot) {
    VirtioGpuResource resource = {};
    resource.mId = resourceSnapshot.id();
    resource.mResourceType = static_cast<VirtioGpuResourceType>(resourceSnapshot.type());

    if (resourceSnapshot.has_create_args()) {
        const auto& createArgsSnapshot = resourceSnapshot.create_args();
        resource.mCreateArgs = {
            .handle = createArgsSnapshot.id(),
            .target = createArgsSnapshot.target(),
            .format = createArgsSnapshot.format(),
            .bind = createArgsSnapshot.bind(),
            .width = createArgsSnapshot.width(),
            .height = createArgsSnapshot.height(),
            .depth = createArgsSnapshot.depth(),
            .array_size = createArgsSnapshot.array_size(),
            .last_level = createArgsSnapshot.last_level(),
            .nr_samples = createArgsSnapshot.nr_samples(),
            .flags = createArgsSnapshot.flags(),
        };
    }

    if (resourceSnapshot.has_create_blob_args()) {
        const auto& createArgsSnapshot = resourceSnapshot.create_blob_args();
        resource.mCreateBlobArgs = {
            .blob_mem = createArgsSnapshot.mem(),
            .blob_flags = createArgsSnapshot.flags(),
            .blob_id = createArgsSnapshot.id(),
            .size = createArgsSnapshot.size(),
        };
    }

    if (resourceSnapshot.has_ring_blob()) {
        auto resourceRingBlobOpt = RingBlob::Restore(resourceSnapshot.ring_blob());
        if (!resourceRingBlobOpt) {
            stream_renderer_error("Failed to restore ring blob for resource %d", resource.mId);
            return std::nullopt;
        }
        resource.mBlobMemory.emplace(std::move(*resourceRingBlobOpt));
    } else if (resourceSnapshot.has_external_memory_descriptor()) {
        const auto& snapshotDescriptorInfo = resourceSnapshot.external_memory_descriptor();

        auto descriptorInfoOpt = ExternalObjectManager::get()->removeBlobDescriptorInfo(
            snapshotDescriptorInfo.context_id(), snapshotDescriptorInfo.blob_id());
        if (!descriptorInfoOpt) {
            stream_renderer_error(
                "Failed to restore resource: failed to find blob descriptor info.");
            return std::nullopt;
        }

        resource.mBlobMemory.emplace(
            std::make_shared<BlobDescriptorInfo>(std::move(*descriptorInfoOpt)));
    } else if (resourceSnapshot.has_external_memory_mapping()) {
        const auto& snapshotDescriptorInfo = resourceSnapshot.external_memory_mapping();

        auto memoryMappingOpt = ExternalObjectManager::get()->removeMapping(
            snapshotDescriptorInfo.context_id(), snapshotDescriptorInfo.blob_id());
        if (!memoryMappingOpt) {
            stream_renderer_error("Failed to restore resource: failed to find mapping info.");
            return std::nullopt;
        }
        resource.mBlobMemory.emplace(std::move(*memoryMappingOpt));
    }

    if (resourceSnapshot.has_latest_attached_context()) {
        resource.mLatestAttachedContext = resourceSnapshot.latest_attached_context();
    }

    resource.mAttachedToContexts.insert(resourceSnapshot.attached_contexts().begin(),
                                        resourceSnapshot.attached_contexts().end());

    return resource;
}

#endif  // #ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

}  // namespace host
}  // namespace gfxstream