// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::collections::BTreeMap as Map;
use std::convert::TryInto;
use std::path::PathBuf;
use std::slice::from_raw_parts_mut;
use std::sync::Mutex;
use std::sync::OnceLock;

use rutabaga_gfx::kumquat_support::kumquat_gpu_protocol::*;
use rutabaga_gfx::kumquat_support::RutabagaEvent;
use rutabaga_gfx::kumquat_support::RutabagaMemoryMapping;
use rutabaga_gfx::kumquat_support::RutabagaReader;
use rutabaga_gfx::kumquat_support::RutabagaSharedMemory;
use rutabaga_gfx::kumquat_support::RutabagaStream;
use rutabaga_gfx::kumquat_support::RutabagaTube;
use rutabaga_gfx::kumquat_support::RutabagaTubeType;
use rutabaga_gfx::kumquat_support::RutabagaWriter;
use rutabaga_gfx::RutabagaDescriptor;
use rutabaga_gfx::RutabagaError;
use rutabaga_gfx::RutabagaGralloc;
use rutabaga_gfx::RutabagaGrallocBackendFlags;
use rutabaga_gfx::RutabagaHandle;
use rutabaga_gfx::RutabagaIntoRawDescriptor;
use rutabaga_gfx::RutabagaMappedRegion;
use rutabaga_gfx::RutabagaMapping;
use rutabaga_gfx::RutabagaRawDescriptor;
use rutabaga_gfx::RutabagaResult;
use rutabaga_gfx::VulkanInfo;
use rutabaga_gfx::RUTABAGA_FLAG_FENCE;
use rutabaga_gfx::RUTABAGA_FLAG_FENCE_HOST_SHAREABLE;
use rutabaga_gfx::RUTABAGA_FLAG_INFO_RING_IDX;
use rutabaga_gfx::RUTABAGA_MAP_ACCESS_RW;
use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
use rutabaga_gfx::RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD;

use crate::virtgpu::defines::*;

const VK_ICD_FILENAMES: &str = "VK_ICD_FILENAMES";

// The Tesla V-100 driver seems to enter a power management mode and stops being available to the
// Vulkan loader if more than a certain number of VK instances are created in the same process.
//
// This behavior is reproducible via:
//
// GfxstreamEnd2EndTests --gtest_filter="*MultiThreadedVkMapMemory*"
//
// Work around this by having a singleton gralloc per process.
fn gralloc() -> &'static Mutex<RutabagaGralloc> {
    static GRALLOC: OnceLock<Mutex<RutabagaGralloc>> = OnceLock::new();
    GRALLOC.get_or_init(|| {
        // The idea is to make sure the gfxstream ICD isn't loaded when gralloc starts
        // up. The Nvidia ICD should be loaded.
        //
        // This is mostly useful for developers.  For AOSP hermetic gfxstream end2end
        // testing, VK_ICD_FILENAMES shouldn't be defined.  For deqp-vk, this is
        // useful, but not safe for multi-threaded tests.  For now, since this is only
        // used for end2end tests, we should be good.
        let vk_icd_name_opt = match std::env::var(VK_ICD_FILENAMES) {
            Ok(vk_icd_name) => {
                std::env::remove_var(VK_ICD_FILENAMES);
                Some(vk_icd_name)
            }
            Err(_) => None,
        };

        let gralloc = Mutex::new(RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()).unwrap());

        if let Some(vk_icd_name) = vk_icd_name_opt {
            std::env::set_var(VK_ICD_FILENAMES, vk_icd_name);
        }

        gralloc
    })
}

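/// Client-side state for a single virtio-gpu resource: the server-assigned resource id, its size,
/// the handle used for export/import, any fences attached by transfers or submissions, and at
/// most one active mapping (CPU shared memory or imported GPU memory).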
pub struct VirtGpuResource {
    resource_id: u32,
    size: usize,
    handle: RutabagaHandle,
    attached_fences: Vec<RutabagaHandle>,
    vulkan_info: VulkanInfo,
    system_mapping: Option<RutabagaMemoryMapping>,
    gpu_mapping: Option<Box<dyn RutabagaMappedRegion>>,
}

impl VirtGpuResource {
    pub fn new(
        resource_id: u32,
        size: usize,
        handle: RutabagaHandle,
        vulkan_info: VulkanInfo,
    ) -> VirtGpuResource {
        VirtGpuResource {
            resource_id,
            size,
            handle,
            attached_fences: Vec::new(),
            vulkan_info,
            system_mapping: None,
            gpu_mapping: None,
        }
    }
}

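/// A connection to the kumquat GPU server over a packet-based tube, tracking the current context
/// id, the capability sets advertised by the server, and the resources created through this
/// connection. Buffer-object handles are allocated locally via `allocate_id`.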
pub struct VirtGpuKumquat {
    context_id: u32,
    id_allocator: u32,
    capset_mask: u64,
    stream: RutabagaStream,
    capsets: Map<u32, Vec<u8>>,
    resources: Map<u32, VirtGpuResource>,
}

impl VirtGpuKumquat {
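    /// Connects to the kumquat GPU server at `gpu_socket`, then queries the number of capability
    /// sets and the info and contents of each one. The resulting capset mask and capset data are
    /// cached for later `get_param`/`get_caps` calls.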
    pub fn new(gpu_socket: &str) -> RutabagaResult<VirtGpuKumquat> {
        let path = PathBuf::from(gpu_socket);
        let connection = RutabagaTube::new(path, RutabagaTubeType::Packet)?;
        let mut stream = RutabagaStream::new(connection);

        let get_num_capsets = kumquat_gpu_protocol_ctrl_hdr {
            type_: KUMQUAT_GPU_PROTOCOL_GET_NUM_CAPSETS,
            ..Default::default()
        };

        stream.write(KumquatGpuProtocolWrite::Cmd(get_num_capsets))?;
        let mut protocols = stream.read()?;
        let num_capsets = match protocols.remove(0) {
            KumquatGpuProtocol::RespNumCapsets(num) => num,
            _ => return Err(RutabagaError::Unsupported),
        };

        let mut capset_mask = 0;
        let mut capsets: Map<u32, Vec<u8>> = Default::default();
        for capset_index in 0..num_capsets {
            let get_capset_info = kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_GET_CAPSET_INFO,
                payload: capset_index,
            };

            stream.write(KumquatGpuProtocolWrite::Cmd(get_capset_info))?;
            protocols = stream.read()?;
            let resp_capset_info = match protocols.remove(0) {
                KumquatGpuProtocol::RespCapsetInfo(info) => info,
                _ => return Err(RutabagaError::Unsupported),
            };

            let get_capset = kumquat_gpu_protocol_get_capset {
                hdr: kumquat_gpu_protocol_ctrl_hdr {
                    type_: KUMQUAT_GPU_PROTOCOL_GET_CAPSET,
                    ..Default::default()
                },
                capset_id: resp_capset_info.capset_id,
                capset_version: resp_capset_info.version,
            };

            stream.write(KumquatGpuProtocolWrite::Cmd(get_capset))?;
            protocols = stream.read()?;
            let capset = match protocols.remove(0) {
                KumquatGpuProtocol::RespCapset(capset) => capset,
                _ => return Err(RutabagaError::Unsupported),
            };

            capset_mask = 1u64 << resp_capset_info.capset_id | capset_mask;
            capsets.insert(resp_capset_info.capset_id, capset);
        }

        Ok(VirtGpuKumquat {
            context_id: 0,
            id_allocator: 0,
            capset_mask,
            stream,
            capsets,
            resources: Default::default(),
        })
    }

    pub fn allocate_id(&mut self) -> u32 {
        self.id_allocator += 1;
        self.id_allocator
    }

    pub fn get_param(&self, getparam: &mut VirtGpuParam) -> RutabagaResult<()> {
        getparam.value = match getparam.param {
            VIRTGPU_KUMQUAT_PARAM_3D_FEATURES => (self.capset_mask != 0) as u64,
            VIRTGPU_KUMQUAT_PARAM_CAPSET_QUERY_FIX..=VIRTGPU_KUMQUAT_PARAM_CONTEXT_INIT => 1,
            VIRTGPU_KUMQUAT_PARAM_SUPPORTED_CAPSET_IDS => self.capset_mask,
            VIRTGPU_KUMQUAT_PARAM_EXPLICIT_DEBUG_NAME => 0,
            VIRTGPU_KUMQUAT_PARAM_FENCE_PASSING => 1,
            _ => return Err(RutabagaError::Unsupported),
        };

        Ok(())
    }

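    /// Copies the cached capability set identified by `capset_id` into `slice`, truncating to the
    /// shorter of the two lengths. Returns `InvalidCapset` if the id was not advertised by the
    /// server.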
    pub fn get_caps(&self, capset_id: u32, slice: &mut [u8]) -> RutabagaResult<()> {
        let caps = self
            .capsets
            .get(&capset_id)
            .ok_or(RutabagaError::InvalidCapset)?;
        let length = min(slice.len(), caps.len());
        slice[..length].copy_from_slice(&caps[..length]);
        Ok(())
    }

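    /// Creates a host rendering context for the given capset id, passing along a fixed-size debug
    /// name. The returned context id is also stored and used for all subsequent resource and
    /// submit commands on this connection.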
    pub fn context_create(&mut self, capset_id: u64, name: &str) -> RutabagaResult<u32> {
        let mut debug_name = [0u8; 64];
        debug_name
            .iter_mut()
            .zip(name.bytes())
            .for_each(|(dst, src)| *dst = src);

        let context_create = kumquat_gpu_protocol_ctx_create {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_CTX_CREATE,
                ..Default::default()
            },
            nlen: 0,
            context_init: capset_id.try_into()?,
            debug_name,
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(context_create))?;
        let mut protocols = self.stream.read()?;
        self.context_id = match protocols.remove(0) {
            KumquatGpuProtocol::RespContextCreate(ctx_id) => ctx_id,
            _ => return Err(RutabagaError::Unsupported),
        };

        Ok(self.context_id)
    }

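    /// Creates a 3D resource on the host from the given template and records it locally. On
    /// success, `res_handle` is set to the server's resource id and `bo_handle` to a locally
    /// allocated buffer-object handle.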
    pub fn resource_create_3d(
        &mut self,
        create_3d: &mut VirtGpuResourceCreate3D,
    ) -> RutabagaResult<()> {
        let resource_create_3d = kumquat_gpu_protocol_resource_create_3d {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_RESOURCE_CREATE_3D,
                ..Default::default()
            },
            target: create_3d.target,
            format: create_3d.format,
            bind: create_3d.bind,
            width: create_3d.width,
            height: create_3d.height,
            depth: create_3d.depth,
            array_size: create_3d.array_size,
            last_level: create_3d.last_level,
            nr_samples: create_3d.nr_samples,
            flags: create_3d.flags,
            size: create_3d.size,
            stride: create_3d.stride,
            ctx_id: self.context_id,
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(resource_create_3d))?;
        let mut protocols = self.stream.read()?;
        let resource = match protocols.remove(0) {
            KumquatGpuProtocol::RespResourceCreate(resp, handle) => {
                let size: usize = create_3d.size.try_into()?;
                VirtGpuResource::new(resp.resource_id, size, handle, resp.vulkan_info)
            }
            _ => return Err(RutabagaError::Unsupported),
        };

        create_3d.res_handle = resource.resource_id;
        create_3d.bo_handle = self.allocate_id();
        self.resources.insert(create_3d.bo_handle, resource);

        Ok(())
    }

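    /// Creates a blob resource and records it locally. If `blob_cmd` is non-empty, it is first
    /// submitted as a command buffer to the current context before the blob resource itself is
    /// created. On success, `res_handle` and `bo_handle` are filled in like `resource_create_3d`.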
    pub fn resource_create_blob(
        &mut self,
        create_blob: &mut VirtGpuResourceCreateBlob,
        blob_cmd: &[u8],
    ) -> RutabagaResult<()> {
        if blob_cmd.len() != 0 {
            let submit_command = kumquat_gpu_protocol_cmd_submit {
                hdr: kumquat_gpu_protocol_ctrl_hdr {
                    type_: KUMQUAT_GPU_PROTOCOL_SUBMIT_3D,
                    ..Default::default()
                },
                ctx_id: self.context_id,
                pad: 0,
                size: blob_cmd.len().try_into()?,
                num_in_fences: 0,
                flags: 0,
                ring_idx: 0,
                padding: Default::default(),
            };

            let mut data: Vec<u8> = vec![0; blob_cmd.len()];
            data.copy_from_slice(blob_cmd);

            self.stream
                .write(KumquatGpuProtocolWrite::CmdWithData(submit_command, data))?;
        }

        let resource_create_blob = kumquat_gpu_protocol_resource_create_blob {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_RESOURCE_CREATE_BLOB,
                ..Default::default()
            },
            ctx_id: self.context_id,
            blob_mem: create_blob.blob_mem,
            blob_flags: create_blob.blob_flags,
            padding: 0,
            blob_id: create_blob.blob_id,
            size: create_blob.size,
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(resource_create_blob))?;
        let mut protocols = self.stream.read()?;
        let resource = match protocols.remove(0) {
            KumquatGpuProtocol::RespResourceCreate(resp, handle) => {
                let size: usize = create_blob.size.try_into()?;
                VirtGpuResource::new(resp.resource_id, size, handle, resp.vulkan_info)
            }
            _ => {
                return Err(RutabagaError::Unsupported);
            }
        };

        create_blob.res_handle = resource.resource_id;
        create_blob.bo_handle = self.allocate_id();
        self.resources.insert(create_blob.bo_handle, resource);
        Ok(())
    }

    pub fn resource_unref(&mut self, bo_handle: u32) -> RutabagaResult<()> {
        let resource = self
            .resources
            .remove(&bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        let detach_resource = kumquat_gpu_protocol_ctx_resource {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_CTX_DETACH_RESOURCE,
                ..Default::default()
            },
            ctx_id: self.context_id,
            resource_id: resource.resource_id,
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(detach_resource))?;

        Ok(())
    }

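    /// Returns a mapping for the buffer object, creating one on first use. Resources backed by an
    /// opaque fd handle are imported and mapped through the process-wide gralloc singleton; all
    /// other handles are mapped directly as cached, read-write shared memory. The mapping is
    /// cached on the resource until `unmap`.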
    pub fn map(&mut self, bo_handle: u32) -> RutabagaResult<RutabagaMapping> {
        let resource = self
            .resources
            .get_mut(&bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        if let Some(ref system_mapping) = resource.system_mapping {
            let rutabaga_mapping = system_mapping.as_rutabaga_mapping();
            Ok(rutabaga_mapping)
        } else if let Some(ref gpu_mapping) = resource.gpu_mapping {
            let rutabaga_mapping = gpu_mapping.as_rutabaga_mapping();
            Ok(rutabaga_mapping)
        } else {
            let clone = resource.handle.try_clone()?;

            if clone.handle_type == RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD {
                let region = gralloc().lock().unwrap().import_and_map(
                    clone,
                    resource.vulkan_info,
                    resource.size as u64,
                )?;

                let rutabaga_mapping = region.as_rutabaga_mapping();
                resource.gpu_mapping = Some(region);
                Ok(rutabaga_mapping)
            } else {
                let mapping = RutabagaMemoryMapping::from_safe_descriptor(
                    clone.os_handle,
                    resource.size,
                    RUTABAGA_MAP_CACHE_CACHED | RUTABAGA_MAP_ACCESS_RW,
                )?;

                let rutabaga_mapping = mapping.as_rutabaga_mapping();
                resource.system_mapping = Some(mapping);
                Ok(rutabaga_mapping)
            }
        }
    }

    pub fn unmap(&mut self, bo_handle: u32) -> RutabagaResult<()> {
        let resource = self
            .resources
            .get_mut(&bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        resource.system_mapping = None;
        resource.gpu_mapping = None;
        Ok(())
    }

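    /// Issues a TRANSFER_TO_HOST_3D for the given region of the resource. An emulated fence (an
    /// event) is created, attached to the resource, and sent along with the command so that
    /// `wait` can later block until the transfer completes.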
    pub fn transfer_to_host(&mut self, transfer: &VirtGpuTransfer) -> RutabagaResult<()> {
        let resource = self
            .resources
            .get_mut(&transfer.bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        let event = RutabagaEvent::new()?;
        let emulated_fence: RutabagaHandle = event.into();

        resource.attached_fences.push(emulated_fence.try_clone()?);

        let transfer_to_host = kumquat_gpu_protocol_transfer_host_3d {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_TRANSFER_TO_HOST_3D,
                ..Default::default()
            },
            box_: kumquat_gpu_protocol_box {
                x: transfer._box.x,
                y: transfer._box.y,
                z: transfer._box.z,
                w: transfer._box.w,
                h: transfer._box.h,
                d: transfer._box.d,
            },
            offset: transfer.offset,
            level: transfer.level,
            stride: transfer.stride,
            layer_stride: transfer.layer_stride,
            ctx_id: self.context_id,
            resource_id: resource.resource_id,
            padding: 0,
        };

        self.stream.write(KumquatGpuProtocolWrite::CmdWithHandle(
            transfer_to_host,
            emulated_fence,
        ))?;
        Ok(())
    }

    pub fn transfer_from_host(&mut self, transfer: &VirtGpuTransfer) -> RutabagaResult<()> {
        let resource = self
            .resources
            .get_mut(&transfer.bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        let event = RutabagaEvent::new()?;
        let emulated_fence: RutabagaHandle = event.into();

        resource.attached_fences.push(emulated_fence.try_clone()?);
        let transfer_from_host = kumquat_gpu_protocol_transfer_host_3d {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_TRANSFER_FROM_HOST_3D,
                ..Default::default()
            },
            box_: kumquat_gpu_protocol_box {
                x: transfer._box.x,
                y: transfer._box.y,
                z: transfer._box.z,
                w: transfer._box.w,
                h: transfer._box.h,
                d: transfer._box.d,
            },
            offset: transfer.offset,
            level: transfer.level,
            stride: transfer.stride,
            layer_stride: transfer.layer_stride,
            ctx_id: self.context_id,
            resource_id: resource.resource_id,
            padding: 0,
        };

        self.stream.write(KumquatGpuProtocolWrite::CmdWithHandle(
            transfer_from_host,
            emulated_fence,
        ))?;

        Ok(())
    }

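    /// Submits a command buffer to the host for `ring_idx`. A fence is requested whenever buffer
    /// objects are attached or an out-fence fd is asked for; the resulting fence is attached to
    /// every listed buffer object (shareable fences combined with buffer objects are currently
    /// rejected) and exported through `raw_descriptor` when
    /// `VIRTGPU_KUMQUAT_EXECBUF_FENCE_FD_OUT` is set.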
    pub fn submit_command(
        &mut self,
        flags: u32,
        bo_handles: &[u32],
        cmd: &[u8],
        ring_idx: u32,
        in_fences: &[u64],
        raw_descriptor: &mut RutabagaRawDescriptor,
    ) -> RutabagaResult<()> {
        let mut fence_opt: Option<RutabagaHandle> = None;
        let mut data: Vec<u8> = vec![0; cmd.len()];
        let mut host_flags = 0;

        if flags & VIRTGPU_KUMQUAT_EXECBUF_RING_IDX != 0 {
            host_flags = RUTABAGA_FLAG_INFO_RING_IDX;
        }

        let need_fence =
            bo_handles.len() != 0 || (flags & VIRTGPU_KUMQUAT_EXECBUF_FENCE_FD_OUT) != 0;

        let actual_fence = (flags & VIRTGPU_KUMQUAT_EXECBUF_SHAREABLE_OUT) != 0
            && (flags & VIRTGPU_KUMQUAT_EXECBUF_FENCE_FD_OUT) != 0;

        // Should copy from in-fences when gfxstream supports it.
        data.copy_from_slice(cmd);

        if actual_fence {
            host_flags |= RUTABAGA_FLAG_FENCE_HOST_SHAREABLE;
            host_flags |= RUTABAGA_FLAG_FENCE;
        } else if need_fence {
            host_flags |= RUTABAGA_FLAG_FENCE;
        }

        let submit_command = kumquat_gpu_protocol_cmd_submit {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_SUBMIT_3D,
                ..Default::default()
            },
            ctx_id: self.context_id,
            pad: 0,
            size: cmd.len().try_into()?,
            num_in_fences: in_fences.len().try_into()?,
            flags: host_flags,
            ring_idx: ring_idx.try_into()?,
            padding: Default::default(),
        };

        if need_fence {
            self.stream
                .write(KumquatGpuProtocolWrite::CmdWithData(submit_command, data))?;

            let mut protocols = self.stream.read()?;
            let fence = match protocols.remove(0) {
                KumquatGpuProtocol::RespCmdSubmit3d(_fence_id, handle) => handle,
                _ => {
                    return Err(RutabagaError::Unsupported);
                }
            };

            for handle in bo_handles {
                // We could support implicit sync with real fences, but the need does not exist.
                if actual_fence {
                    return Err(RutabagaError::Unsupported);
                }

                let resource = self
                    .resources
                    .get_mut(handle)
                    .ok_or(RutabagaError::InvalidResourceId)?;

                resource.attached_fences.push(fence.try_clone()?);
            }

            fence_opt = Some(fence);
        } else {
            self.stream
                .write(KumquatGpuProtocolWrite::CmdWithData(submit_command, data))?;
        }

        if flags & VIRTGPU_KUMQUAT_EXECBUF_FENCE_FD_OUT != 0 {
            *raw_descriptor = fence_opt
                .ok_or(RutabagaError::SpecViolation("no fence found"))?
                .os_handle
                .into_raw_descriptor();
        }

        Ok(())
    }

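    /// Waits for all fences currently attached to the buffer object, draining its attached-fence
    /// list. Each emulated fence is converted back into an event and waited on.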
    pub fn wait(&mut self, bo_handle: u32) -> RutabagaResult<()> {
        let resource = self
            .resources
            .get_mut(&bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        let new_fences: Vec<RutabagaHandle> = std::mem::take(&mut resource.attached_fences);
        for fence in new_fences {
            let event: RutabagaEvent = fence.try_into()?;
            event.wait()?;
        }

        Ok(())
    }

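    /// Exports a handle for the buffer object. With `VIRTGPU_KUMQUAT_EMULATED_EXPORT`, a
    /// page-sized shared memory region is created, the resource id is written into it, and its
    /// descriptor is returned as an opaque handle that `resource_import` can decode. Otherwise
    /// the resource's native handle is cloned and returned.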
    pub fn resource_export(
        &mut self,
        bo_handle: u32,
        flags: u32,
    ) -> RutabagaResult<RutabagaHandle> {
        let resource = self
            .resources
            .get_mut(&bo_handle)
            .ok_or(RutabagaError::InvalidResourceId)?;

        if flags & VIRTGPU_KUMQUAT_EMULATED_EXPORT != 0 {
            let descriptor: RutabagaDescriptor =
                RutabagaSharedMemory::new("virtgpu_export", VIRTGPU_KUMQUAT_PAGE_SIZE as u64)?
                    .into();

            let clone = descriptor.try_clone()?;

            // Creating the mapping closes the cloned descriptor.
            let mapping = RutabagaMemoryMapping::from_safe_descriptor(
                clone,
                VIRTGPU_KUMQUAT_PAGE_SIZE,
                RUTABAGA_MAP_CACHE_CACHED | RUTABAGA_MAP_ACCESS_RW,
            )?;
            let rutabaga_mapping = mapping.as_rutabaga_mapping();

            let mut slice: &mut [u8] = unsafe {
                from_raw_parts_mut(rutabaga_mapping.ptr as *mut u8, VIRTGPU_KUMQUAT_PAGE_SIZE)
            };
            let mut writer = RutabagaWriter::new(&mut slice);
            writer.write_obj(resource.resource_id)?;

            // Opaque to users of this API, shared memory internally.
            Ok(RutabagaHandle {
                os_handle: descriptor,
                handle_type: RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD,
            })
        } else {
            let clone = resource.handle.try_clone()?;
            Ok(clone)
        }
    }

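    /// Imports a handle produced by an emulated `resource_export`: the shared memory page is
    /// mapped, the resource id read back out, and the resource attached to this context. A new
    /// local buffer-object handle is allocated and the size is reported as one page for now.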
    pub fn resource_import(
        &mut self,
        handle: RutabagaHandle,
        bo_handle: &mut u32,
        resource_handle: &mut u32,
        size: &mut u64,
    ) -> RutabagaResult<()> {
        let clone = handle.try_clone()?;
        let mapping = RutabagaMemoryMapping::from_safe_descriptor(
            clone.os_handle,
            VIRTGPU_KUMQUAT_PAGE_SIZE,
            RUTABAGA_MAP_CACHE_CACHED | RUTABAGA_MAP_ACCESS_RW,
        )?;

        let rutabaga_mapping = mapping.as_rutabaga_mapping();

        let mut slice: &mut [u8] = unsafe {
            from_raw_parts_mut(rutabaga_mapping.ptr as *mut u8, VIRTGPU_KUMQUAT_PAGE_SIZE)
        };

        let mut reader = RutabagaReader::new(&mut slice);
        *resource_handle = reader.read_obj()?;

        let attach_resource = kumquat_gpu_protocol_ctx_resource {
            hdr: kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_CTX_ATTACH_RESOURCE,
                ..Default::default()
            },
            ctx_id: self.context_id,
            resource_id: *resource_handle,
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(attach_resource))?;
        let resource = VirtGpuResource::new(
            *resource_handle,
            VIRTGPU_KUMQUAT_PAGE_SIZE,
            handle,
            Default::default(),
        );

        *bo_handle = self.allocate_id();
        // Should ask the server about the size long-term.
        *size = VIRTGPU_KUMQUAT_PAGE_SIZE as u64;
        self.resources.insert(*bo_handle, resource);

        Ok(())
    }

    pub fn snapshot(&mut self) -> RutabagaResult<()> {
        let snapshot_save = kumquat_gpu_protocol_ctrl_hdr {
            type_: KUMQUAT_GPU_PROTOCOL_SNAPSHOT_SAVE,
            ..Default::default()
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(snapshot_save))?;

        let mut protocols = self.stream.read()?;
        match protocols.remove(0) {
            KumquatGpuProtocol::RespOkSnapshot => Ok(()),
            _ => Err(RutabagaError::Unsupported),
        }
    }

    pub fn restore(&mut self) -> RutabagaResult<()> {
        let snapshot_restore = kumquat_gpu_protocol_ctrl_hdr {
            type_: KUMQUAT_GPU_PROTOCOL_SNAPSHOT_RESTORE,
            ..Default::default()
        };

        self.stream
            .write(KumquatGpuProtocolWrite::Cmd(snapshot_restore))?;

        let mut protocols = self.stream.read()?;
        match protocols.remove(0) {
            KumquatGpuProtocol::RespOkSnapshot => Ok(()),
            _ => Err(RutabagaError::Unsupported),
        }
    }
}

impl Drop for VirtGpuKumquat {
    fn drop(&mut self) {
        if self.context_id != 0 {
            for (_, resource) in self.resources.iter() {
                let detach_resource = kumquat_gpu_protocol_ctx_resource {
                    hdr: kumquat_gpu_protocol_ctrl_hdr {
                        type_: KUMQUAT_GPU_PROTOCOL_CTX_DETACH_RESOURCE,
                        ..Default::default()
                    },
                    ctx_id: self.context_id,
                    resource_id: resource.resource_id,
                };

                let _ = self
                    .stream
                    .write(KumquatGpuProtocolWrite::Cmd(detach_resource));
            }

            self.resources.clear();
            let context_destroy = kumquat_gpu_protocol_ctrl_hdr {
                type_: KUMQUAT_GPU_PROTOCOL_CTX_DESTROY,
                payload: self.context_id,
            };

            let _ = self
                .stream
                .write(KumquatGpuProtocolWrite::Cmd(context_destroy));
        }
    }
}
747