1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! Crate for displaying simple surfaces and GPU buffers over wayland.
6
7 extern crate base;
8
9 #[path = "dwl.rs"]
10 #[allow(dead_code)]
11 mod dwl;
12
13 use std::cell::Cell;
14 use std::cmp::max;
15 use std::collections::HashMap;
16 use std::ffi::CStr;
17 use std::ffi::CString;
18 use std::mem::zeroed;
19 use std::panic::catch_unwind;
20 use std::path::Path;
21 use std::process::abort;
22 use std::ptr::null;
23
24 use anyhow::bail;
25 use base::error;
26 use base::round_up_to_page_size;
27 use base::AsRawDescriptor;
28 use base::MemoryMapping;
29 use base::MemoryMappingBuilder;
30 use base::RawDescriptor;
31 use base::SharedMemory;
32 use base::VolatileMemory;
33 use dwl::*;
34 use linux_input_sys::virtio_input_event;
35 use sync::Waitable;
36 use vm_control::gpu::DisplayParameters;
37
38 use crate::DisplayExternalResourceImport;
39 use crate::DisplayT;
40 use crate::EventDeviceKind;
41 use crate::FlipToExtraInfo;
42 use crate::GpuDisplayError;
43 use crate::GpuDisplayEvents;
44 use crate::GpuDisplayFramebuffer;
45 use crate::GpuDisplayResult;
46 use crate::GpuDisplaySurface;
47 use crate::SemaphoreTimepoint;
48 use crate::SurfaceType;
49 use crate::SysDisplayT;
50
51 const BUFFER_COUNT: usize = 3;
52 const BYTES_PER_PIXEL: u32 = 4;
53
54 struct DwlContext(*mut dwl_context);
55 impl Drop for DwlContext {
drop(&mut self)56 fn drop(&mut self) {
57 if !self.0.is_null() {
58 // SAFETY:
59 // Safe given that we checked the pointer for non-null and it should always be of the
60 // correct type.
61 unsafe {
62 dwl_context_destroy(&mut self.0);
63 }
64 }
65 }
66 }
67
impl AsRawDescriptor for DwlContext {
    /// Returns the compositor connection's file descriptor so callers can poll
    /// it for readability before dispatching events.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        // SAFETY:
        // Safe given that the context pointer is valid.
        unsafe { dwl_context_fd(self.0) }
    }
}
75
76 struct DwlDmabuf(*mut dwl_dmabuf);
77
78 impl Drop for DwlDmabuf {
drop(&mut self)79 fn drop(&mut self) {
80 if !self.0.is_null() {
81 // SAFETY:
82 // Safe given that we checked the pointer for non-null and it should always be of the
83 // correct type.
84 unsafe {
85 dwl_dmabuf_destroy(&mut self.0);
86 }
87 }
88 }
89 }
90
91 struct DwlSurface(*mut dwl_surface);
92 impl Drop for DwlSurface {
drop(&mut self)93 fn drop(&mut self) {
94 if !self.0.is_null() {
95 // SAFETY:
96 // Safe given that we checked the pointer for non-null and it should always be of the
97 // correct type.
98 unsafe {
99 dwl_surface_destroy(&mut self.0);
100 }
101 }
102 }
103 }
104
/// A triple-buffered wayland surface and the shared memory backing its framebuffers.
struct WaylandSurface {
    // Owned pointer to the compositor-side surface object.
    surface: DwlSurface,
    // Bytes per framebuffer row (`width * BYTES_PER_PIXEL`; see `create_surface`).
    row_size: u32,
    // Size in bytes of ONE framebuffer, not of the whole mapping; the mapping
    // holds `BUFFER_COUNT` of these back to back.
    buffer_size: usize,
    // Index of the buffer currently displayed; the next draw targets index + 1
    // (mod BUFFER_COUNT). `Cell` because `flip` only has `&self`-style access
    // through the trait's `&mut self`/`&self` split.
    buffer_index: Cell<usize>,
    // Mapping of the shared memory that holds all `BUFFER_COUNT` framebuffers.
    buffer_mem: MemoryMapping,
}
112
impl WaylandSurface {
    /// Raw pointer to the underlying `dwl_surface`, for passing to FFI calls.
    fn surface(&self) -> *mut dwl_surface {
        self.surface.0
    }
}
118
119 impl GpuDisplaySurface for WaylandSurface {
surface_descriptor(&self) -> u64120 fn surface_descriptor(&self) -> u64 {
121 // SAFETY:
122 // Safe if the surface is valid.
123 let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
124 pointer as u64
125 }
126
framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>127 fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
128 let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
129 let framebuffer = self
130 .buffer_mem
131 .get_slice(buffer_index * self.buffer_size, self.buffer_size)
132 .ok()?;
133
134 Some(GpuDisplayFramebuffer::new(
135 framebuffer,
136 self.row_size,
137 BYTES_PER_PIXEL,
138 ))
139 }
140
next_buffer_in_use(&self) -> bool141 fn next_buffer_in_use(&self) -> bool {
142 let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
143 // SAFETY:
144 // Safe because only a valid surface and buffer index is used.
145 unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
146 }
147
close_requested(&self) -> bool148 fn close_requested(&self) -> bool {
149 // SAFETY:
150 // Safe because only a valid surface is used.
151 unsafe { dwl_surface_close_requested(self.surface()) }
152 }
153
flip(&mut self)154 fn flip(&mut self) {
155 self.buffer_index
156 .set((self.buffer_index.get() + 1) % BUFFER_COUNT);
157
158 // SAFETY:
159 // Safe because only a valid surface and buffer index is used.
160 unsafe {
161 dwl_surface_flip(self.surface(), self.buffer_index.get());
162 }
163 }
164
flip_to( &mut self, import_id: u32, _acquire_timepoint: Option<SemaphoreTimepoint>, _release_timepoint: Option<SemaphoreTimepoint>, _extra_info: Option<FlipToExtraInfo>, ) -> anyhow::Result<Waitable>165 fn flip_to(
166 &mut self,
167 import_id: u32,
168 _acquire_timepoint: Option<SemaphoreTimepoint>,
169 _release_timepoint: Option<SemaphoreTimepoint>,
170 _extra_info: Option<FlipToExtraInfo>,
171 ) -> anyhow::Result<Waitable> {
172 // SAFETY:
173 // Safe because only a valid surface and import_id is used.
174 unsafe { dwl_surface_flip_to(self.surface(), import_id) };
175 Ok(Waitable::signaled())
176 }
177
commit(&mut self) -> GpuDisplayResult<()>178 fn commit(&mut self) -> GpuDisplayResult<()> {
179 // SAFETY:
180 // Safe because only a valid surface is used.
181 unsafe {
182 dwl_surface_commit(self.surface());
183 }
184
185 Ok(())
186 }
187
set_position(&mut self, x: u32, y: u32)188 fn set_position(&mut self, x: u32, y: u32) {
189 // SAFETY:
190 // Safe because only a valid surface is used.
191 unsafe {
192 dwl_surface_set_position(self.surface(), x, y);
193 }
194 }
195 }
196
/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
pub struct DisplayWl {
    // Live dmabuf imports, keyed by import id; dropping an entry destroys the dmabuf.
    dmabufs: HashMap<u32, DwlDmabuf>,
    // Owned compositor context; closed when this struct drops.
    ctx: DwlContext,
    // Event stashed by `next_event` for the following `handle_next_event` call.
    current_event: Option<dwl_event>,
    // Monotonically increasing (wrapping) multitouch tracking id source.
    mt_tracking_id: u16,
}
208
209 /// Error logging callback used by wrapped C implementation.
210 ///
211 /// # Safety
212 ///
213 /// safe because it must be passed a valid pointer to null-terminated c-string.
214 #[allow(clippy::unnecessary_cast)]
error_callback(message: *const ::std::os::raw::c_char)215 unsafe extern "C" fn error_callback(message: *const ::std::os::raw::c_char) {
216 catch_unwind(|| {
217 assert!(!message.is_null());
218 // SAFETY: trivially safe
219 let msg = unsafe {
220 std::str::from_utf8(std::slice::from_raw_parts(
221 message as *const u8,
222 libc::strlen(message),
223 ))
224 .unwrap()
225 };
226 error!("{}", msg);
227 })
228 .unwrap_or_else(|_| abort())
229 }
230
231 impl DisplayWl {
232 /// Opens a fresh connection to the compositor.
new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl>233 pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl> {
234 // SAFETY:
235 // The dwl_context_new call should always be safe to call, and we check its result.
236 let ctx = DwlContext(unsafe { dwl_context_new(Some(error_callback)) });
237 if ctx.0.is_null() {
238 return Err(GpuDisplayError::Allocate);
239 }
240
241 // The dwl_context_setup call is always safe to call given that the supplied context is
242 // valid. and we check its result.
243 let cstr_path = match wayland_path.map(|p| p.as_os_str().to_str()) {
244 Some(Some(s)) => match CString::new(s) {
245 Ok(cstr) => Some(cstr),
246 Err(_) => return Err(GpuDisplayError::InvalidPath),
247 },
248 Some(None) => return Err(GpuDisplayError::InvalidPath),
249 None => None,
250 };
251 // This grabs a pointer to cstr_path without moving the CString into the .map closure
252 // accidentally, which triggeres a really hard to catch use after free in
253 // dwl_context_setup.
254 let cstr_path_ptr = cstr_path
255 .as_ref()
256 .map(|s: &CString| CStr::as_ptr(s))
257 .unwrap_or(null());
258 // SAFETY: args are valid and the return value is checked.
259 let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) };
260 if !setup_success {
261 return Err(GpuDisplayError::Connect);
262 }
263
264 Ok(DisplayWl {
265 dmabufs: HashMap::new(),
266 ctx,
267 current_event: None,
268 mt_tracking_id: 0u16,
269 })
270 }
271
ctx(&self) -> *mut dwl_context272 fn ctx(&self) -> *mut dwl_context {
273 self.ctx.0
274 }
275
pop_event(&self) -> dwl_event276 fn pop_event(&self) -> dwl_event {
277 // SAFETY:
278 // Safe because dwl_next_events from a context's circular buffer.
279 unsafe {
280 let mut ev = zeroed();
281 dwl_context_next_event(self.ctx(), &mut ev);
282 ev
283 }
284 }
285
next_tracking_id(&mut self) -> i32286 fn next_tracking_id(&mut self) -> i32 {
287 let cur_id: i32 = self.mt_tracking_id as i32;
288 self.mt_tracking_id = self.mt_tracking_id.wrapping_add(1);
289 cur_id
290 }
291
current_tracking_id(&self) -> i32292 fn current_tracking_id(&self) -> i32 {
293 self.mt_tracking_id as i32
294 }
295 }
296
impl DisplayT for DisplayWl {
    /// Returns true if events are queued on the compositor connection.
    fn pending_events(&self) -> bool {
        // SAFETY:
        // Safe because the function just queries the values of two variables in a context.
        unsafe { dwl_context_pending_events(self.ctx()) }
    }

    /// Dequeues the next event, stashing it for `handle_next_event`, and
    /// returns the descriptor of the surface it targets.
    fn next_event(&mut self) -> GpuDisplayResult<u64> {
        let ev = self.pop_event();
        let descriptor = ev.surface_descriptor as u64;
        self.current_event = Some(ev);
        Ok(descriptor)
    }

    /// Translates the event stashed by `next_event` into virtio input events,
    /// or `None` for event types that produce no input.
    fn handle_next_event(
        &mut self,
        _surface: &mut Box<dyn GpuDisplaySurface>,
    ) -> Option<GpuDisplayEvents> {
        // Should not panic since the common layer only calls this when an event occurs.
        let event = self.current_event.take().unwrap();

        match event.event_type {
            // Keyboard focus changes generate no input events.
            DWL_EVENT_TYPE_KEYBOARD_ENTER => None,
            DWL_EVENT_TYPE_KEYBOARD_LEAVE => None,
            DWL_EVENT_TYPE_KEYBOARD_KEY => {
                // params[0] is the linux keycode, params[1] the press/release state.
                let linux_keycode = event.params[0] as u16;
                let pressed = event.params[1] == DWL_KEYBOARD_KEY_STATE_PRESSED;
                let events = vec![virtio_input_event::key(linux_keycode, pressed, false)];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Keyboard,
                })
            }
            // TODO(tutankhamen): slot is always 0, because all the input
            // events come from mouse device, i.e. only one touch is possible at a time.
            // Full MT protocol has to be implemented and properly wired later.
            DWL_EVENT_TYPE_TOUCH_DOWN | DWL_EVENT_TYPE_TOUCH_MOTION => {
                // A new touch-down gets a fresh tracking id; motion keeps the current one.
                let tracking_id = if event.event_type == DWL_EVENT_TYPE_TOUCH_DOWN {
                    self.next_tracking_id()
                } else {
                    self.current_tracking_id()
                };

                // params[0]/params[1] are x/y; negative coordinates are clamped to 0.
                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(tracking_id),
                    virtio_input_event::multitouch_absolute_x(max(0, event.params[0])),
                    virtio_input_event::multitouch_absolute_y(max(0, event.params[1])),
                    virtio_input_event::touch(true),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            DWL_EVENT_TYPE_TOUCH_UP => {
                // Tracking id -1 releases the contact in the MT protocol.
                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(-1),
                    virtio_input_event::touch(false),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            _ => {
                error!("unknown event type {}", event.event_type);
                None
            }
        }
    }

    /// Dispatches pending compositor work on the connection.
    fn flush(&self) {
        // SAFETY:
        // Safe given that the context pointer is valid.
        unsafe {
            dwl_context_dispatch(self.ctx());
        }
    }

    /// Creates a triple-buffered surface backed by shared memory.
    ///
    /// `parent_surface_id` of `None` maps to parent id 0 (a toplevel surface).
    fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        surface_id: u32,
        scanout_id: Option<u32>,
        display_params: &DisplayParameters,
        surf_type: SurfaceType,
    ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
        let parent_id = parent_surface_id.unwrap_or(0);

        let (width, height) = display_params.get_virtual_display_size();
        let row_size = width * BYTES_PER_PIXEL;
        // fb_size is ONE framebuffer; the local `buffer_size` below is the whole
        // page-aligned allocation holding all BUFFER_COUNT framebuffers. Note the
        // WaylandSurface field named `buffer_size` stores the per-buffer fb_size.
        let fb_size = row_size * height;
        let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
        let buffer_shm = SharedMemory::new("GpuDisplaySurface", buffer_size as u64)?;
        // Mapping a freshly created shm of the same size should not fail.
        let buffer_mem = MemoryMappingBuilder::new(buffer_size)
            .from_shared_memory(&buffer_shm)
            .build()
            .unwrap();

        // Cursor surfaces need alpha blending; scanout surfaces receive input.
        let dwl_surf_flags = match surf_type {
            SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA,
            SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT,
        };
        // SAFETY:
        // Safe because only a valid context, parent ID (if not non-zero), and buffer FD are used.
        // The returned surface is checked for validity before being filed away.
        let surface = DwlSurface(unsafe {
            dwl_context_surface_new(
                self.ctx(),
                parent_id,
                surface_id,
                buffer_shm.as_raw_descriptor(),
                buffer_size,
                fb_size as usize,
                width,
                height,
                row_size,
                dwl_surf_flags,
            )
        });

        if surface.0.is_null() {
            return Err(GpuDisplayError::CreateSurface);
        }

        if let Some(scanout_id) = scanout_id {
            // SAFETY:
            // Safe because only a valid surface is used.
            unsafe {
                dwl_surface_set_scanout_id(surface.0, scanout_id);
            }
        }

        Ok(Box::new(WaylandSurface {
            surface,
            row_size,
            buffer_size: fb_size as usize,
            buffer_index: Cell::new(0),
            buffer_mem,
        }))
    }

    /// Imports an external dmabuf resource under `import_id` for later use by
    /// `flip_to`. Only `Dmabuf` imports are supported by this backend.
    fn import_resource(
        &mut self,
        import_id: u32,
        _surface_id: u32,
        external_display_resource: DisplayExternalResourceImport,
    ) -> anyhow::Result<()> {
        // This let pattern is always true if the vulkan_display feature is disabled.
        #[allow(irrefutable_let_patterns)]
        if let DisplayExternalResourceImport::Dmabuf {
            descriptor,
            offset,
            stride,
            modifiers,
            width,
            height,
            fourcc,
        } = external_display_resource
        {
            // SAFETY:
            // Safe given that the context pointer is valid. Any other invalid parameters would be
            // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is
            // valid before filing it away.
            let dmabuf = DwlDmabuf(unsafe {
                dwl_context_dmabuf_new(
                    self.ctx(),
                    import_id,
                    descriptor.as_raw_descriptor(),
                    offset,
                    stride,
                    modifiers,
                    width,
                    height,
                    fourcc,
                )
            });

            if dmabuf.0.is_null() {
                bail!("dmabuf import failed.");
            }

            // Keep the import alive until release_import; Drop destroys it.
            self.dmabufs.insert(import_id, dmabuf);

            Ok(())
        } else {
            bail!("gpu_display_wl only supports Dmabuf imports");
        }
    }

    /// Releases a previous import; dropping the entry destroys the dmabuf.
    fn release_import(&mut self, _surface_id: u32, import_id: u32) {
        self.dmabufs.remove(&import_id);
    }
}
493
// Marker impl: the wayland backend needs no platform-specific additions beyond
// the trait's defaults.
impl SysDisplayT for DisplayWl {}
495
impl AsRawDescriptor for DisplayWl {
    /// Exposes the compositor connection's file descriptor (via the owned
    /// context) so callers can poll it for pending events.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        // Delegates to DwlContext::as_raw_descriptor; safe given that the
        // context pointer is valid.
        self.ctx.as_raw_descriptor()
    }
}
502